diff --git a/.gitignore b/.gitignore
index c27602b70d00ecdc6d095ee560ee81250b91a79f..6a1bcbf1f0bb64821739366cc9074d3b6720c5f2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,6 +37,7 @@ var/
 *.egg-info/
 .installed.cfg
 *.egg
+pip-wheel-metadata
 
 # PyInstaller
 #  Usually these files are written by a python script from a template
@@ -124,3 +125,24 @@ tests/failed_blocks/
 /tests/envdir-to-envfile.sh
 /deploy/
 /scripts/hive.sqlite
+
+# vscode
+.vscode/*
+
+# pyrest tests
+*.out.json
+
+# version.py
+hive/version.py
+
+hivemind.port
+hive_server.pid
+hivemind-server.pid
+
+Pipfile.lock
+
+pghero.yml
+*~
+.tmp
+
+.private
diff --git a/.gitlab-ci-ssh.yaml b/.gitlab-ci-ssh.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8b0a7d49bc50f5b42a402594e50c93196feaa5fe
--- /dev/null
+++ b/.gitlab-ci-ssh.yaml
@@ -0,0 +1,265 @@
+stages:
+  - build
+  - test
+  - data-supply
+  - deploy
+  - e2e-test
+  - benchmark-tests
+  - post-deploy
+
+variables:
+  GIT_DEPTH: 1
+  LC_ALL: "C"
+  GIT_STRATEGY: clone
+  GIT_SUBMODULE_STRATEGY: recursive
+  GIT_CLONE_PATH: $CI_BUILDS_DIR/$CI_COMMIT_REF_SLUG/$CI_CONCURRENT_ID/project-name
+
+  HIVEMIND_SOURCE_HIVED_URL: $HIVEMIND_SOURCE_HIVED_URL
+  HIVEMIND_DB_NAME: "hive_${CI_COMMIT_REF_SLUG}"
+  HIVEMIND_HTTP_PORT: $((HIVEMIND_HTTP_PORT + CI_CONCURRENT_ID))
+  # Configured in GitLab repository settings
+  POSTGRES_USER: $HIVEMIND_POSTGRES_USER
+  POSTGRES_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+  POSTGRES_HOST_AUTH_METHOD: trust
+  # official way to provide password to psql: http://www.postgresql.org/docs/9.3/static/libpq-envars.html
+  PGPASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+
+
+default:
+  before_script:
+    - pwd
+    - echo "CI_NODE_TOTAL is $CI_NODE_TOTAL"
+    - echo "CI_NODE_INDEX is $CI_NODE_INDEX"
+    - echo "CI_CONCURRENT_ID is $CI_CONCURRENT_ID"
+    - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"
+
+hivemind_build:
+  stage: build
+  script:
+    - pip3 install --user --upgrade pip setuptools
+    - git fetch --tags
+    - git tag -f ci_implicit_tag
+    - echo $PYTHONUSERBASE
+    - "python3 setup.py bdist_egg"
+    - ls -l dist/*
+  artifacts:
+    paths:
+      - dist/
+    expire_in: 1 week
+  tags:
+     - hivemind
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+
+hivemind_sync:
+  stage: data-supply
+  environment:
+      name: "hive sync built from branch $CI_COMMIT_REF_NAME targeting database $HIVEMIND_DB_NAME"
+  needs:
+    - job: hivemind_build
+      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+    PYTHONUSERBASE: ./local-site
+  script:
+    - pip3 install --user --upgrade pip setuptools
+    # WARNING!!! 5000017 is temporarily hardcoded instead of $HIVEMIND_MAX_BLOCK
+    # revert this change when $HIVEMIND_MAX_BLOCK is set to 5000017
+    - scripts/ci_sync.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" 5000017 $HIVEMIND_HTTP_PORT
+  artifacts:
+    paths:
+      - hivemind-sync.log
+    expire_in: 1 week
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+
+hivemind_start_server:
+  stage: deploy
+  environment:
+    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
+    url: "http://hive-4.pl.syncad.com:$HIVEMIND_HTTP_PORT"
+    on_stop: hivemind_stop_server
+  needs:
+    - job: hivemind_build
+      artifacts: true
+#    - job: hivemind_sync
+#      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+    PYTHONUSERBASE: ./local-site
+  script:
+    - scripts/ci_start_server.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" $HIVEMIND_HTTP_PORT
+  artifacts:
+    paths:
+      - hive_server.pid
+    expire_in: 1 week
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+
+  tags:
+     - hivemind
+
+hivemind_stop_server:
+  stage: post-deploy
+  environment:
+    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
+    action: stop
+  variables:
+    GIT_STRATEGY: none
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+  script:
+    - scripts/ci_stop_server.sh hive_server.pid
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  tags:
+     - hivemind
+  artifacts:
+    paths:
+      - hive_server.log
+
+.hivemind_start_api_smoketest: &common_api_smoketest_job
+  stage: e2e-test
+  environment: hive-4.pl.syncad.com
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+
+bridge_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_patterns/ api_smoketest_bridge.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_bridge.xml
+
+bridge_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_negative/ api_smoketest_bridge_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_bridge_negative.xml
+
+condenser_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_patterns/ api_smoketest_condenser_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_condenser_api.xml
+
+condenser_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_negative/ api_smoketest_condenser_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_condenser_api_negative.xml
+
+database_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_patterns/ api_smoketest_database_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_database_api.xml
+
+database_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_negative/ api_smoketest_database_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_database_api_negative.xml
+
+follow_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_patterns/ api_smoketest_follow_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_follow_api.xml
+
+follow_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_negative/ api_smoketest_follow_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_follow_api_negative.xml
+
+tags_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_patterns/ api_smoketest_tags_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_tags_api.xml
+
+tags_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_negative/ api_smoketest_tags_api_negative.xml
+
+mock_tests:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" mock_tests/ api_smoketest_mock_tests.xml
+
+api_smoketest_benchmark:
+  stage: benchmark-tests
+  environment: hive-4.pl.syncad.com
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  allow_failure: true
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+  script:
+    - ./scripts/ci_start_api_benchmarks.sh localhost $HIVEMIND_HTTP_PORT 5
+  artifacts:
+    when: always
+    paths:
+      - tavern_benchmarks_report.html
diff --git a/.gitlab-ci.yaml b/.gitlab-ci.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..956bc7c0b958ed60c7754ec25224d4d0e0425a62
--- /dev/null
+++ b/.gitlab-ci.yaml
@@ -0,0 +1,421 @@
+stages:
+  - build
+  - test
+  - sync-e2e-benchmark
+  - data-supply
+  - deploy
+  - e2e-test
+  - benchmark-tests
+  - post-deploy
+
+.setup-pip: &setup-pip
+  - python -m venv .venv
+  - source .venv/bin/activate
+  - time pip install --upgrade pip setuptools wheel
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+  - time pip install --editable .[dev]
+
+.setup-runner-env: &setup-runner-env
+  # Setup runner environment (to connect to correct postgres server, mainly).
+  - TMP_VAR=$(cat hive-sync-runner-id.txt 2>/dev/null || true); export HIVE_SYNC_RUNNER_ID=${TMP_VAR:-0}
+  - eval $(cat "$RUNNER_CONF" | ./scripts/ci/setup_env.py --current-runner-id=${CI_RUNNER_ID} --hive-sync-runner-id=${HIVE_SYNC_RUNNER_ID})
+
+.set-variables: &set-variables
+  # - export # List all variables and their values set by GitLab CI.
+  - whoami
+  - echo "CI_RUNNER_ID is $CI_RUNNER_ID"
+  - echo "CI_PIPELINE_URL is $CI_PIPELINE_URL"
+  - echo "CI_PIPELINE_ID is $CI_PIPELINE_ID"
+  - echo "CI_COMMIT_SHORT_SHA is $CI_COMMIT_SHORT_SHA"
+  - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"
+  - export HIVEMIND_DB_NAME=${HIVEMIND_DB_NAME//[^a-zA-Z0-9_]/_}
+  - echo "HIVEMIND_DB_NAME is $HIVEMIND_DB_NAME"
+
+.fetch-git-tags: &fetch-git-tags
+  # - git fetch --tags # Looks to be unnecessary.
+  - git tag -f ci_implicit_tag # Needed to build python package
+
+.start-timer: &start-timer
+  - ./scripts/ci/timer.sh start
+
+.stop-timer: &stop-timer
+  - ./scripts/ci/timer.sh check
+
+.hive-sync-script-common: &hive-sync-script-common
+  - echo "${CI_RUNNER_ID}" > hive-sync-runner-id.txt
+  - ./scripts/ci/wait-for-postgres.sh "$RUNNER_POSTGRES_HOST" "$RUNNER_POSTGRES_PORT"
+  - export POSTGRES_MAJOR_VERSION=$(./scripts/ci/get-postgres-version.sh)
+  - ./scripts/ci/drop-db.sh
+  - ./scripts/ci/create-db.sh
+  - ./scripts/ci/hive-sync.sh
+  - ./scripts/ci/collect-db-stats.sh
+
+.rules-for-sync: &rules-for-sync
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: manual
+
+.rules-for-test: &rules-for-test
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: on_success
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: on_success
+    - when: on_success
+
+.default:
+  image: hivemind/python:3.6
+  interruptible: true
+  inherit:
+    default: false
+    variables: false
+  variables:
+    GIT_DEPTH: 10
+    GIT_STRATEGY: fetch
+    GIT_SUBMODULE_STRATEGY: recursive
+    PIPENV_VENV_IN_PROJECT: 1
+    PIPENV_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pipenv"
+    PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
+    POSTGRES_CLIENT_TOOLS_PATH: /usr/lib/postgresql
+    HIVEMIND_DB_NAME: "hive_${CI_COMMIT_REF_SLUG}"
+  cache: &global-cache
+    # Per-branch caching (CI_COMMIT_REF_SLUG would work the same way):
+    # key: "$CI_COMMIT_REF_NAME"
+    # Per-project caching: use any fixed key.
+    # Change this key if you need to clear the cache.
+    key: common-1
+    paths:
+      - .cache/
+      - .venv/
+      - .tox/
+  before_script:
+    - *start-timer
+    - *fetch-git-tags
+    - *set-variables
+    - *setup-pip
+    - *setup-runner-env
+  after_script:
+    - *stop-timer
+
+##### Jobs #####
+
+.hivemind-sync:
+  # Postgres shared on host.
+  extends: .default
+  <<: *rules-for-sync
+  stage: data-supply
+  needs: []
+  script:
+    - *hive-sync-script-common
+  artifacts:
+    paths:
+      - hivemind-sync.log
+      - pg-stats
+      - hive-sync-runner-id.txt
+    expire_in: 7 days
+  tags:
+    - hivemind-heavy-job
+
+.test-common:
+  extends: .default
+  <<: *rules-for-test
+  needs:
+    - job: hivemind-sync
+      artifacts: true
+  allow_failure: false
+  before_script:
+    - *start-timer
+    - *fetch-git-tags
+    - *set-variables
+    - *setup-pip
+    - *setup-runner-env
+    - ./scripts/ci/wait-for-postgres.sh "$RUNNER_POSTGRES_HOST" "$RUNNER_POSTGRES_PORT"
+    - ./scripts/ci/hive-server.sh start
+  after_script:
+    - *stop-timer
+  tags:
+    - hivemind-light-job
+
+
+.bridge_api_smoketest-script: &bridge_api_smoketest-script
+  - |
+    ./scripts/ci/start-api-smoketest.sh \
+        localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+        bridge_api_patterns/ api_smoketest_bridge.xml \
+        $RUNNER_TEST_JOBS
+
+.bridge_api_smoketest:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *bridge_api_smoketest-script
+  artifacts:
+    when: always
+    reports:
+      junit: api_smoketest_bridge.xml
+
+
+.bridge_api_smoketest_negative-script: &bridge_api_smoketest_negative-script
+  - |
+    ./scripts/ci/start-api-smoketest.sh \
+        localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+        bridge_api_negative/ api_smoketest_bridge_negative.xml \
+        $RUNNER_TEST_JOBS
+
+.bridge_api_smoketest_negative:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *bridge_api_smoketest_negative-script
+  artifacts:
+    when: always
+    reports:
+      junit: api_smoketest_bridge_negative.xml
+
+
+.condenser_api_smoketest-script: &condenser_api_smoketest-script
+  - |
+    ./scripts/ci/start-api-smoketest.sh \
+        localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+        condenser_api_patterns/ api_smoketest_condenser_api.xml \
+        $RUNNER_TEST_JOBS
+
+.condenser_api_smoketest:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *condenser_api_smoketest-script
+  artifacts:
+    when: always
+    reports:
+      junit: api_smoketest_condenser_api.xml
+
+
+.condenser_api_smoketest_negative-script: &condenser_api_smoketest_negative-script
+  - |
+    ./scripts/ci/start-api-smoketest.sh \
+        localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+        condenser_api_negative/ api_smoketest_condenser_api_negative.xml \
+        $RUNNER_TEST_JOBS
+
+.condenser_api_smoketest_negative:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *condenser_api_smoketest_negative-script
+  artifacts:
+    when: always
+    reports:
+      junit: api_smoketest_condenser_api_negative.xml
+
+
+.database_api_smoketest-script: &database_api_smoketest-script
+  - |
+    ./scripts/ci/start-api-smoketest.sh \
+        localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+        database_api_patterns/ api_smoketest_database_api.xml \
+        $RUNNER_TEST_JOBS
+
+.database_api_smoketest:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *database_api_smoketest-script
+  artifacts:
+    when: always
+    reports:
+      junit: api_smoketest_database_api.xml
+
+
+.database_api_smoketest_negative-script: &database_api_smoketest_negative-script
+  - |
+    ./scripts/ci/start-api-smoketest.sh \
+        localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+        database_api_negative/ api_smoketest_database_api_negative.xml \
+        $RUNNER_TEST_JOBS
+
+.database_api_smoketest_negative:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *database_api_smoketest_negative-script
+  artifacts:
+    when: always
+    reports:
+      junit: api_smoketest_database_api_negative.xml
+
+.follow_api_smoketest-script: &follow_api_smoketest-script
+  - |
+    ./scripts/ci/start-api-smoketest.sh \
+        localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+        follow_api_patterns/ api_smoketest_follow_api.xml \
+        $RUNNER_TEST_JOBS
+
+.follow_api_smoketest:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *follow_api_smoketest-script
+  artifacts:
+    when: always
+    reports:
+      junit: api_smoketest_follow_api.xml
+
+
+.follow_api_smoketest_negative-script: &follow_api_smoketest_negative-script
+  - |
+    ./scripts/ci/start-api-smoketest.sh \
+        localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+        follow_api_negative/ api_smoketest_follow_api_negative.xml \
+        $RUNNER_TEST_JOBS
+
+.follow_api_smoketest_negative:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *follow_api_smoketest_negative-script
+  artifacts:
+    when: always
+    reports:
+      junit: api_smoketest_follow_api_negative.xml
+
+
+.tags_api_smoketest-script: &tags_api_smoketest-script
+  - |
+    ./scripts/ci/start-api-smoketest.sh \
+        localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+        tags_api_patterns/ api_smoketest_tags_api.xml \
+        $RUNNER_TEST_JOBS
+
+.tags_api_smoketest:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *tags_api_smoketest-script
+  artifacts:
+    when: always
+    reports:
+      junit: api_smoketest_tags_api.xml
+
+
+.tags_api_smoketest_negative-script: &tags_api_smoketest_negative-script
+  - |
+    ./scripts/ci/start-api-smoketest.sh \
+        localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+        tags_api_negative/ api_smoketest_tags_api_negative.xml \
+        $RUNNER_TEST_JOBS
+
+.tags_api_smoketest_negative:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *tags_api_smoketest_negative-script
+  artifacts:
+    when: always
+    reports:
+      junit: api_smoketest_tags_api_negative.xml
+
+
+.mock_tests-script: &mock_tests-script
+  - |
+    scripts/ci/start-api-smoketest.sh \
+    localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+    mock_tests/ api_smoketest_mock_tests.xml \
+    $RUNNER_TEST_JOBS
+
+.mock_tests:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *mock_tests-script
+  artifacts:
+    reports:
+      junit: api_smoketest_mock_tests.xml
+
+
+.hive_api_smoketest-script: &hive_api_smoketest-script
+  - |
+    scripts/ci/start-api-smoketest.sh \
+    localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+    hive_api_patterns/ api_smoketest_hive_api.xml \
+    $RUNNER_TEST_JOBS
+
+.hive_api_smoketest:
+  stage: e2e-test
+  extends: .test-common
+  script:
+    - *hive_api_smoketest-script
+  artifacts:
+    reports:
+      junit: api_smoketest_hive_api.xml
+
+
+.api-smoketest-benchmark-script: &api-smoketest-benchmark-script
+  - |
+    ./scripts/ci/start-api-benchmarks.sh \
+        localhost $RUNNER_HIVEMIND_SERVER_HTTP_PORT \
+        $RUNNER_BENCHMARK_ITERATIONS \
+        $RUNNER_BENCHMARK_JOBS
+  - ./scripts/xml_report_parser.py --time-threshold=1.5 . ./tests/tests_api/hivemind/tavern
+
+.api-smoketest-benchmark:
+  stage: benchmark-tests
+  extends: .test-common
+  # Failure is temporarily allowed (calls longer than the time threshold are tolerated for now)
+  allow_failure: true
+  script:
+    - *api-smoketest-benchmark-script
+  artifacts:
+    when: always
+    paths:
+      - tavern_benchmarks_report.html
+
+
+sync-e2e-benchmark:
+  extends: .default
+  <<: *rules-for-sync
+  stage: sync-e2e-benchmark
+  needs: []
+  script:
+    - *hive-sync-script-common
+    - ./scripts/ci/hive-server.sh start
+    - pip install tox
+    - touch tox-installed
+    - *bridge_api_smoketest-script
+    - *bridge_api_smoketest_negative-script
+    - *condenser_api_smoketest-script
+    - *condenser_api_smoketest_negative-script
+    - *database_api_smoketest-script
+    - *database_api_smoketest_negative-script
+    - *follow_api_smoketest-script
+    - *follow_api_smoketest_negative-script
+    - *tags_api_smoketest-script
+    - *tags_api_smoketest_negative-script
+    - *mock_tests-script
+    - *hive_api_smoketest-script
+    - *api-smoketest-benchmark-script
+  artifacts:
+    when: always
+    paths:
+      - hivemind-sync.log
+      - hivemind-server.log
+      - pg-stats
+      - hive-sync-runner-id.txt
+      - tavern_benchmarks_report.html
+      - tests/tests_api/hivemind/tavern/**/*.out.json
+    reports:
+      junit: "*.xml"
+    expire_in: 7 days
+  tags:
+    - hivemind-heavy-job
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000000000000000000000000000000000000..1ab6d0b66b25381f1b41add1a97443b65c735471
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "tests/tests_api"]
+	path = tests/tests_api
+	url = ../../hive/tests_api.git
diff --git a/Dockerfile b/Dockerfile
index 9cfffd91a1bef807629822cf3bacf9c546a28192..233c800a0ae8d987bd7beec004131d4c7e320993 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,7 +5,7 @@ ENV ENVIRONMENT DEV
 ENV LOG_LEVEL INFO
 ENV LANG en_US.UTF-8
 ENV LC_ALL en_US.UTF-8
-ENV PIPENV_VENV_IN_PROJECT 1
+#ENV PIPENV_VENV_IN_PROJECT 1
 ARG SOURCE_COMMIT
 ENV SOURCE_COMMIT ${SOURCE_COMMIT}
 ARG SCHEMA_HASH
@@ -39,10 +39,11 @@ RUN \
         runit \
         s3cmd \
         libpcre3 \
-        libpcre3-dev
+        libpcre3-dev \
+        git
 
-RUN \
-    pip3 install --upgrade pip setuptools
+#RUN \
+#    pip3 install --upgrade pip setuptools
 
 ADD . /app
 
@@ -56,7 +57,7 @@ RUN chmod +x /usr/local/bin/hivesync.sh
 RUN chmod +x /usr/local/bin/hivesynccontinue.sh
 
 RUN \
-    pip3 install . && \
+    python3 setup.py build && python3 setup.py install && \
     apt-get remove -y \
         build-essential \
         libffi-dev \
diff --git a/Pipfile b/Pipfile
index 2ab052f1dac0fd87492f3080c111c25b45bd6616..5dfb204d52fcf2a8ebbd6fb2932c9ee7c01f21b0 100644
--- a/Pipfile
+++ b/Pipfile
@@ -11,21 +11,25 @@ toolz = "*"
 maya = "*"
 ujson = "*"
 prettytable = "*"
-jsonrpcserver = "*"
+jsonrpcserver = "4.1.3+8f3437a"
 aiohttp = "*"
 aiopg = "*"
-"psycopg2-binary" = "*"
-
+psycopg2-binary = "*"
+diff-match-patch = "*"
 
 [dev-packages]
 
-"ipython" = "*"
-"pep8" = "*"
-"pytest" = "*"
-"pytest-cov" = "*"
-"pytest-docker" = "*"
-"pytest-pylint" = "*"
-"pytest-asyncio" = "*"
-"pytest-console-scripts" = "*"
-"yapf" = "*"
-"autopep8" = "*"
+ipython = "*"
+pep8 = "*"
+pytest = "*"
+pytest-cov = "*"
+pytest-docker = "*"
+pytest-pylint = "*"
+pytest-asyncio = "*"
+pytest-console-scripts = "*"
+yapf = "*"
+autopep8 = "*"
+tox = "*"
+
+[requires]
+python_version = "3.6"
diff --git a/README.md b/README.md
index c339ea800e2b57df80b86b3f357e3912284eacc8..d408d6e7217eb365829dbff0f166ccb809548832 100644
--- a/README.md
+++ b/README.md
@@ -1,20 +1,20 @@
 # Hivemind [BETA]
 
-#### Developer-friendly microservice powering social networks on the Steem blockchain.
+#### Developer-friendly microservice powering social networks on the Hive blockchain.
 
-Hive is a "consensus interpretation" layer for the Steem blockchain, maintaining the state of social features such as post feeds, follows, and communities. Written in Python, it synchronizes an SQL database with chain state, providing developers with a more flexible/extensible alternative to the raw steemd API.
+Hivemind is a "consensus interpretation" layer for the Hive blockchain, maintaining the state of social features such as post feeds, follows, and communities. Written in Python, it synchronizes an SQL database with chain state, providing developers with a more flexible/extensible alternative to the raw hived API.
 
 ## Development Environment
 
  - Python 3.6 required
  - Postgres 10+ recommended
 
-Dependencies:
+### Dependencies:
 
  - OSX: `$ brew install python3 postgresql`
  - Ubuntu: `$ sudo apt-get install python3 python3-pip`
 
-Installation:
+### Installation (DO NOT USE pip! It will install incorrect versions of some packages):
 
 ```bash
 $ createdb hive
@@ -22,12 +22,14 @@ $ export DATABASE_URL=postgresql://user:pass@localhost:5432/hive
 ```
 
 ```bash
-$ git clone https://github.com/steemit/hivemind.git
+$ git clone https://gitlab.syncad.com/hive/hivemind.git
 $ cd hivemind
-$ pip3 install -e .[test]
+$ git submodule update --init --recursive
+$ python3 setup.py build
+$ python3 setup.py install --user
 ```
 
-Start the indexer:
+### Start the indexer:
 
 ```bash
 $ hive sync
@@ -38,7 +40,7 @@ $ hive status
 {'db_head_block': 19930833, 'db_head_time': '2018-02-16 21:37:36', 'db_head_age': 10}
 ```
 
-Start the server:
+### Start the server:
 
 ```bash
 $ hive server
@@ -49,28 +51,43 @@ $ curl --data '{"jsonrpc":"2.0","id":0,"method":"hive.db_head_state","params":{}
 {"jsonrpc": "2.0", "result": {"db_head_block": 19930795, "db_head_time": "2018-02-16 21:35:42", "db_head_age": 10}, "id": 0}
 ```
 
-Run tests:
+### Run tests:
+
+To run unit tests:
 
 ```bash
 $ make test
 ```
 
+To run api tests:
+1. Make sure that the current version of `hivemind` is installed.
+2. Make sure that `hivemind` is synced against a node replayed up to 5 000 000 blocks (api tests require this).
+3. Run `hivemind` in `server` mode.
+4. Set the env variables:
+```bash
+$ export HIVEMIND_PORT=8080
+$ export HIVEMIND_ADDRESS=127.0.0.1
+```
+5. Run tests using tox:
+```bash
+$ tox -- -v -n auto --durations=0
+```
 
 ## Production Environment
 
 Hivemind is deployed as a Docker container.
 
-Here is an example command that will initialize the DB schema and start the syncing process:
+Here is an example command that will initialize the database schema and start the syncing process:
 
 ```
-docker run -d --name hivemind --env DATABASE_URL=postgresql://user:pass@hostname:5432/databasename --env STEEMD_URL='{"default":"https://yoursteemnode"}' --env SYNC_SERVICE=1 -p 8080:8080 steemit/hivemind:latest
+docker run -d --name hivemind --env DATABASE_URL=postgresql://user:pass@hostname:5432/databasename --env STEEMD_URL='{"default":"https://yourhivenode"}' --env SYNC_SERVICE=1 -p 8080:8080 hive/hivemind:latest
 ```
 
-Be sure to set `DATABASE_URL` to point to your postgres database and `STEEMD_URL` to point to your steemd node to sync from.
+Be sure to set `DATABASE_URL` to point to your postgres database and set `STEEMD_URL` to point to your hived node to sync from.
 
 Once the database is synced, Hivemind will be available for serving requests.
 
-To follow along the logs, use this:
+To watch the logs on your console:
 
 ```
 docker logs -f hivemind
@@ -84,7 +101,7 @@ docker logs -f hivemind
 | `LOG_LEVEL`              | `--log-level`        | INFO    |
 | `HTTP_SERVER_PORT`       | `--http-server-port` | 8080    |
 | `DATABASE_URL`           | `--database-url`     | postgresql://user:pass@localhost:5432/hive |
-| `STEEMD_URL`             | `--steemd-url`       | '{"default":"https://yoursteemnode"}' |
+| `STEEMD_URL`             | `--steemd-url`       | '{"default":"https://yourhivenode"}' |
 | `MAX_BATCH`              | `--max-batch`        | 50      |
 | `MAX_WORKERS`            | `--max-workers`      | 4       |
 | `TRAIL_BLOCKS`           | `--trail-blocks`     | 2       |
@@ -103,7 +120,7 @@ Precedence: CLI over ENV over hive.conf. Check `hive --help` for details.
  - 250GB storage for database
 
 
-### Steem config
+### Hive config
 
 Build flags
 
@@ -133,9 +150,11 @@ checkpoint_timeout = 30min
 max_wal_size = 4GB
 ```
 
+The 'intarray' extension is required. A postgresql user with the CREATE privilege can load it with the command `CREATE EXTENSION intarray`.
+
 ## JSON-RPC API
 
-The minimum viable API is to remove the requirement for the `follow` and `tags` plugins (now rolled into [`condenser_api`](https://github.com/steemit/steem/blob/master/libraries/plugins/apis/condenser_api/condenser_api.cpp)) from the backend node while still being able to power condenser's non-wallet features. Thus, this is the core API set:
+The minimum viable API is to remove the requirement for the `follow` and `tags` plugins (now rolled into [`condenser_api`](https://gitlab.syncad.com/hive/hive/-/tree/master/libraries/plugins/apis/condenser_api/condenser_api.cpp)) from the backend node while still being able to power condenser's non-wallet features. Thus, this is the core API set:
 
 ```
 condenser_api.get_followers
@@ -168,26 +187,21 @@ condenser_api.get_discussions_by_author_before_date
 ## Overview
 
 
-#### History
-
-Initially, the [steemit.com](https://steemit.com) app was powered exclusively by `steemd` nodes. It was purely a client-side app without *any* backend other than a public and permissionless API node. As powerful as this model is, there are two issues: (a) maintaining UI-specific indices/APIs becomes expensive when tightly coupled to critical consensus nodes; and (b) frontend developers must be able to iterate quickly and access data in flexible and creative ways without writing C++.
-
-To relieve backend and frontend pressure, non-consensus and frontend-oriented concerns can be decoupled from `steemd` itself. This (a) allows the consensus node to focus on scalability and reliability, and (b) allows the frontend to maintain its own state layer, allowing for flexibility not feasible otherwise.
-
-Specifically, the goal is to completely remove the `follow` and `tags` plugins, as well as `get_state` from the backend node itself, and re-implement them in `hive`. In doing so, we form the foundational infrastructure on which to implement communities and more.
-
 #### Purpose
 
+Hivemind is a 2nd layer microservice that reads blocks of operations and virtual operations generated by the Hive blockchain network (hived nodes), then organizes the data from these operations into a convenient form for querying by Hive applications.
+Hivemind's API is focused on providing social media-related information to Hive apps. This includes information about posts, comments, votes, reputation, and Hive user profiles.
+
 ##### Hive tracks posts, relationships, social actions, custom operations, and derived states.
 
  - *discussions:* by blog, trending, hot, created, etc
- - *communities:* mod roles/actions, members, feeds (in 1.5; [spec](https://github.com/steemit/hivemind/blob/master/docs/communities.md))
+ - *communities:* mod roles/actions, members, feeds (in 1.5; [spec](https://gitlab.syncad.com/hive/hivemind/-/blob/master/docs/communities.md))
  - *accounts:* normalized profile data, reputation
  - *feeds:* un/follows and un/reblogs
 
 ##### Hive does not track most blockchain operations.
 
-For anything to do with wallets, orders, escrow, keys, recovery, or account history, query SBDS or steemd.
+For anything to do with wallets, orders, escrow, keys, recovery, or account history, query hived.
 
 ##### Hive can be extended or leveraged to create:
 
@@ -207,15 +221,15 @@ For anything to do with wallets, orders, escrow, keys, recovery, or account hist
 
 #### Core indexer
 
-Ingests blocks sequentially, processing operations relevant to accounts, post creations/edits/deletes, and custom_json ops for follows, reblogs, and communities. From these we build account and post lookup tables, follow/reblog state, and communities/members data. Built exclusively from raw blocks, it becomes the ground truth for internal state. Hive does not reimplement logic required for deriving payout values, reputation, and other statistics which are much more easily attained from steemd itself in the cache layer.
+Ingests blocks sequentially, processing operations relevant to accounts, post creations/edits/deletes, and custom_json ops for follows, reblogs, and communities. From these we build account and post lookup tables, follow/reblog state, and communities/members data. Built exclusively from raw blocks, it becomes the ground truth for internal state. Hive does not reimplement logic required for deriving payout values, reputation, and other statistics which are much more easily attained from hived itself in the cache layer.
 
 #### Cache layer
 
-Synchronizes the latest state of posts and users, allowing us to serve discussions and lists of posts with all expected information (title, preview, image, payout, votes, etc) without needing `steemd`. This layer is first built once the initial core indexing is complete. Incoming blocks trigger cache updates (including recalculation of trending score) for any posts referenced in `comment` or `vote` operations. There is a sweep to paid out posts to ensure they are updated in full with their final state.
+Synchronizes the latest state of posts and users, allowing us to serve discussions and lists of posts with all expected information (title, preview, image, payout, votes, etc) without needing `hived`. This layer is first built once the initial core indexing is complete. Incoming blocks trigger cache updates (including recalculation of trending score) for any posts referenced in `comment` or `vote` operations. There is a sweep to paid out posts to ensure they are updated in full with their final state.
 
 #### API layer
 
-Performs queries against the core and cache tables, merging them into a response in such a way that the frontend will not need to perform any additional calls to `steemd` itself. The initial API simply mimics steemd's `condenser_api` for backwards compatibility, but will be extended to leverage new opportunities and simplify application development.
+Performs queries against the core and cache tables, merging them into a response in such a way that the frontend will not need to perform any additional calls to `hived` itself. The initial API simply mimics hived's `condenser_api` for backwards compatibility, but will be extended to leverage new opportunities and simplify application development.
 
 
 #### Fork Resolution
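Note (not part of the patch): the `hive.db_head_state` health check that the README demonstrates with curl can also be issued from Python. The sketch below assumes the server started by `hive server` is listening on localhost:8080 and that the `requests` package is installed; both are assumptions for illustration.

```python
# Minimal sketch of the JSON-RPC health check shown in the README above.
# Assumes: hive server on localhost:8080, `requests` installed.
import requests

payload = {
    "jsonrpc": "2.0",
    "id": 0,
    "method": "hive.db_head_state",
    "params": {},
}
response = requests.post("http://localhost:8080", json=payload)
print(response.json())  # expected shape: {"jsonrpc": "2.0", "result": {...}, "id": 0}
```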
diff --git a/checkpoints/README.md b/checkpoints/README.md
deleted file mode 100644
index 72b146a03c09ed645b8d276e75cc8dbbbcd88aaf..0000000000000000000000000000000000000000
--- a/checkpoints/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Hive Checkpoints
-
-`hive` will detect checkpoint files in this directory. They can be used to speed up core reindexing: it will check this directory on startup.
-
-### Format
-
-Files should be named `(block_num).json.lst` where `block_num` is the last block in the file.
-
-The first file must begin with block 1. Successive files must begin with the previous file's block_num, plus 1.
-
-e.g.:
-
- - 1000000.json.lst -- blocks 1 - 1,000,000
- - 2000000.json.lst -- blocks 1,000,001 - 2,000,000
- - 3000000.json.lst -- blocks 2,000,001 - 3,000,000
-
-The intervals do not need to be regular, but blocks *must* be successive and there must be no duplicates.
diff --git a/checkpoints/dump.rb b/checkpoints/dump.rb
deleted file mode 100755
index e0bab29fa5f9bae4c8accc1e8a4b23e3edf2d4f1..0000000000000000000000000000000000000000
--- a/checkpoints/dump.rb
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/local/bin/ruby
-
-WSURL = "ws://0.0.0.0:8090/ws"
-
-require 'progress_bar'
-require 'faye/websocket'
-require 'eventmachine'
-
-def stream_blocks_from_ws n1, n2
-  EM.run do
-    pending = 0
-    loaded  = 0
-    expect  = n1
-    buffer  = (n1..n2).to_a
-
-    pb = ProgressBar.new(buffer.size)
-    ws = Faye::WebSocket::Client.new(WSURL)
-
-    ws.on :open do |event|
-      100.times do
-        n = buffer.shift
-        pending += 1
-        ws.send("{\"id\":#{n},\"method\":\"call\",\"params\":[0,\"get_block\",[#{n},false]]}")
-      end
-    end
-
-    ws.on :message do |event|
-      num, block = extract_block(event.data)
-      raise "out of sequence: expected #{expect}, got #{num}" unless num == expect
-      expect = num + 1
-      yield block
-
-      loaded  += 1
-      pending -= 1
-      pb.increment!(1000) if loaded % 1000 == 0
-
-      n = buffer.shift
-      if n
-        pending += 1
-        ws.send("{\"id\":#{n},\"method\":\"call\",\"params\":[0,\"get_block\",[#{n},false]]}")
-      elsif pending == 0
-        ws.close
-      end
-    end
-
-    ws.on :close do |event|
-      ws = nil
-      EM.stop_event_loop
-    end
-  end
-end
-
-def extract_block(data)
-  m = data.match(/^\{"id":(\d+),"result":(.+)\}$/)
-  raise "unexpected body: #{data[0,64]}" unless m
-  [m[1].to_i, m[2]]
-end
-
-def stream_blocks_to_file n1, n2, file
-  raise "File already exists" if File.exists?(file)
-  File.open(file, 'w') do |f|
-    stream_blocks_from_ws(n1, n2){|r| f.write(r+"\n")}
-  end
-end
-
-# Save all blocks up to 12M in batches of 1M
-(1..13).each do |mil|
-  n1 = (mil - 1) * 1000000 + 1
-  n2 = mil * 1000000
-  stream_blocks_to_file n1, n2, "#{n2}.json.lst"
-end
diff --git a/docker-compose-ci.yml b/docker-compose-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6c8dd6d5170f62f9ad7fb28f498f129cac96b7d1
--- /dev/null
+++ b/docker-compose-ci.yml
@@ -0,0 +1,135 @@
+version: "3.2"
+
+services:
+
+
+  python-3.6-dev:
+    # Run container this way:
+    # docker-compose -f docker-compose-ci.yml run --rm python-3.6-dev bash
+    # This opens terminal inside container. Project directory is mounted
+    # into container.
+    image: hivemind/python:3.6-dev
+    build:
+      context: .
+      dockerfile: ./scripts/ci/python/3.6/dev.dockerfile
+      args:
+        - user=${USER}
+        - workdir=/home/${USER}/hivemind
+    user: ${USER}
+    # security_opt:
+    #   # Significant performance boost (about 5%), but very insecure.
+    #   # See https://medium.com/better-programming/faster-python-in-docker-d1a71a9b9917
+    #   # See https://docs.docker.com/engine/security/seccomp/
+    #   - seccomp:unconfined
+    shm_size: 2g
+    volumes:
+      # Sockets of postgres servers on dockers.
+      - "postgres-10-run:/var/run/postgres-10"
+      - "postgres-12-run:/var/run/postgres-12"
+      # Sockets of postgres servers on host.
+      - "/var/run/postgresql:/var/run/postgresql"
+      # For keeping python dependencies created in docker.
+      - "python-3.6-dev:/home/${USER}"
+      # Application stuff from host.
+      - "$PWD/hive:$PWD/hive"
+      - "$PWD/tests:$PWD/tests"
+      - "$PWD/hive.conf:$PWD/hive.conf"
+      - "$PWD/pyproject.toml:$PWD/pyproject.toml"
+      - "$PWD/README.md:$PWD/README.md"
+      - "$PWD/setup.cfg:$PWD/setup.cfg"
+      - "$PWD/setup.py:$PWD/setup.py"
+      - "$PWD/tox.ini:$PWD/tox.ini"
+
+
+  python-3.6:
+    image: hivemind/python:3.6
+    build:
+      context: .
+      dockerfile: ./scripts/ci/python/3.6/Dockerfile
+      args:
+        - user=worker
+    user: worker
+    shm_size: 2g
+    volumes:
+      # Sockets of postgres servers on host.
+      - "/var/run/postgresql:/var/run/postgresql"
+
+
+  python-3.8:
+    image: hivemind/python:3.8
+    build:
+      context: .
+      dockerfile: ./scripts/ci/python/3.8/Dockerfile
+      args:
+        - user=worker
+    user: worker
+    shm_size: 2g
+    volumes:
+      # Sockets of postgres servers on host.
+      - "/var/run/postgresql:/var/run/postgresql"
+
+
+  postgres-10:
+    image: hivemind/postgres:10
+    restart: unless-stopped
+    build:
+      context: .
+      dockerfile: ./scripts/ci/postgres/10/Dockerfile
+    environment:
+      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
+    volumes:
+      - $PWD/$POSTGRES_10_CONF_FILE:/etc/postgresql/postgresql.conf:ro
+      - postgres-10-run:/var/run/postgresql
+    ports:
+      - "${POSTGRES_10_PUBLISHED_PORT}:5432"
+    shm_size: 12g
+    command: [
+      "postgres",
+      "-c", "config_file=/etc/postgresql/postgresql.conf"
+    ]
+
+
+  postgres-12:
+    image: hivemind/postgres:12
+    restart: unless-stopped
+    build:
+      context: .
+      dockerfile: ./scripts/ci/postgres/12/Dockerfile
+    environment:
+      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
+    volumes:
+      - $PWD/$POSTGRES_12_CONF_FILE:/etc/postgresql/postgresql.conf:ro
+      - postgres-12-run:/var/run/postgresql
+    ports:
+      - "${POSTGRES_12_PUBLISHED_PORT}:5432"
+    shm_size: 12g
+    command: [
+      "postgres",
+      "-c", "config_file=/etc/postgresql/postgresql.conf"
+    ]
+
+
+  hived-node:
+    image: $HIVED_IMAGE
+    restart: unless-stopped
+    ports:
+      - "$HIVED_PUBLISHED_WS_PORT:8090" # websocket
+      - "$HIVED_PUBLISHED_HTTP_PORT:8091"
+    shm_size: 12g
+    entrypoint: /usr/local/hive/consensus/entrypoint.sh
+    command: [
+      "--replay-blockchain",
+      "--stop-replay-at-block 5000000"
+    ]
+    volumes:
+      - $PWD/scripts/ci/hived-node/entrypoint.sh:/usr/local/hive/consensus/entrypoint.sh
+      - $PWD/scripts/ci/hived-node/config.ini:/usr/local/hive/consensus/datadir/config.ini
+      - ${HIVED_BLOCK_LOG_FILE}:/usr/local/hive/consensus/datadir/blockchain/block_log
+      - hived-node-datadir:/usr/local/hive/consensus/datadir
+
+
+volumes:
+  postgres-10-run:
+  postgres-12-run:
+  hived-node-datadir:
+  python-3.6-dev:
diff --git a/hive/cli.py b/hive/cli.py
index 8d617960990622866ad41e05b32a80cf51e113f7..1278641b612afd6990c454197941c7badae64019 100755
--- a/hive/cli.py
+++ b/hive/cli.py
@@ -1,19 +1,65 @@
-#!/usr/local/bin/python3
+#!/usr/bin/env python3
 
 """CLI service router"""
 
+import os
 import logging
+import time
 from hive.conf import Conf
 from hive.db.adapter import Db
+from hive.utils.stats import PrometheusClient
+
+
+def setup_logging(conf):
+    """Setup logging with timestamps"""
+
+    timestamp = conf.get('log_timestamp')
+    epoch = conf.get('log_epoch')
+    if timestamp and epoch:
+        datefmt='%Y-%m-%d %H:%M:%S'
+        timezone = time.strftime('%z')
+        fmt = '%(asctime)s.%(msecs)03d{} %(created).6f ' \
+            '%(levelname)s - %(name)s - %(message)s'.format(timezone)
+        logging.basicConfig(format=fmt, datefmt=datefmt)
+    elif timestamp:
+        datefmt='%Y-%m-%d %H:%M:%S'
+        timezone = time.strftime('%z')
+        fmt = '%(asctime)s.%(msecs)03d{} ' \
+            '%(levelname)s - %(name)s - %(message)s'.format(timezone)
+        logging.basicConfig(format=fmt, datefmt=datefmt)
+    elif epoch:
+        fmt = '%(created).6f %(levelname)s - %(name)s - %(message)s'
+        logging.basicConfig(format=fmt)
+    else:
+        fmt = '%(levelname)s - %(name)s - %(message)s'
+        logging.basicConfig(format=fmt)
 
-logging.basicConfig()
 
 def run():
     """Run the service specified in the `--mode` argument."""
 
     conf = Conf.init_argparse()
-    Db.set_shared_instance(conf.db())
     mode = conf.mode()
+    PrometheusClient( conf.get('prometheus_port') )
+
+    setup_logging(conf)
+
+    if mode == 'completion':
+        conf.generate_completion()
+        return
+
+    Db.set_shared_instance(conf.db())
+
+    pid_file_name = conf.pid_file()
+    if pid_file_name is not None:
+        try:
+            fh = open(pid_file_name, 'w')
+            pid = os.getpid()
+            fh.write(str(pid))
+            fh.close()
+        except OSError:
+            print("Cannot write into specified pid_file: %s" % pid_file_name)
+
 
     if conf.get('test_profile'):
         from hive.utils.profiler import Profiler
@@ -22,7 +68,6 @@ def run():
     else:
         launch_mode(mode, conf)
 
-
 def launch_mode(mode, conf):
     """Launch a routine as indicated by `mode`."""
     if mode == 'server':
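Note (illustrative, not part of the patch): with both `--log-timestamp` and `--log-epoch` set, `setup_logging()` above configures the record format sketched below; the snippet reproduces it standalone.

```python
# Standalone sketch of the log format configured by setup_logging()
# when both --log-timestamp and --log-epoch are passed.
import logging
import time

timezone = time.strftime('%z')
fmt = ('%(asctime)s.%(msecs)03d{tz} %(created).6f '
       '%(levelname)s - %(name)s - %(message)s').format(tz=timezone)
logging.basicConfig(format=fmt, datefmt='%Y-%m-%d %H:%M:%S')

logging.getLogger('hive.demo').warning("demo message")
# prints something like:
# 2020-01-01 12:00:00.123+0000 1577880000.123456 WARNING - hive.demo - demo message
```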
diff --git a/hive/conf.py b/hive/conf.py
index a902e87091a5633e9904c519de5275d148d827a0..94753bc204562493a21c2ba47a253677c35e362f 100644
--- a/hive/conf.py
+++ b/hive/conf.py
@@ -34,13 +34,15 @@ class Conf():
         add('--database-url', env_var='DATABASE_URL', required=False, help='database connection url', default='')
         add('--steemd-url', env_var='STEEMD_URL', required=False, help='steemd/jussi endpoint', default='{"default" : "https://api.hive.blog"}')
         add('--muted-accounts-url', env_var='MUTED_ACCOUNTS_URL', required=False, help='url to flat list of muted accounts', default='https://raw.githubusercontent.com/hivevectordefense/irredeemables/master/full.txt')
-        add('--blacklist-api-url', env_var='BLACKLIST_API_URL', required=False, help='url to acccess blacklist api', default='https://blacklist.usehive.com')
+        add('--blacklist-api-url', env_var='BLACKLIST_API_URL', required=False, help='url to access blacklist api', default='https://blacklist.usehive.com')
+
         # server
         add('--http-server-port', type=int, env_var='HTTP_SERVER_PORT', default=8080)
+        add('--prometheus-port', type=int, env_var='PROMETHEUS_PORT', required=False, help='if specified, runs prometheus daemon on the specified port, which provides statistics and performance data')
 
         # sync
-        add('--max-workers', type=int, env_var='MAX_WORKERS', help='max workers for batch requests', default=4)
-        add('--max-batch', type=int, env_var='MAX_BATCH', help='max chunk size for batch requests', default=50)
+        add('--max-workers', type=int, env_var='MAX_WORKERS', help='max workers for batch requests', default=6)
+        add('--max-batch', type=int, env_var='MAX_BATCH', help='max chunk size for batch requests', default=35)
         add('--trail-blocks', type=int, env_var='TRAIL_BLOCKS', help='number of blocks to trail head by', default=2)
         add('--sync-to-s3', type=strtobool, env_var='SYNC_TO_S3', help='alternative healthcheck for background sync service', default=False)
 
@@ -49,17 +51,63 @@ class Conf():
         add('--test-disable-sync', type=strtobool, env_var='TEST_DISABLE_SYNC', help='(debug) skip sync and sweep; jump to block streaming', default=False)
         add('--test-max-block', type=int, env_var='TEST_MAX_BLOCK', help='(debug) only sync to given block, for running sync test', default=None)
         add('--test-profile', type=strtobool, env_var='TEST_PROFILE', help='(debug) profile execution', default=False)
+        add('--log-virtual-op-calls', type=strtobool, env_var='LOG_VIRTUAL_OP_CALLS', help='(debug) log virtual op calls and responses', default=False)
+        add('--mock-block-data-path', type=str, nargs='+', env_var='MOCK_BLOCK_DATA_PATH', help='(debug/testing) load additional data from block data file')
+        add('--mock-vops-data-path', type=str, env_var='MOCK_VOPS_DATA_PATH', help='(debug/testing) load additional data from virtual operations data file')
+        add('--community-start-block', type=int, env_var='COMMUNITY_START_BLOCK', default=37500000)
+
+        # logging
+        add('--log-timestamp', help='Output timestamp in log', action='store_true')
+        add('--log-epoch', help='Output unix epoch in log', action='store_true')
+        add('--log-mask-sensitive-data', help='Mask sensitive data, e.g. passwords', action='store_true')
+
+        add('--pid-file', type=str, env_var='PID_FILE', help='Allows dumping the current process pid into the specified file', default=None)
+
+        add('--auto-http-server-port', nargs='+', type=int, help='Hivemind will listen on the first available port from this range')
 
         # needed for e.g. tests - other args may be present
         args = (parser.parse_args() if strict
                 else parser.parse_known_args()[0])
-        conf = Conf(args=vars(args))
+
+        conf = Conf(args=vars(args), arguments=parser._actions)
 
         # configure logger and print config
         root = logging.getLogger()
         root.setLevel(conf.log_level())
-        root.info("loaded configuration:\n%s",
-                  _sanitized_conf(parser))
+
+        try:
+            if 'auto_http_server_port' in vars(args) and vars(args)['auto_http_server_port'] is not None:
+                port_range = vars(args)['auto_http_server_port']
+                port_range_len = len(port_range)
+                if port_range_len == 0 or port_range_len > 2:
+                    raise ValueError("auto-http-server-port expect maximum two values, minimum one")
+                if port_range_len == 2 and port_range[0] > port_range[1]:
+                    raise ValueError("port min value is greater than port max value")
+        except Exception as ex:
+            root.error("Value error: {}".format(ex))
+            exit(1)
+
+        # Print command line args, but hide the db connection string
+        # (e.g. on a continuous integration server).
+        from sys import argv
+        if conf.get('log_mask_sensitive_data'):
+            my_args = []
+            upcoming_connection_string = False
+            for elem in argv[1:]:
+                if upcoming_connection_string:
+                    upcoming_connection_string = False
+                    my_args.append('MASKED')
+                    continue
+                if elem == '--database-url':
+                    upcoming_connection_string = True
+                my_args.append(elem)
+            root.info("Used command line args: %s", " ".join(my_args))
+        else:
+            root.info("Used command line args: %s", " ".join(argv[1:]))
+
+        # uncomment for full list of program args
+        #args_list = ["--" + k + " " + str(v) for k,v in vars(args).items()]
+        #root.info("Full command line args: %s", " ".join(args_list))
 
         if conf.mode() == 'server':
             #DbStats.SLOW_QUERY_MS = 750
@@ -72,11 +120,12 @@ class Conf():
         """Initialize hive config for testing."""
         return cls.init_argparse(strict=False)
 
-    def __init__(self, args, env=None):
+    def __init__(self, args, env=None, arguments=None):
         self._args = args
         self._env = env
         self._db = None
         self._steem = None
+        self.arguments = arguments
 
     def args(self):
         """Get the raw Namespace object as generated by configargparse"""
@@ -118,3 +167,24 @@ class Conf():
     def log_level(self):
         """Get `logger`s internal int level from config string."""
         return int_log_level(self.get('log_level'))
+
+    def pid_file(self):
+        """Get optional pid_file name to put current process pid in"""
+        return self._args.get("pid_file", None)
+
+    def generate_completion(self):
+        arguments = []
+        for arg in self.arguments:
+            arguments.extend(arg.option_strings)
+        arguments = " ".join(arguments)
+        with open('hive-completion.bash', 'w') as file:
+            file.writelines([
+                "#!/bin/bash\n",
+                "# to run type: source hive-completion.bash\n\n",
+                "# if you want to have completion everywhere, execute theese commands\n",
+                "# ln $PWD/hive-completion.bash $HOME/.local/\n",
+                '# echo "source $HOME/.local/hive-completion.bash" >> $HOME/.bashrc\n',
+                "# source $HOME/.bashrc\n\n"
+                f'complete -f -W "{arguments}" hive\n',
+                "\n"
+            ])
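Note (illustrative only, not the code added by this patch): the help text for `--auto-http-server-port` says Hivemind will listen on the first available port from the given range; a scan like the following could locate such a port. The helper name and behavior are assumptions, not hivemind's implementation.

```python
# Hypothetical helper showing one way to pick the first free port in a range.
import socket

def first_free_port(port_min, port_max, host="0.0.0.0"):
    """Return the first port in [port_min, port_max] that can be bound."""
    for port in range(port_min, port_max + 1):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            try:
                sock.bind((host, port))
                return port
            except OSError:
                continue
    raise RuntimeError("no free port in range %d-%d" % (port_min, port_max))
```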
diff --git a/hive/db/adapter.py b/hive/db/adapter.py
index c4f63262ee56d57032e3cbef515946b01d3cc25c..68c26621f6204995a9c1bb54b822c4696b231774 100644
--- a/hive/db/adapter.py
+++ b/hive/db/adapter.py
@@ -5,6 +5,7 @@ from time import perf_counter as perf
 from collections import OrderedDict
 from funcy.seqs import first
 import sqlalchemy
+import os
 
 from hive.utils.stats import Stats
 
@@ -50,12 +51,21 @@ class Db:
         self._exec = self._conn.execute
         self._exec(sqlalchemy.text("COMMIT"))
 
+    def clone(self):
+        return Db(self._url)
+
     def engine(self):
         """Lazy-loaded SQLAlchemy engine."""
         if not self._engine:
+            pool_size = os.cpu_count()
+            if pool_size > 5:
+                pool_size = pool_size - 1
+            else:
+                pool_size = 5
             self._engine = sqlalchemy.create_engine(
                 self._url,
                 isolation_level="READ UNCOMMITTED", # only supported in mysql
+                pool_size=pool_size,
                 pool_recycle=3600,
                 echo=False)
         return self._engine
@@ -78,6 +88,9 @@ class Db:
         assert self._is_write_query(sql), sql
         return self._query(sql, **kwargs)
 
+    def query_no_return(self, sql, **kwargs):
+        self._query(sql, **kwargs)
+
     def query_all(self, sql, **kwargs):
         """Perform a `SELECT n*m`"""
         res = self._query(sql, **kwargs)
@@ -154,11 +167,12 @@ class Db:
         return (sql, values)
 
     def _sql_text(self, sql):
-        if sql in self._prep_sql:
-            query = self._prep_sql[sql]
-        else:
-            query = sqlalchemy.text(sql).execution_options(autocommit=False)
-            self._prep_sql[sql] = query
+#        if sql in self._prep_sql:
+#            query = self._prep_sql[sql]
+#        else:
+#            query = sqlalchemy.text(sql).execution_options(autocommit=False)
+#            self._prep_sql[sql] = query
+        query = sqlalchemy.text(sql).execution_options(autocommit=False)
         return query
 
     def _query(self, sql, **kwargs):
@@ -173,7 +187,11 @@ class Db:
         try:
             start = perf()
             query = self._sql_text(sql)
+            if 'log_query' in kwargs and kwargs['log_query']:
+                log.info("QUERY: {}".format(query))
             result = self._exec(query, **kwargs)
+            if 'log_result' in kwargs and kwargs['log_result']:
+                log.info("RESULT: {}".format(result))
             Stats.log_db(sql, perf() - start)
             return result
         except Exception as e:
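Note (sketch): the branching added to `Db.engine()` above sizes the SQLAlchemy connection pool from the CPU count; it is equivalent to the one-liner below, with an extra guard for `os.cpu_count()` returning `None`, which the patch itself does not handle.

```python
import os

# One connection fewer than the CPU count, but never fewer than five.
pool_size = max((os.cpu_count() or 1) - 1, 5)
```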
diff --git a/hive/db/db_state.py b/hive/db/db_state.py
index 9d49ae1b5f872cdb41925984617f2dde103b7a7b..848e55f6f968dd596a70a23a4819a22d81f93c9a 100644
--- a/hive/db/db_state.py
+++ b/hive/db/db_state.py
@@ -3,14 +3,25 @@
 #pylint: disable=too-many-lines
 
 import time
+from time import perf_counter
+
 import logging
+import sqlalchemy
+
 
-from hive.db.schema import (setup, reset_autovac, build_metadata,
+from hive.db.schema import (setup, reset_autovac, set_logged_table_attribute, build_metadata,
                             build_metadata_community, teardown, DB_VERSION)
 from hive.db.adapter import Db
 
+from hive.utils.post_active import update_active_starting_from_posts_on_block
+from hive.utils.communities_rank import update_communities_posts_and_rank
+
+from hive.server.common.payout_stats import PayoutStats
+
 log = logging.getLogger(__name__)
 
+SYNCED_BLOCK_LIMIT = 7*24*1200 # 7 days
+
 class DbState:
     """Manages database state: sync status, migrations, etc."""
 
@@ -37,17 +48,13 @@ class DbState:
         if not cls._is_schema_loaded():
             log.info("[INIT] Create db schema...")
             setup(cls.db())
-            cls._before_initial_sync()
 
         # perform db migrations
         cls._check_migrations()
 
         # check if initial sync complete
-        cls._is_initial_sync = cls._is_feed_cache_empty()
-        if cls._is_initial_sync:
-            log.info("[INIT] Continue with initial sync...")
-        else:
-            log.info("[INIT] Hive initialized.")
+        cls._is_initial_sync = True
+        log.info("[INIT] Continue with initial sync...")
 
     @classmethod
     def teardown(cls):
@@ -62,10 +69,10 @@ class DbState:
         return cls._db
 
     @classmethod
-    def finish_initial_sync(cls):
+    def finish_initial_sync(cls, current_imported_block):
         """Set status to initial sync complete."""
         assert cls._is_initial_sync, "initial sync was not started."
-        cls._after_initial_sync()
+        cls._after_initial_sync(current_imported_block)
         cls._is_initial_sync = False
         log.info("[INIT] Initial sync complete!")
 
@@ -85,28 +92,55 @@ class DbState:
     @classmethod
     def _disableable_indexes(cls):
         to_locate = [
-            'hive_posts_ix3', # (author, depth, id)
-            'hive_posts_ix4', # (parent_id, id, is_deleted=0)
-            'hive_posts_ix5', # (community_id>0, is_pinned=1)
+            'hive_blocks_created_at_idx',
+
+            'hive_feed_cache_block_num_idx',
+            'hive_feed_cache_created_at_idx',
+            'hive_feed_cache_post_id_idx',
+
             'hive_follows_ix5a', # (following, state, created_at, follower)
             'hive_follows_ix5b', # (follower, state, created_at, following)
-            'hive_reblogs_ix1', # (post_id, account, created_at)
-            'hive_posts_cache_ix6a', # (sc_trend, post_id, paidout=0)
-            'hive_posts_cache_ix6b', # (post_id, sc_trend, paidout=0)
-            'hive_posts_cache_ix7a', # (sc_hot, post_id, paidout=0)
-            'hive_posts_cache_ix7b', # (post_id, sc_hot, paidout=0)
-            'hive_posts_cache_ix8', # (category, payout, depth, paidout=0)
-            'hive_posts_cache_ix9a', # (depth, payout, post_id, paidout=0)
-            'hive_posts_cache_ix9b', # (category, depth, payout, post_id, paidout=0)
-            'hive_posts_cache_ix10', # (post_id, payout, gray=1, payout>0)
-            'hive_posts_cache_ix30', # API: community trend
-            'hive_posts_cache_ix31', # API: community hot
-            'hive_posts_cache_ix32', # API: community created
-            'hive_posts_cache_ix33', # API: community payout
-            'hive_posts_cache_ix34', # API: community muted
-            'hive_accounts_ix3', # (vote_weight, name VPO)
-            'hive_accounts_ix4', # (id, name)
-            'hive_accounts_ix5', # (cached_at, name)
+            'hive_follows_block_num_idx',
+            'hive_follows_created_at_idx',
+
+            'hive_posts_parent_id_counter_deleted_id_idx',
+            'hive_posts_depth_idx',
+            'hive_posts_root_id_id_idx',
+
+            'hive_posts_community_id_id_idx',
+            'hive_posts_payout_at_idx',
+            'hive_posts_payout_idx',
+            'hive_posts_promoted_id_idx',
+            'hive_posts_sc_trend_id_idx',
+            'hive_posts_sc_hot_id_idx',
+            'hive_posts_block_num_idx',
+            'hive_posts_block_num_created_idx',
+            'hive_posts_cashout_time_id_idx',
+            'hive_posts_updated_at_idx',
+            'hive_posts_payout_plus_pending_payout_id_idx',
+            'hive_posts_category_id_payout_plus_pending_payout_depth_idx',
+            'hive_posts_tags_ids_idx',
+            'hive_posts_author_id_created_at_id_idx',
+            'hive_posts_author_id_id_idx',
+
+
+            'hive_posts_api_helper_author_s_permlink_idx',
+
+            'hive_votes_voter_id_last_update_idx',
+            'hive_votes_block_num_idx',
+
+            'hive_subscriptions_block_num_idx',
+            'hive_subscriptions_community_idx',
+            'hive_communities_block_num_idx',
+            'hive_reblogs_created_at_idx',
+
+            'hive_votes_voter_id_post_id_idx',
+            'hive_votes_post_id_voter_id_idx',
+
+            'hive_reputation_data_block_num_idx',
+
+            'hive_notification_cache_block_num_idx',
+            'hive_notification_cache_dst_score_idx'
         ]
 
         to_return = []
@@ -123,46 +157,223 @@ class DbState:
         return to_return
 
     @classmethod
-    def _before_initial_sync(cls):
+    def has_index(cls, idx_name):
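+        # pg_class holds one row per relation (table, index, sequence, ...), so a single
+        # match on relname is treated here as "the index exists".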
+        sql = "SELECT count(*) FROM pg_class WHERE relname = :relname"
+        count = cls.db().query_one(sql, relname=idx_name)
+        return count == 1
+
+    @classmethod
+    def _execute_query(cls, query):
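+        # Runs a one-off maintenance statement with work_mem temporarily raised to 2GB
+        # and logs how long it took; the previous work_mem setting is restored afterwards.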
+        time_start = perf_counter()
+
+        current_work_mem = cls.update_work_mem('2GB')
+        log.info("[INIT] Attempting to execute query: `%s'...", query)
+
+        cls.db().query_no_return(query)
+
+        cls.update_work_mem(current_work_mem)
+
+        time_end = perf_counter()
+        log.info("[INIT] Query `%s' done in %.4fs", query, time_end - time_start)
+
+
+    @classmethod
+    def processing_indexes(cls, is_pre_process, drop, create):
+        DB = cls.db()
+        engine = DB.engine()
+        log.info("[INIT] Begin %s-initial sync hooks", "pre" if is_pre_process else "post")
+
+        any_index_created = False
+
+        for index in cls._disableable_indexes():
+            log.info("%s index %s.%s", ("Drop" if is_pre_process else "Recreate"), index.table, index.name)
+            try:
+                if drop:
+                    if cls.has_index(index.name):
+                        time_start = perf_counter()
+                        index.drop(engine)
+                        end_time = perf_counter()
+                        elapsed_time = end_time - time_start
+                        log.info("Index %s dropped in time %.4f s", index.name, elapsed_time)
+            except sqlalchemy.exc.ProgrammingError as ex:
+                log.warning("Ignoring ex: {}".format(ex))
+
+            if create:
+                if cls.has_index(index.name):
+                    log.info("Index %s already exists... Creation skipped.", index.name)
+                else:
+                    time_start = perf_counter()
+                    index.create(engine)
+                    end_time = perf_counter()
+                    elapsed_time = end_time - time_start
+                    log.info("Index %s created in time %.4f s", index.name, elapsed_time)
+                    any_index_created = True
+        if any_index_created:
+            cls._execute_query("ANALYZE")
+
+    @classmethod
+    def before_initial_sync(cls, last_imported_block, hived_head_block):
         """Routine which runs *once* after db setup.
 
         Disables non-critical indexes for faster initial sync, as well
         as foreign key constraints."""
 
-        engine = cls.db().engine()
-        log.info("[INIT] Begin pre-initial sync hooks")
+        to_sync = hived_head_block - last_imported_block
 
-        for index in cls._disableable_indexes():
-            log.info("Drop index %s.%s", index.table, index.name)
-            index.drop(engine)
+        if to_sync < SYNCED_BLOCK_LIMIT:
+            log.info("[INIT] Skipping pre-initial sync hooks")
+            return
+
+        #is_pre_process, drop, create
+        cls.processing_indexes( True, True, False )
+
+        from hive.db.schema import drop_fk, set_logged_table_attribute
+        log.info("Dropping FKs")
+        drop_fk(cls.db())
 
-        # TODO: #111
-        #for key in cls._all_foreign_keys():
-        #    log.info("Drop fk %s", key.name)
-        #    key.drop(engine)
+        # intentionally disabled since it needs a lot of WAL disk space when switching back to LOGGED
+        #set_logged_table_attribute(cls.db(), False)
 
         log.info("[INIT] Finish pre-initial sync hooks")
 
     @classmethod
-    def _after_initial_sync(cls):
+    def update_work_mem(cls, workmem_value):
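+        # Overrides work_mem at the database level and returns the previous value so the
+        # caller can restore it; illustrative usage (see _execute_query above):
+        #     previous = cls.update_work_mem('2GB')
+        #     ...run a memory-hungry maintenance query...
+        #     cls.update_work_mem(previous)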
+        row = cls.db().query_row("SHOW work_mem")
+        current_work_mem = row['work_mem']
+
+        sql = """
+              DO $$
+              BEGIN
+                EXECUTE 'ALTER DATABASE '||current_database()||' SET work_mem TO "{}"';
+              END
+              $$;
+              """
+        cls.db().query_no_return(sql.format(workmem_value))
+
+        return current_work_mem
+
+    @classmethod
+    def _after_initial_sync(cls, current_imported_block):
         """Routine which runs *once* after initial sync.
 
         Re-creates non-core indexes for serving APIs after init sync,
         as well as all foreign keys."""
 
-        engine = cls.db().engine()
-        log.info("[INIT] Begin post-initial sync hooks")
+        last_imported_block = cls.db().query_one("SELECT block_num FROM hive_state LIMIT 1")
 
-        for index in cls._disableable_indexes():
-            log.info("Create index %s.%s", index.table, index.name)
-            index.create(engine)
+        log.info("[INIT] Current imported block: %s. Last imported block: %s.", current_imported_block, last_imported_block)
+        if last_imported_block > current_imported_block:
+            last_imported_block = current_imported_block
+
+        synced_blocks = current_imported_block - last_imported_block
+
+        force_index_rebuild = False
+        massive_sync_preconditions = False
+        if synced_blocks >= SYNCED_BLOCK_LIMIT:
+            force_index_rebuild = True
+            massive_sync_preconditions = True
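+
+        # Catching up by at least SYNCED_BLOCK_LIMIT blocks (~7 days) is treated as a
+        # massive sync: indexes were dropped beforehand and have to be rebuilt, and the
+        # heavier whole-table maintenance (VACUUM ANALYZE, full children recount) is run.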
+
+        def vacuum_hive_posts(cls):
+            if massive_sync_preconditions:
+                cls._execute_query("VACUUM ANALYZE hive_posts")
+
+        #is_pre_process, drop, create
+        cls.processing_indexes( False, force_index_rebuild, True )
+
+        if massive_sync_preconditions:
+            # Update count of all child posts (this was postponed during the initial sync)
+            cls._execute_query("select update_all_hive_posts_children_count()")
+        else:
+            # Update count of child posts processed during partial sync (this was postponed during the initial sync)
+            sql = "select update_hive_posts_children_count({}, {})".format(last_imported_block, current_imported_block)
+            cls._execute_query(sql)
+
+        vacuum_hive_posts(cls)
+
+        # Update root_id of all root posts
+        sql = """
+              select update_hive_posts_root_id({}, {})
+              """.format(last_imported_block, current_imported_block)
+        cls._execute_query(sql)
+
+        vacuum_hive_posts(cls)
+
+        # Update hive_posts_api_helper (author '/' permlink lookup data)
+        sql = """
+              select update_hive_posts_api_helper({}, {})
+              """.format(last_imported_block, current_imported_block)
+        cls._execute_query(sql)
+
+        time_start = perf_counter()
+
+        log.info("[INIT] Attempting to execute update_all_posts_active...")
+        update_active_starting_from_posts_on_block(last_imported_block, current_imported_block)
+
+        time_end = perf_counter()
+        log.info("[INIT] update_all_posts_active executed in %.4fs", time_end - time_start)
 
-        # TODO: #111
-        #for key in cls._all_foreign_keys():
-        #    log.info("Create fk %s", key.name)
-        #    key.create(engine)
+        vacuum_hive_posts(cls)
+
+        sql = """
+            SELECT update_feed_cache({}, {});
+        """.format(last_imported_block, current_imported_block)
+        cls._execute_query(sql)
+
+        sql = """
+            SELECT update_hive_posts_mentions({}, {});
+        """.format(last_imported_block, current_imported_block)
+        cls._execute_query(sql)
+
+        time_start = perf_counter()
+        PayoutStats.generate()
+        time_end = perf_counter()
+        log.info("[INIT] filling payout_stats_view executed in %.4fs", time_end - time_start)
+
+        sql = """
+              SELECT update_account_reputations({}, {}, True);
+              """.format(last_imported_block, current_imported_block)
+        cls._execute_query(sql)
+
+        log.info("[INIT] Attempting to execute update_communities_posts_and_rank...")
+        time_start = perf_counter()
+        update_communities_posts_and_rank()
+        time_end = perf_counter()
+        log.info("[INIT] update_communities_posts_and_rank executed in %.4fs", time_end - time_start)
+
+        sql = """
+              SELECT update_posts_rshares({}, {});
+              """.format(last_imported_block, current_imported_block)
+        cls._execute_query(sql)
+
+        vacuum_hive_posts(cls)
+
+        sql = """
+              SELECT update_notification_cache(NULL, NULL, False);
+              """
+        cls._execute_query(sql)
+
+        sql = """
+              SELECT update_follow_count({}, {});
+              """.format(last_imported_block, current_imported_block)
+        cls._execute_query(sql)
+
+        # Persist the newly imported block number immediately
+        cls.db().query_no_return("UPDATE hive_state SET block_num = :block_num", block_num = current_imported_block)
+
+        if massive_sync_preconditions:
+            from hive.db.schema import create_fk, set_logged_table_attribute
+            # intentionally disabled since it needs a lot of WAL disk space when switching back to LOGGED
+            #set_logged_table_attribute(cls.db(), True)
+
+            log.info("Recreating FKs")
+            create_fk(cls.db())
+
+            cls._execute_query("VACUUM ANALYZE")
 
-        log.info("[INIT] Finish post-initial sync hooks")
 
     @staticmethod
     def status():
@@ -212,7 +423,6 @@ class DbState:
             cls._set_ver(3)
 
         if cls._ver == 3:
-            cls.db().query("CREATE INDEX hive_accounts_ix3 ON hive_accounts (vote_weight, name varchar_pattern_ops)")
             cls._set_ver(4)
 
         if cls._ver == 4:
@@ -225,22 +435,21 @@ class DbState:
             from hive.indexer.accounts import Accounts
             names = SteemClient().get_all_account_names()
             Accounts.load_ids()
-            Accounts.register(names, '1970-01-01T00:00:00')
+            Accounts.register(names, None, '1970-01-01T00:00:00', 0)
             Accounts.clear_ids()
             cls._set_ver(6)
 
         if cls._ver == 6:
             cls.db().query("DROP INDEX hive_posts_cache_ix6")
-            cls.db().query("CREATE INDEX hive_posts_cache_ix6a ON hive_posts_cache (sc_trend, post_id) WHERE is_paidout = '0'")
-            cls.db().query("CREATE INDEX hive_posts_cache_ix6b ON hive_posts_cache (post_id, sc_trend) WHERE is_paidout = '0'")
-            cls.db().query("DROP INDEX hive_posts_cache_ix7")
-            cls.db().query("CREATE INDEX hive_posts_cache_ix7a ON hive_posts_cache (sc_hot, post_id) WHERE is_paidout = '0'")
-            cls.db().query("CREATE INDEX hive_posts_cache_ix7b ON hive_posts_cache (post_id, sc_hot) WHERE is_paidout = '0'")
+            #cls.db().query("CREATE INDEX hive_posts_cache_ix6a ON hive_posts_cache (sc_trend, post_id) WHERE is_paidout = '0'")
+            #cls.db().query("CREATE INDEX hive_posts_cache_ix6b ON hive_posts_cache (post_id, sc_trend) WHERE is_paidout = '0'")
+            #cls.db().query("DROP INDEX hive_posts_cache_ix7")
+            #cls.db().query("CREATE INDEX hive_posts_cache_ix7a ON hive_posts_cache (sc_hot, post_id) WHERE is_paidout = '0'")
+            #cls.db().query("CREATE INDEX hive_posts_cache_ix7b ON hive_posts_cache (post_id, sc_hot) WHERE is_paidout = '0'")
             cls._set_ver(7)
 
         if cls._ver == 7:
-            cls.db().query("CREATE INDEX hive_accounts_ix4 ON hive_accounts (id, name)")
-            cls.db().query("CREATE INDEX hive_accounts_ix5 ON hive_accounts (cached_at, name)")
+            cls.db().query("DROP INDEX IF EXISTS hive_accounts_ix4; CREATE INDEX hive_accounts_ix4 ON hive_accounts (id, name)")
             cls._set_ver(8)
 
         if cls._ver == 8:
@@ -252,21 +461,21 @@ class DbState:
             cls._set_ver(9)
 
         if cls._ver == 9:
-            from hive.indexer.follow import Follow
-            Follow.force_recount()
+            #from hive.indexer.follow import Follow
+            #Follow.force_recount()
             cls._set_ver(10)
 
         if cls._ver == 10:
-            cls.db().query("CREATE INDEX hive_posts_cache_ix8 ON hive_posts_cache (category, payout, depth) WHERE is_paidout = '0'")
-            cls.db().query("CREATE INDEX hive_posts_cache_ix9a ON hive_posts_cache (depth, payout, post_id) WHERE is_paidout = '0'")
-            cls.db().query("CREATE INDEX hive_posts_cache_ix9b ON hive_posts_cache (category, depth, payout, post_id) WHERE is_paidout = '0'")
+            #cls.db().query("CREATE INDEX hive_posts_cache_ix8 ON hive_posts_cache (category, payout, depth) WHERE is_paidout = '0'")
+            #cls.db().query("CREATE INDEX hive_posts_cache_ix9a ON hive_posts_cache (depth, payout, post_id) WHERE is_paidout = '0'")
+            #cls.db().query("CREATE INDEX hive_posts_cache_ix9b ON hive_posts_cache (category, depth, payout, post_id) WHERE is_paidout = '0'")
             cls._set_ver(11)
 
         if cls._ver == 11:
             cls.db().query("DROP INDEX hive_posts_ix1")
             cls.db().query("DROP INDEX hive_posts_ix2")
-            cls.db().query("CREATE INDEX hive_posts_ix3 ON hive_posts (author, depth, id) WHERE is_deleted = '0'")
-            cls.db().query("CREATE INDEX hive_posts_ix4 ON hive_posts (parent_id, id) WHERE is_deleted = '0'")
+            cls.db().query("CREATE INDEX hive_posts_ix3 ON hive_posts (author, depth, id) WHERE counter_deleted = 0")
+            cls.db().query("CREATE INDEX hive_posts_ix4 ON hive_posts (parent_id, id) WHERE counter_deleted = 0")
             cls._set_ver(12)
 
         if cls._ver == 12: # community schema
@@ -285,14 +494,8 @@ class DbState:
             cls._set_ver(13)
 
         if cls._ver == 13:
-            sqls = ("CREATE INDEX hive_posts_ix5 ON hive_posts (id) WHERE is_pinned = '1' AND is_deleted = '0'",
-                    "CREATE INDEX hive_posts_ix6 ON hive_posts (community_id, id) WHERE community_id IS NOT NULL AND is_pinned = '1' AND is_deleted = '0'",
-                    "CREATE INDEX hive_posts_cache_ix10 ON hive_posts_cache (post_id, payout) WHERE is_grayed = '1' AND payout > 0",
-                    "CREATE INDEX hive_posts_cache_ix30 ON hive_posts_cache (community_id, sc_trend,   post_id) WHERE community_id IS NOT NULL AND is_grayed = '0' AND depth = 0",
-                    "CREATE INDEX hive_posts_cache_ix31 ON hive_posts_cache (community_id, sc_hot,     post_id) WHERE community_id IS NOT NULL AND is_grayed = '0' AND depth = 0",
-                    "CREATE INDEX hive_posts_cache_ix32 ON hive_posts_cache (community_id, created_at, post_id) WHERE community_id IS NOT NULL AND is_grayed = '0' AND depth = 0",
-                    "CREATE INDEX hive_posts_cache_ix33 ON hive_posts_cache (community_id, payout,     post_id) WHERE community_id IS NOT NULL AND is_grayed = '0' AND is_paidout = '0'",
-                    "CREATE INDEX hive_posts_cache_ix34 ON hive_posts_cache (community_id, payout,     post_id) WHERE community_id IS NOT NULL AND is_grayed = '1' AND is_paidout = '0'")
+            sqls = ("CREATE INDEX hive_posts_ix5 ON hive_posts (id) WHERE is_pinned = '1' AND counter_deleted = 0",
+                    "CREATE INDEX hive_posts_ix6 ON hive_posts (community_id, id) WHERE community_id IS NOT NULL AND is_pinned = '1' AND counter_deleted = 0",)
             for sql in sqls:
                 cls.db().query(sql)
             cls._set_ver(14)
@@ -302,7 +505,7 @@ class DbState:
             cls.db().query("ALTER TABLE hive_communities ADD COLUMN category    VARCHAR(32)   NOT NULL DEFAULT ''")
             cls.db().query("ALTER TABLE hive_communities ADD COLUMN avatar_url  VARCHAR(1024) NOT NULL DEFAULT ''")
             cls.db().query("ALTER TABLE hive_communities ADD COLUMN num_authors INTEGER       NOT NULL DEFAULT 0")
-            cls.db().query("CREATE INDEX hive_posts_cache_ix20 ON hive_posts_cache (community_id, author, payout, post_id) WHERE is_paidout = '0'")
+            #cls.db().query("CREATE INDEX hive_posts_cache_ix20 ON hive_posts_cache (community_id, author, payout, post_id) WHERE is_paidout = '0'")
             cls._set_ver(15)
 
         if cls._ver == 15:
@@ -315,6 +518,12 @@ class DbState:
             cls.db().query("CREATE INDEX hive_communities_ft1 ON hive_communities USING GIN (to_tsvector('english', title || ' ' || about))")
             cls._set_ver(17)
 
+        if cls._ver == 17:
+            cls.db().query("INSERT INTO hive_accounts (name, created_at) VALUES ('', '1970-01-01T00:00:00') ON CONFLICT (name) DO NOTHING")
+            cls.db().query("INSERT INTO hive_permlink_data (permlink) VALUES ('') ON CONFLICT (permlink) DO NOTHING")
+            cls.db().query("INSERT INTO hive_category_data (category) VALUES ('') ON CONFLICT (category) DO NOTHING")
+            cls._set_ver(18)
+
         reset_autovac(cls.db())
 
         log.info("[HIVE] db version: %d", cls._ver)
diff --git a/hive/db/schema.py b/hive/db/schema.py
index 250916fadb9d4c2f008b00528b06300781a86b63..0a58bbbb1eb55c8cf4b98b7e5cbd4bc5fea51118 100644
--- a/hive/db/schema.py
+++ b/hive/db/schema.py
@@ -8,9 +8,13 @@ from sqlalchemy.types import VARCHAR
 from sqlalchemy.types import TEXT
 from sqlalchemy.types import BOOLEAN
 
+import logging
+log = logging.getLogger(__name__)
+
 #pylint: disable=line-too-long, too-many-lines, bad-whitespace
 
-DB_VERSION = 17
+# [DK] we changed and removed some tables, so I upgraded DB_VERSION to 18
+DB_VERSION = 18
 
 def build_metadata():
     """Build schema def with SqlAlchemy"""
@@ -27,108 +31,228 @@ def build_metadata():
 
         sa.UniqueConstraint('hash', name='hive_blocks_ux1'),
         sa.ForeignKeyConstraint(['prev'], ['hive_blocks.hash'], name='hive_blocks_fk1'),
+        sa.Index('hive_blocks_created_at_idx', 'created_at')
     )
 
     sa.Table(
         'hive_accounts', metadata,
         sa.Column('id', sa.Integer, primary_key=True),
-        sa.Column('name', VARCHAR(16), nullable=False),
+        sa.Column('name', VARCHAR(16, collation='C'), nullable=False),
         sa.Column('created_at', sa.DateTime, nullable=False),
         #sa.Column('block_num', sa.Integer, nullable=False),
-        sa.Column('reputation', sa.Float(precision=6), nullable=False, server_default='25'),
-
-        sa.Column('display_name', sa.String(20)),
-        sa.Column('about', sa.String(160)),
-        sa.Column('location', sa.String(30)),
-        sa.Column('website', sa.String(100)),
-        sa.Column('profile_image', sa.String(1024), nullable=False, server_default=''),
-        sa.Column('cover_image', sa.String(1024), nullable=False, server_default=''),
-
+        sa.Column('reputation', sa.BigInteger, nullable=False, server_default='0'),
+        sa.Column('is_implicit', sa.Boolean, nullable=False, server_default='1'),
         sa.Column('followers', sa.Integer, nullable=False, server_default='0'),
         sa.Column('following', sa.Integer, nullable=False, server_default='0'),
 
-        sa.Column('proxy', VARCHAR(16), nullable=False, server_default=''),
-        sa.Column('post_count', sa.Integer, nullable=False, server_default='0'),
-        sa.Column('proxy_weight', sa.Float(precision=6), nullable=False, server_default='0'),
-        sa.Column('vote_weight', sa.Float(precision=6), nullable=False, server_default='0'),
-        sa.Column('kb_used', sa.Integer, nullable=False, server_default='0'), # deprecated
         sa.Column('rank', sa.Integer, nullable=False, server_default='0'),
 
         sa.Column('lastread_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),
-        sa.Column('active_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),
-        sa.Column('cached_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),
-        sa.Column('raw_json', sa.Text),
-
+        sa.Column('posting_json_metadata', sa.Text),
+        sa.Column('json_metadata', sa.Text),
 
         sa.UniqueConstraint('name', name='hive_accounts_ux1'),
-        sa.Index('hive_accounts_ix1', 'vote_weight', 'id'), # core: quick ranks
-        sa.Index('hive_accounts_ix2', 'name', 'id'), # core: quick id map
-        sa.Index('hive_accounts_ix3', 'vote_weight', 'name', postgresql_ops=dict(name='varchar_pattern_ops')), # API: lookup
-        sa.Index('hive_accounts_ix4', 'id', 'name'), # API: quick filter/sort
-        sa.Index('hive_accounts_ix5', 'cached_at', 'name'), # core/listen sweep
+        sa.Index('hive_accounts_ix6', 'reputation')
     )
 
-    hive_posts = sa.Table(
+    sa.Table(
+        'hive_reputation_data', metadata,
+        sa.Column('id', sa.Integer, primary_key=True),
+        sa.Column('author_id', sa.Integer, nullable=False),
+        sa.Column('voter_id', sa.Integer, nullable=False),
+        sa.Column('permlink', sa.String(255, collation='C'), nullable=False),
+        sa.Column('rshares', sa.BigInteger, nullable=False),
+        sa.Column('block_num', sa.Integer,  nullable=False),
+
+        sa.Index('hive_reputation_data_author_permlink_voter_idx', 'author_id', 'permlink', 'voter_id'),
+        sa.Index('hive_reputation_data_block_num_idx', 'block_num')
+    )
+
+    sa.Table(
         'hive_posts', metadata,
         sa.Column('id', sa.Integer, primary_key=True),
-        sa.Column('parent_id', sa.Integer),
-        sa.Column('author', VARCHAR(16), nullable=False),
-        sa.Column('permlink', VARCHAR(255), nullable=False),
-        sa.Column('category', VARCHAR(255), nullable=False, server_default=''),
+        sa.Column('root_id', sa.Integer, nullable=False), # records initially set to 0 are later updated to their own id
+        sa.Column('parent_id', sa.Integer, nullable=False),
+        sa.Column('author_id', sa.Integer, nullable=False),
+        sa.Column('permlink_id', sa.Integer, nullable=False),
+        sa.Column('category_id', sa.Integer, nullable=False),
         sa.Column('community_id', sa.Integer, nullable=True),
         sa.Column('created_at', sa.DateTime, nullable=False),
         sa.Column('depth', SMALLINT, nullable=False),
-        sa.Column('is_deleted', BOOLEAN, nullable=False, server_default='0'),
+        sa.Column('counter_deleted', sa.Integer, nullable=False, server_default='0'),
         sa.Column('is_pinned', BOOLEAN, nullable=False, server_default='0'),
         sa.Column('is_muted', BOOLEAN, nullable=False, server_default='0'),
         sa.Column('is_valid', BOOLEAN, nullable=False, server_default='1'),
         sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),
 
-        sa.ForeignKeyConstraint(['author'], ['hive_accounts.name'], name='hive_posts_fk1'),
+        sa.Column('children', sa.Integer, nullable=False, server_default='0'),
+
+        # core stats/indexes
+        sa.Column('payout', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),
+        sa.Column('pending_payout', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),
+        sa.Column('payout_at', sa.DateTime, nullable=False, server_default='1970-01-01'),
+        sa.Column('last_payout_at', sa.DateTime, nullable=False, server_default='1970-01-01'),
+        sa.Column('updated_at', sa.DateTime, nullable=False, server_default='1970-01-01'),
+        sa.Column('is_paidout', BOOLEAN, nullable=False, server_default='0'),
+
+        # ui flags/filters
+        sa.Column('is_nsfw', BOOLEAN, nullable=False, server_default='0'),
+        sa.Column('is_declined', BOOLEAN, nullable=False, server_default='0'),
+        sa.Column('is_full_power', BOOLEAN, nullable=False, server_default='0'),
+        sa.Column('is_hidden', BOOLEAN, nullable=False, server_default='0'),
+
+        # important indexes
+        sa.Column('sc_trend', sa.Float(precision=6), nullable=False, server_default='0'),
+        sa.Column('sc_hot', sa.Float(precision=6), nullable=False, server_default='0'),
+
+        sa.Column('total_payout_value', sa.String(30), nullable=False, server_default='0.000 HBD'),
+        sa.Column('author_rewards', sa.BigInteger, nullable=False, server_default='0'),
+
+        sa.Column('author_rewards_hive', sa.BigInteger, nullable=False, server_default='0'),
+        sa.Column('author_rewards_hbd', sa.BigInteger, nullable=False, server_default='0'),
+        sa.Column('author_rewards_vests', sa.BigInteger, nullable=False, server_default='0'),
+
+        sa.Column('abs_rshares', sa.Numeric, nullable=False, server_default='0'),
+        sa.Column('vote_rshares', sa.Numeric, nullable=False, server_default='0'),
+        sa.Column('total_vote_weight', sa.Numeric, nullable=False, server_default='0'),
+        sa.Column('total_votes', sa.BigInteger, nullable=False, server_default='0'),
+        sa.Column('net_votes', sa.BigInteger, nullable=False, server_default='0'),
+        sa.Column('active', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),
+        sa.Column('cashout_time', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),
+        sa.Column('percent_hbd', sa.Integer, nullable=False, server_default='10000'),
+
+        sa.Column('curator_payout_value', sa.String(30), nullable=False, server_default='0.000 HBD'),
+        sa.Column('max_accepted_payout',  sa.String(30), nullable=False, server_default='1000000.000 HBD'),
+        sa.Column('allow_votes', BOOLEAN, nullable=False, server_default='1'),
+        sa.Column('allow_curation_rewards', BOOLEAN, nullable=False, server_default='1'),
+        sa.Column('beneficiaries', sa.JSON, nullable=False, server_default='[]'),
+        sa.Column('block_num', sa.Integer,  nullable=False ),
+        sa.Column('block_num_created', sa.Integer,  nullable=False ),
+        sa.Column('tags_ids', sa.ARRAY(sa.Integer),  nullable=True ),
+
+        sa.ForeignKeyConstraint(['author_id'], ['hive_accounts.id'], name='hive_posts_fk1'),
+        sa.ForeignKeyConstraint(['root_id'], ['hive_posts.id'], name='hive_posts_fk2'),
         sa.ForeignKeyConstraint(['parent_id'], ['hive_posts.id'], name='hive_posts_fk3'),
-        sa.UniqueConstraint('author', 'permlink', name='hive_posts_ux1'),
-        sa.Index('hive_posts_ix3', 'author', 'depth', 'id', postgresql_where=sql_text("is_deleted = '0'")), # API: author blog/comments
-        sa.Index('hive_posts_ix5', 'id', postgresql_where=sql_text("is_pinned = '1' AND is_deleted = '0'")), # API: pinned post status
-        sa.Index('hive_posts_ix6', 'community_id', 'id', postgresql_where=sql_text("community_id IS NOT NULL AND is_pinned = '1' AND is_deleted = '0'")), # API: community pinned
+        sa.UniqueConstraint('author_id', 'permlink_id', 'counter_deleted', name='hive_posts_ux1'),
+
+        sa.Index('hive_posts_depth_idx', 'depth'),
+
+        sa.Index('hive_posts_root_id_id_idx', 'root_id','id'),
+
+        sa.Index('hive_posts_parent_id_counter_deleted_id_idx', 'parent_id', 'counter_deleted', 'id'),
+        sa.Index('hive_posts_community_id_id_idx', 'community_id', sa.text('id DESC')),
+
+        sa.Index('hive_posts_payout_at_idx', 'payout_at'),
+        sa.Index('hive_posts_payout_idx', 'payout'),
+        sa.Index('hive_posts_promoted_id_idx', 'promoted', 'id', postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0")),
+        sa.Index('hive_posts_sc_trend_id_idx', 'sc_trend', 'id', postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0 AND depth = 0")),
+        sa.Index('hive_posts_sc_hot_id_idx', 'sc_hot', 'id', postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0 AND depth = 0")),
+        sa.Index('hive_posts_author_id_created_at_id_idx', sa.text('author_id DESC, created_at DESC, id')),
+        sa.Index('hive_posts_author_id_id_idx', 'author_id', 'id', postgresql_where=sql_text('depth = 0')),
+        sa.Index('hive_posts_block_num_idx', 'block_num'),
+        sa.Index('hive_posts_block_num_created_idx', 'block_num_created'),
+        sa.Index('hive_posts_cashout_time_id_idx', 'cashout_time', 'id'),
+        sa.Index('hive_posts_updated_at_idx', sa.text('updated_at DESC')),
+        sa.Index('hive_posts_payout_plus_pending_payout_id_idx', sa.text('(payout+pending_payout), id, is_paidout'), postgresql_where=sql_text("counter_deleted = 0 AND NOT is_paidout")),
+        sa.Index('hive_posts_category_id_payout_plus_pending_payout_depth_idx', sa.text('category_id, (payout+pending_payout), depth'), postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0")),
+        sa.Index('hive_posts_tags_ids_idx', 'tags_ids', postgresql_using="gin", postgresql_ops={'tags_ids': 'gin__int_ops'})
+        )
+
+    sa.Table(
+        'hive_post_data', metadata,
+        sa.Column('id', sa.Integer, primary_key=True, autoincrement=False),
+        sa.Column('title', VARCHAR(512), nullable=False, server_default=''),
+        sa.Column('preview', VARCHAR(1024), nullable=False, server_default=''), # first 1k of 'body'
+        sa.Column('img_url', VARCHAR(1024), nullable=False, server_default=''), # first 'image' from 'json'
+        sa.Column('body', TEXT, nullable=False, server_default=''),
+        sa.Column('json', TEXT, nullable=False, server_default='')
+    )
+
+    sa.Table(
+        'hive_permlink_data', metadata,
+        sa.Column('id', sa.Integer, primary_key=True),
+        sa.Column('permlink', sa.String(255, collation='C'), nullable=False),
+        sa.UniqueConstraint('permlink', name='hive_permlink_data_permlink')
     )
 
-    sa.Index('hive_posts_ix4', hive_posts.c.parent_id.desc().nullslast(), hive_posts.c.id)
-    sa.Index('hive_posts_id_parent_id_created_at', hive_posts.c.id, hive_posts.c.parent_id.desc().nullslast(), hive_posts.c.created_at)
+    sa.Table(
+        'hive_category_data', metadata,
+        sa.Column('id', sa.Integer, primary_key=True),
+        sa.Column('category', sa.String(255, collation='C'), nullable=False),
+        sa.UniqueConstraint('category', name='hive_category_data_category')
+    )
 
     sa.Table(
-        'hive_post_tags', metadata,
+        'hive_votes', metadata,
+        sa.Column('id', sa.BigInteger, primary_key=True),
         sa.Column('post_id', sa.Integer, nullable=False),
-        sa.Column('tag', sa.String(32), nullable=False),
-        sa.UniqueConstraint('tag', 'post_id', name='hive_post_tags_ux1'), # core
-        sa.Index('hive_post_tags_ix1', 'post_id'), # core
+        sa.Column('voter_id', sa.Integer, nullable=False),
+        sa.Column('author_id', sa.Integer, nullable=False),
+        sa.Column('permlink_id', sa.Integer, nullable=False),
+        sa.Column('weight', sa.Numeric, nullable=False, server_default='0'),
+        sa.Column('rshares', sa.BigInteger, nullable=False, server_default='0'),
+        sa.Column('vote_percent', sa.Integer, server_default='0'),
+        sa.Column('last_update', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),
+        sa.Column('num_changes', sa.Integer, server_default='0'),
+        sa.Column('block_num', sa.Integer,  nullable=False ),
+        sa.Column('is_effective', BOOLEAN, nullable=False, server_default='0'),
+
+        sa.UniqueConstraint('voter_id', 'author_id', 'permlink_id', name='hive_votes_voter_id_author_id_permlink_id_uk'),
+
+        sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_votes_fk1'),
+        sa.ForeignKeyConstraint(['voter_id'], ['hive_accounts.id'], name='hive_votes_fk2'),
+        sa.ForeignKeyConstraint(['author_id'], ['hive_accounts.id'], name='hive_votes_fk3'),
+        sa.ForeignKeyConstraint(['permlink_id'], ['hive_permlink_data.id'], name='hive_votes_fk4'),
+        sa.ForeignKeyConstraint(['block_num'], ['hive_blocks.num'], name='hive_votes_fk5'),
+
+        sa.Index('hive_votes_voter_id_post_id_idx', 'voter_id', 'post_id'), # probably redundant with hive_votes_voter_id_last_update_idx, since both start with voter_id
+        sa.Index('hive_votes_voter_id_last_update_idx', 'voter_id', 'last_update'), # this index is critical for hive_accounts_info_view performance
+        sa.Index('hive_votes_post_id_voter_id_idx', 'post_id', 'voter_id'),
+        sa.Index('hive_votes_block_num_idx', 'block_num') # this is also important for hive_accounts_info_view
+    )
+
+    sa.Table(
+        'hive_tag_data', metadata,
+        sa.Column('id', sa.Integer, nullable=False, primary_key=True),
+        sa.Column('tag', VARCHAR(64, collation='C'), nullable=False, server_default=''),
+        sa.UniqueConstraint('tag', name='hive_tag_data_ux1')
     )
 
     sa.Table(
         'hive_follows', metadata,
+        sa.Column('id', sa.Integer, primary_key=True ),
         sa.Column('follower', sa.Integer, nullable=False),
         sa.Column('following', sa.Integer, nullable=False),
         sa.Column('state', SMALLINT, nullable=False, server_default='1'),
         sa.Column('created_at', sa.DateTime, nullable=False),
-        sa.Column('blacklisted', BOOLEAN, nullable=False, server_default='0'),
-        sa.Column('follow_blacklists', BOOLEAN, nullable=False, server_default='0'),
+        sa.Column('blacklisted', sa.Boolean, nullable=False, server_default='0'),
+        sa.Column('follow_blacklists', sa.Boolean, nullable=False, server_default='0'),
         sa.Column('follow_muted', BOOLEAN, nullable=False, server_default='0'),
+        sa.Column('block_num', sa.Integer,  nullable=False ),
 
-        sa.UniqueConstraint('following', 'follower', name='hive_follows_ux3'), # core
+        sa.UniqueConstraint('following', 'follower', name='hive_follows_ux1'), # core
+        sa.ForeignKeyConstraint(['block_num'], ['hive_blocks.num'], name='hive_follows_fk1'),
         sa.Index('hive_follows_ix5a', 'following', 'state', 'created_at', 'follower'),
         sa.Index('hive_follows_ix5b', 'follower', 'state', 'created_at', 'following'),
-        sa.Index('hive_follows_all_columns', 'follower', 'following', 'state', 'created_at', 'blacklisted', 'follow_blacklists', 'follow_muted')
+        sa.Index('hive_follows_block_num_idx', 'block_num'),
+        sa.Index('hive_follows_created_at_idx', 'created_at'),
     )
 
     sa.Table(
         'hive_reblogs', metadata,
-        sa.Column('account', VARCHAR(16), nullable=False),
+        sa.Column('id', sa.Integer, primary_key=True ),
+        sa.Column('blogger_id', sa.Integer, nullable=False),
         sa.Column('post_id', sa.Integer, nullable=False),
         sa.Column('created_at', sa.DateTime, nullable=False),
+        sa.Column('block_num', sa.Integer,  nullable=False ),
 
-        sa.ForeignKeyConstraint(['account'], ['hive_accounts.name'], name='hive_reblogs_fk1'),
+        sa.ForeignKeyConstraint(['blogger_id'], ['hive_accounts.id'], name='hive_reblogs_fk1'),
         sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_reblogs_fk2'),
-        sa.UniqueConstraint('account', 'post_id', name='hive_reblogs_ux1'), # core
-        sa.Index('hive_reblogs_ix1', 'post_id', 'account', 'created_at'), # API -- not yet used
+        sa.ForeignKeyConstraint(['block_num'], ['hive_blocks.num'], name='hive_reblogs_fk3'),
+        sa.UniqueConstraint('blogger_id', 'post_id', name='hive_reblogs_ux1'), # core
+        sa.Index('hive_reblogs_post_id', 'post_id'),
+        sa.Index('hive_reblogs_block_num_idx', 'block_num'),
+        sa.Index('hive_reblogs_created_at_idx', 'created_at')
     )
 
     sa.Table(
@@ -145,6 +269,9 @@ def build_metadata():
         sa.ForeignKeyConstraint(['from_account'], ['hive_accounts.id'], name='hive_payments_fk1'),
         sa.ForeignKeyConstraint(['to_account'], ['hive_accounts.id'], name='hive_payments_fk2'),
         sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_payments_fk3'),
+        sa.Index('hive_payments_from', 'from_account'),
+        sa.Index('hive_payments_to', 'to_account'),
+        sa.Index('hive_payments_post_id', 'post_id'),
     )
 
     sa.Table(
@@ -152,98 +279,46 @@ def build_metadata():
         sa.Column('post_id', sa.Integer, nullable=False),
         sa.Column('account_id', sa.Integer, nullable=False),
         sa.Column('created_at', sa.DateTime, nullable=False),
-        sa.UniqueConstraint('post_id', 'account_id', name='hive_feed_cache_ux1'), # core
-        sa.Index('hive_feed_cache_ix1', 'account_id', 'post_id', 'created_at'), # API (and rebuild?)
-    )
-
-    sa.Table(
-        'hive_posts_cache', metadata,
-        sa.Column('post_id', sa.Integer, primary_key=True, autoincrement=False),
-        sa.Column('author', VARCHAR(16), nullable=False),
-        sa.Column('permlink', VARCHAR(255), nullable=False),
-        sa.Column('category', VARCHAR(255), nullable=False, server_default=''),
-
-        # important/index
-        sa.Column('community_id', sa.Integer, nullable=True),
-        sa.Column('depth', SMALLINT, nullable=False, server_default='0'),
-        sa.Column('children', SMALLINT, nullable=False, server_default='0'),
-
-        # basic/extended-stats
-        sa.Column('author_rep', sa.Float(precision=6), nullable=False, server_default='0'),
-        sa.Column('flag_weight', sa.Float(precision=6), nullable=False, server_default='0'),
-        sa.Column('total_votes', sa.Integer, nullable=False, server_default='0'),
-        sa.Column('up_votes', sa.Integer, nullable=False, server_default='0'),
-
-        # basic ui fields
-        sa.Column('title', sa.String(255), nullable=False, server_default=''),
-        sa.Column('preview', sa.String(1024), nullable=False, server_default=''),
-        sa.Column('img_url', sa.String(1024), nullable=False, server_default=''),
-
-        # core stats/indexes
-        sa.Column('payout', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),
-        sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),
-        sa.Column('created_at', sa.DateTime, nullable=False, server_default='1990-01-01'),
-        sa.Column('payout_at', sa.DateTime, nullable=False, server_default='1990-01-01'),
-        sa.Column('updated_at', sa.DateTime, nullable=False, server_default='1990-01-01'),
-        sa.Column('is_paidout', BOOLEAN, nullable=False, server_default='0'),
+        sa.Column('block_num',    sa.Integer,  nullable=False),
+        sa.PrimaryKeyConstraint('account_id', 'post_id', name='hive_feed_cache_pk'),
+        sa.ForeignKeyConstraint(['block_num'], ['hive_blocks.num'], name='hive_feed_cache_fk1'),
 
-        # ui flags/filters
-        sa.Column('is_nsfw', BOOLEAN, nullable=False, server_default='0'),
-        sa.Column('is_declined', BOOLEAN, nullable=False, server_default='0'),
-        sa.Column('is_full_power', BOOLEAN, nullable=False, server_default='0'),
-        sa.Column('is_hidden', BOOLEAN, nullable=False, server_default='0'),
-        sa.Column('is_grayed', BOOLEAN, nullable=False, server_default='0'),
-
-        # important indexes
-        sa.Column('rshares', sa.BigInteger, nullable=False, server_default='0'),
-        sa.Column('sc_trend', sa.Float(precision=6), nullable=False, server_default='0'),
-        sa.Column('sc_hot', sa.Float(precision=6), nullable=False, server_default='0'),
-
-        # bulk data
-        sa.Column('body', TEXT),
-        sa.Column('votes', TEXT),
-        sa.Column('json', sa.Text),
-        sa.Column('raw_json', sa.Text),
-
-        # index: misc
-        sa.Index('hive_posts_cache_ix3',  'payout_at', 'post_id',           postgresql_where=sql_text("is_paidout = '0'")),         # core: payout sweep
-        sa.Index('hive_posts_cache_ix8',  'category', 'payout', 'depth',    postgresql_where=sql_text("is_paidout = '0'")),         # API: tag stats
-
-        # index: ranked posts
-        sa.Index('hive_posts_cache_ix2',  'promoted',             postgresql_where=sql_text("is_paidout = '0' AND promoted > 0")),  # API: promoted
-
-        sa.Index('hive_posts_cache_ix6a', 'sc_trend', 'post_id',  postgresql_where=sql_text("is_paidout = '0'")),                   # API: trending             todo: depth=0
-        sa.Index('hive_posts_cache_ix7a', 'sc_hot',   'post_id',  postgresql_where=sql_text("is_paidout = '0'")),                   # API: hot                  todo: depth=0
-        sa.Index('hive_posts_cache_ix6b', 'post_id',  'sc_trend', postgresql_where=sql_text("is_paidout = '0'")),                   # API: trending, filtered   todo: depth=0
-        sa.Index('hive_posts_cache_ix7b', 'post_id',  'sc_hot',   postgresql_where=sql_text("is_paidout = '0'")),                   # API: hot, filtered        todo: depth=0
-
-        sa.Index('hive_posts_cache_ix9a',             'depth', 'payout', 'post_id', postgresql_where=sql_text("is_paidout = '0'")), # API: payout               todo: rem depth
-        sa.Index('hive_posts_cache_ix9b', 'category', 'depth', 'payout', 'post_id', postgresql_where=sql_text("is_paidout = '0'")), # API: payout, filtered     todo: rem depth
-
-        sa.Index('hive_posts_cache_ix10', 'post_id', 'payout',                      postgresql_where=sql_text("is_grayed = '1' AND payout > 0")), # API: muted, by filter/date/payout
-
-        # index: stats
-        sa.Index('hive_posts_cache_ix20', 'community_id', 'author', 'payout', 'post_id', postgresql_where=sql_text("is_paidout = '0'")), # API: pending distribution; author payout
-
-        # index: community ranked posts
-        sa.Index('hive_posts_cache_ix30', 'community_id', 'sc_trend',   'post_id',  postgresql_where=sql_text("community_id IS NOT NULL AND is_grayed = '0' AND depth = 0")),        # API: community trend
-        sa.Index('hive_posts_cache_ix31', 'community_id', 'sc_hot',     'post_id',  postgresql_where=sql_text("community_id IS NOT NULL AND is_grayed = '0' AND depth = 0")),        # API: community hot
-        sa.Index('hive_posts_cache_ix32', 'community_id', 'created_at', 'post_id',  postgresql_where=sql_text("community_id IS NOT NULL AND is_grayed = '0' AND depth = 0")),        # API: community created
-        sa.Index('hive_posts_cache_ix33', 'community_id', 'payout',     'post_id',  postgresql_where=sql_text("community_id IS NOT NULL AND is_grayed = '0' AND is_paidout = '0'")), # API: community payout
-        sa.Index('hive_posts_cache_ix34', 'community_id', 'payout',     'post_id',  postgresql_where=sql_text("community_id IS NOT NULL AND is_grayed = '1' AND is_paidout = '0'")), # API: community muted
-        sa.Index('hive_posts_cache_ix35', 'author', 'depth'),
+        sa.Index('hive_feed_cache_block_num_idx', 'block_num'),
+        sa.Index('hive_feed_cache_created_at_idx', 'created_at'),
+        sa.Index('hive_feed_cache_post_id_idx', 'post_id')
     )
 
     sa.Table(
         'hive_state', metadata,
         sa.Column('block_num', sa.Integer, primary_key=True, autoincrement=False),
         sa.Column('db_version', sa.Integer, nullable=False),
-        sa.Column('steem_per_mvest', sa.types.DECIMAL(8, 3), nullable=False),
-        sa.Column('usd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),
-        sa.Column('sbd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),
+        sa.Column('steem_per_mvest', sa.types.DECIMAL(14, 6), nullable=False),
+        sa.Column('usd_per_steem', sa.types.DECIMAL(14, 6), nullable=False),
+        sa.Column('sbd_per_steem', sa.types.DECIMAL(14, 6), nullable=False),
         sa.Column('dgpo', sa.Text, nullable=False),
     )
 
+    sa.Table(
+        'hive_posts_api_helper', metadata,
+        sa.Column('id', sa.Integer, primary_key=True, autoincrement = False),
+        sa.Column('author_s_permlink', VARCHAR(275, collation='C'), nullable=False), # concatenation of author '/' permlink
+        sa.Index('hive_posts_api_helper_author_s_permlink_idx', 'author_s_permlink')
+    )
+
+    sa.Table(
+        'hive_mentions', metadata,
+        sa.Column('id', sa.Integer, primary_key=True),
+        sa.Column('post_id', sa.Integer, nullable=False),
+        sa.Column('account_id', sa.Integer, nullable=False),
+        sa.Column('block_num', sa.Integer, nullable=False),
+
+        sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_mentions_fk1'),
+        sa.ForeignKeyConstraint(['account_id'], ['hive_accounts.id'], name='hive_mentions_fk2'),
+
+        sa.Index('hive_mentions_account_id_idx', 'account_id'),
+        sa.UniqueConstraint('post_id', 'account_id', 'block_num', name='hive_mentions_ux1')
+    )
+
     metadata = build_metadata_community(metadata)
 
     return metadata
@@ -258,7 +333,7 @@ def build_metadata_community(metadata=None):
         sa.Column('id',          sa.Integer,      primary_key=True, autoincrement=False),
         sa.Column('type_id',     SMALLINT,        nullable=False),
         sa.Column('lang',        CHAR(2),         nullable=False, server_default='en'),
-        sa.Column('name',        VARCHAR(16),     nullable=False),
+        sa.Column('name',        VARCHAR(16, collation='C'), nullable=False),
         sa.Column('title',       sa.String(32),   nullable=False, server_default=''),
         sa.Column('created_at',  sa.DateTime,     nullable=False),
         sa.Column('sum_pending', sa.Integer,      nullable=False, server_default='0'),
@@ -274,10 +349,11 @@ def build_metadata_community(metadata=None):
         sa.Column('description', sa.String(5000), nullable=False, server_default=''),
         sa.Column('flag_text',   sa.String(5000), nullable=False, server_default=''),
         sa.Column('settings',    TEXT,            nullable=False, server_default='{}'),
+        sa.Column('block_num', sa.Integer,  nullable=False ),
 
         sa.UniqueConstraint('name', name='hive_communities_ux1'),
         sa.Index('hive_communities_ix1', 'rank', 'id'),
-        sa.Index('hive_communities_id_name', 'id', 'name')
+        sa.Index('hive_communities_block_num_idx', 'block_num')
     )
 
     sa.Table(
@@ -288,23 +364,27 @@ def build_metadata_community(metadata=None):
         sa.Column('role_id',      SMALLINT,       nullable=False, server_default='0'),
         sa.Column('title',        sa.String(140), nullable=False, server_default=''),
 
-        sa.UniqueConstraint('account_id', 'community_id', name='hive_roles_ux1'),
+        sa.PrimaryKeyConstraint('account_id', 'community_id', name='hive_roles_pk'),
         sa.Index('hive_roles_ix1', 'community_id', 'account_id', 'role_id'),
     )
 
     sa.Table(
         'hive_subscriptions', metadata,
+        sa.Column('id', sa.Integer, primary_key=True),
         sa.Column('account_id',   sa.Integer,  nullable=False),
         sa.Column('community_id', sa.Integer,  nullable=False),
         sa.Column('created_at',   sa.DateTime, nullable=False),
+        sa.Column('block_num', sa.Integer,  nullable=False ),
 
         sa.UniqueConstraint('account_id', 'community_id', name='hive_subscriptions_ux1'),
-        sa.Index('hive_subscriptions_ix1', 'community_id', 'account_id', 'created_at'),
+        sa.Index('hive_subscriptions_community_idx', 'community_id'),
+        sa.Index('hive_subscriptions_block_num_idx', 'block_num')
     )
 
     sa.Table(
         'hive_notifs', metadata,
         sa.Column('id',           sa.Integer,  primary_key=True),
         sa.Column('type_id',      SMALLINT,    nullable=False),
         sa.Column('score',        SMALLINT,    nullable=False),
         sa.Column('created_at',   sa.DateTime, nullable=False),
@@ -312,7 +392,7 @@ def build_metadata_community(metadata=None):
         sa.Column('dst_id',       sa.Integer,  nullable=True),
         sa.Column('post_id',      sa.Integer,  nullable=True),
         sa.Column('community_id', sa.Integer,  nullable=True),
-        sa.Column('block_num',    sa.Integer,  nullable=True),
+        sa.Column('block_num',    sa.Integer,  nullable=False),
         sa.Column('payload',      sa.Text,     nullable=True),
 
         sa.Index('hive_notifs_ix1', 'dst_id',                  'id', postgresql_where=sql_text("dst_id IS NOT NULL")),
@@ -323,6 +403,25 @@ def build_metadata_community(metadata=None):
         sa.Index('hive_notifs_ix6', 'dst_id', 'created_at', 'score', 'id', postgresql_where=sql_text("dst_id IS NOT NULL")), # unread
     )
 
+    sa.Table('hive_notification_cache', metadata,
+        sa.Column('id', sa.BigInteger, primary_key=True),
+        sa.Column('block_num', sa.Integer, nullable = False),
+        sa.Column('type_id', sa.Integer, nullable = False),
+        sa.Column('dst', sa.Integer, nullable=True), # dst account id except persistent notifs from hive_notifs
+        sa.Column('src', sa.Integer, nullable=True), # src account id
+        sa.Column('dst_post_id', sa.Integer, nullable=True), # destination post id
+        sa.Column('post_id', sa.Integer, nullable=True),
+        sa.Column('created_at', sa.DateTime, nullable=False), # notification creation time
+        sa.Column('score', sa.Integer, nullable=False),
+        sa.Column('community_title', sa.String(32), nullable=True),
+        sa.Column('community', sa.String(16), nullable=True),
+        sa.Column('payload', sa.String, nullable=True),
+
+        sa.Index('hive_notification_cache_block_num_idx', 'block_num'),
+        sa.Index('hive_notification_cache_dst_score_idx', 'dst', 'score', postgresql_where=sql_text("dst IS NOT NULL"))
+
+    )
+
     return metadata
 
 
@@ -330,28 +429,193 @@ def teardown(db):
     """Drop all tables"""
     build_metadata().drop_all(db.engine())
 
+def drop_fk(db):
+    db.query_no_return("START TRANSACTION")
+    for table in build_metadata().sorted_tables:
+        for fk in table.foreign_keys:
+            sql = """ALTER TABLE {} DROP CONSTRAINT IF EXISTS {}""".format(table.name, fk.name)
+            db.query_no_return(sql)
+    db.query_no_return("COMMIT")
+
+def create_fk(db):
+    from sqlalchemy.schema import AddConstraint
+    from sqlalchemy import text
+    connection = db.engine().connect()
+    connection.execute(text("START TRANSACTION"))
+    for table in build_metadata().sorted_tables:
+        for fk in table.foreign_keys:
+            connection.execute(AddConstraint(fk.constraint))
+    connection.execute(text("COMMIT"))
+
 def setup(db):
     """Creates all tables and seed data"""
+
+    sql = """SELECT * FROM pg_extension WHERE extname='intarray'"""
+    assert db.query_row( sql ), "The database requires the 'intarray' extension to be installed"
     # initialize schema
     build_metadata().create_all(db.engine())
 
     # tune auto vacuum/analyze
     reset_autovac(db)
 
+    # sets FILLFACTOR:
+    set_fillfactor(db)
+
     # default rows
     sqls = [
         "INSERT INTO hive_state (block_num, db_version, steem_per_mvest, usd_per_steem, sbd_per_steem, dgpo) VALUES (0, %d, 0, 0, 0, '')" % DB_VERSION,
         "INSERT INTO hive_blocks (num, hash, created_at) VALUES (0, '0000000000000000000000000000000000000000', '2016-03-24 16:04:57')",
+
+        "INSERT INTO hive_permlink_data (id, permlink) VALUES (0, '')",
+        "INSERT INTO hive_category_data (id, category) VALUES (0, '')",
+        "INSERT INTO hive_tag_data (id, tag) VALUES (0, '')",
+        "INSERT INTO hive_accounts (id, name, created_at) VALUES (0, '', '1970-01-01T00:00:00')",
+
         "INSERT INTO hive_accounts (name, created_at) VALUES ('miners',    '2016-03-24 16:05:00')",
         "INSERT INTO hive_accounts (name, created_at) VALUES ('null',      '2016-03-24 16:05:00')",
         "INSERT INTO hive_accounts (name, created_at) VALUES ('temp',      '2016-03-24 16:05:00')",
-        "INSERT INTO hive_accounts (name, created_at) VALUES ('initminer', '2016-03-24 16:05:00')"]
+        "INSERT INTO hive_accounts (name, created_at) VALUES ('initminer', '2016-03-24 16:05:00')",
+
+        """
+        INSERT INTO
+            public.hive_posts(id, root_id, parent_id, author_id, permlink_id, category_id,
+                community_id, created_at, depth, block_num, block_num_created
+            )
+        VALUES
+            (0, 0, 0, 0, 0, 0, 0, now(), 0, 0, 0);
+        """]
     for sql in sqls:
         db.query(sql)
 
     sql = "CREATE INDEX hive_communities_ft1 ON hive_communities USING GIN (to_tsvector('english', title || ' ' || about))"
     db.query(sql)
 
+    # find_comment_id definition moved to utility_functions.sql
+    # find_account_id definition moved to utility_functions.sql
+
+    # process_hive_post_operation definition moved to hive_post_operations.sql
+    # delete_hive_post moved to hive_post_operations.sql
+
+    # In original hivemind, a value of 'active_at' was calculated from
+    # max
+    #   {
+    #     created             ( account_create_operation ),
+    #     last_account_update ( account_update_operation/account_update2_operation ),
+    #     last_post           ( comment_operation - only creation )
+    #     last_root_post      ( comment_operation - only creation + only ROOT ),
+    #     last_vote_time      ( vote_operation )
+    #   }
+    # In order to simplify calculations, `last_account_update` is not taken into consideration, because account updates are very rare
+    # and posting/voting after an account update fixes the `active_at` value immediately.
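+    #
+    # Illustratively (not the literal SQL used here), the simplified value corresponds to:
+    #     active_at = GREATEST(created, last_post, last_root_post, last_vote_time)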
+
+    # hive_accounts_view definition moved to hive_accounts_view.sql
+
+    # hive_posts_view definition moved to hive_posts_view.sql
+
+    # update_hive_posts_root_id moved to update_hive_posts_root_id.sql
+
+    # hive_votes_view definition moved into hive_votes_view.sql
+
+    # database_api_vote, find_votes, list_votes_by_voter_comment, list_votes_by_comment_voter moved into database_api_list_votes.sql
+
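+    # join_collapse_limit / from_collapse_limit are raised from PostgreSQL's default of 8
+    # so the planner may reorder larger join trees, which the multi-join views and
+    # functions installed below presumably benefit from.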
+    sql = """
+          DO $$
+          BEGIN
+            EXECUTE 'ALTER DATABASE '||current_database()||' SET join_collapse_limit TO 16';
+            EXECUTE 'ALTER DATABASE '||current_database()||' SET from_collapse_limit TO 16';
+          END
+          $$;
+          """
+    db.query_no_return(sql)
+
+    sql = """
+          CREATE TABLE IF NOT EXISTS hive_db_patch_level
+          (
+            level SERIAL NOT NULL PRIMARY KEY,
+            patch_date timestamp without time zone NOT NULL,
+            patched_to_revision TEXT
+          );
+    """
+    db.query_no_return(sql)
+    sql = """
+          INSERT INTO hive_db_patch_level
+          (patch_date, patched_to_revision)
+          values
+          (now(), '{}');
+          """
+
+    from hive.version import GIT_REVISION
+    db.query_no_return(sql.format(GIT_REVISION))
+
+    # max_time_stamp definition moved into utility_functions.sql
+
+    # get_discussion definition moved to bridge_get_discussion.sql
+
+    sql_scripts = [
+      "utility_functions.sql",
+      "hive_accounts_view.sql",
+      "hive_accounts_info_view.sql",
+      "hive_posts_base_view.sql",
+      "hive_posts_view.sql",
+      "hive_votes_view.sql",
+      "hive_muted_accounts_view.sql",
+      "hive_muted_accounts_by_id_view.sql",
+      "hive_blacklisted_accounts_by_observer_view.sql",
+      "hive_post_operations.sql",
+      "head_block_time.sql",
+      "update_feed_cache.sql",
+      "payout_stats_view.sql",
+      "update_hive_posts_mentions.sql",
+      "mutes.sql",
+      "bridge_get_ranked_post_type.sql",
+      "bridge_get_ranked_post_for_communities.sql",
+      "bridge_get_ranked_post_for_observer_communities.sql",
+      "bridge_get_ranked_post_for_tag.sql",
+      "bridge_get_ranked_post_for_all.sql",
+      "calculate_account_reputations.sql",
+      "update_communities_rank.sql",
+      "delete_hive_posts_mentions.sql",
+      "notifications_view.sql",
+      "notifications_api.sql",
+      "bridge_get_account_posts_by_comments.sql",
+      "bridge_get_account_posts_by_payout.sql",
+      "bridge_get_account_posts_by_posts.sql",
+      "bridge_get_account_posts_by_replies.sql",
+      "bridge_get_relationship_between_accounts.sql",
+      "bridge_get_post.sql",
+      "bridge_get_discussion.sql",
+      "condenser_api_post_type.sql",
+      "condenser_api_post_ex_type.sql",
+      "condenser_get_blog.sql",
+      "condenser_get_content.sql",
+      "condenser_tags.sql",
+      "condenser_follows.sql",
+      "hot_and_trends.sql",
+      "update_hive_posts_children_count.sql",
+      "update_hive_posts_api_helper.sql",
+      "database_api_list_comments.sql",
+      "database_api_list_votes.sql",
+      "update_posts_rshares.sql",
+      "update_hive_post_root_id.sql",
+      "condenser_get_by_account_comments.sql",
+      "condenser_get_by_blog_without_reblog.sql",
+      "bridge_get_by_feed_with_reblog.sql",
+      "condenser_get_by_blog.sql",
+      "bridge_get_account_posts_by_blog.sql",
+      "condenser_get_names_by_reblogged.sql",
+      "condenser_get_account_reputations.sql",
+      "update_follow_count.sql",
+      "delete_reblog_feed_cache.sql"
+    ]
+    from os.path import dirname, realpath
+    dir_path = dirname(realpath(__file__))
+    for script in sql_scripts:
+        execute_sql_script(db.query_no_return, "{}/sql_scripts/{}".format(dir_path, script))
+
+
 def reset_autovac(db):
     """Initializes/resets per-table autovacuum/autoanalyze params.
 
@@ -360,9 +624,7 @@ def reset_autovac(db):
 
     autovac_config = { #    vacuum  analyze
         'hive_accounts':    (50000, 100000),
-        'hive_posts_cache': (25000, 25000),
         'hive_posts':       (2500, 10000),
-        'hive_post_tags':   (5000, 10000),
         'hive_follows':     (5000, 5000),
         'hive_feed_cache':  (5000, 5000),
         'hive_blocks':      (5000, 25000),
@@ -376,3 +638,59 @@ def reset_autovac(db):
                                      autovacuum_analyze_scale_factor = 0,
                                      autovacuum_analyze_threshold = %s)"""
         db.query(sql % (table, n_vacuum, n_analyze))
+
+
+def set_fillfactor(db):
+    """Initializes/resets FILLFACTOR for tables which are intesively updated"""
+
+    fillfactor_config = {
+        'hive_posts': 70,
+        'hive_post_data': 70,
+        'hive_votes': 70,
+        'hive_reputation_data': 50
+    }
+
+    for table, fillfactor in fillfactor_config.items():
+        sql = """ALTER TABLE {} SET (FILLFACTOR = {})"""
+        db.query(sql.format(table, fillfactor))
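+    # For example, for 'hive_posts' the statement above expands to:
+    #   ALTER TABLE hive_posts SET (FILLFACTOR = 70)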
+
+def set_logged_table_attribute(db, logged):
+    """Initializes/resets LOGGED/UNLOGGED attribute for tables which are intesively updated"""
+
+    logged_config = [
+        'hive_accounts',
+        'hive_permlink_data',
+        'hive_posts',
+        'hive_post_data',
+        'hive_votes',
+        'hive_reputation_data'
+    ]
+
+    for table in logged_config:
+        log.info("Setting {} attribute on a table: {}".format('LOGGED' if logged else 'UNLOGGED', table))
+        sql = """ALTER TABLE {} SET {}"""
+        db.query_no_return(sql.format(table, 'LOGGED' if logged else 'UNLOGGED'))
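+    # For example, set_logged_table_attribute(db, False) issues statements like:
+    #   ALTER TABLE hive_posts SET UNLOGGED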
+
+def execute_sql_script(query_executor, path_to_script):
+    """ Load and execute sql script from file
+        Params:
+          query_executor - callable to execute query with
+          path_to_script - path to script
+        Returns:
+          whatever query_executor returns
+
+        Example:
+          print(execute_sql_script(db.query_row, "./test.sql"))
+          where test.sql contains: SELECT * FROM hive_state WHERE block_num = 0;
+          will return something like: (0, 18, Decimal('0.000000'), Decimal('0.000000'), Decimal('0.000000'), '')
+    """
+    try:
+        sql_script = None
+        with open(path_to_script, 'r') as sql_script_file:
+            sql_script = sql_script_file.read()
+        if sql_script is not None:
+            return query_executor(sql_script)
+    except Exception as ex:
+        log.exception("Error running sql script: {}".format(ex))
+        raise
+    return None
diff --git a/hive/db/sql_scripts/bridge_get_account_posts_by_blog.sql b/hive/db/sql_scripts/bridge_get_account_posts_by_blog.sql
new file mode 100644
index 0000000000000000000000000000000000000000..9377ba5d454acfcd5c13251e95397828244843f0
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_account_posts_by_blog.sql
@@ -0,0 +1,84 @@
+DROP FUNCTION IF EXISTS bridge_get_account_posts_by_blog;
+
+CREATE OR REPLACE FUNCTION bridge_get_account_posts_by_blog(
+  in _account VARCHAR,
+  in _author VARCHAR,
+  in _permlink VARCHAR,
+  in _limit INTEGER,
+  in _bridge_api BOOLEAN
+)
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INTEGER;
+  __account_id INTEGER;
+  __created_at TIMESTAMP;
+BEGIN
+  __account_id = find_account_id( _account, True );
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+    SELECT hfc.created_at INTO __created_at
+    FROM hive_feed_cache hfc
+    WHERE hfc.account_id = __account_id AND hfc.post_id = __post_id;
+  END IF;
+
+  RETURN QUERY SELECT -- bridge_get_account_posts_by_blog
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      NULL
+    FROM hive_posts_view hp
+    JOIN
+    (
+      SELECT hfc.post_id, hfc.created_at
+      FROM hive_feed_cache hfc
+      WHERE hfc.account_id = __account_id AND ( __post_id = 0 OR hfc.created_at < __created_at OR ( hfc.created_at = __created_at AND hfc.post_id < __post_id ) )
+        AND ( NOT _bridge_api OR
+              NOT EXISTS (SELECT NULL FROM hive_posts hp1
+                          WHERE hp1.id = hfc.post_id AND hp1.counter_deleted = 0 AND hp1.depth = 0 AND hp1.community_id IS NOT NULL
+                          AND NOT EXISTS (SELECT NULL FROM hive_reblogs hr WHERE hr.blogger_id = __account_id AND hr.post_id = hp1.id)
+                         )
+            )
+      ORDER BY hfc.created_at DESC, hfc.post_id DESC
+      LIMIT _limit
+    ) blog ON hp.id = blog.post_id
+    ORDER BY blog.created_at DESC, blog.post_id DESC
+    LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
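+
+-- Illustrative usage only (the account/author/permlink values below are hypothetical;
+-- an empty author/permlink is assumed to mean "start from the first page"):
+--   SELECT * FROM bridge_get_account_posts_by_blog( 'alice', '', '', 20, True );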
diff --git a/hive/db/sql_scripts/bridge_get_account_posts_by_comments.sql b/hive/db/sql_scripts/bridge_get_account_posts_by_comments.sql
new file mode 100644
index 0000000000000000000000000000000000000000..3a0ffeb49a81ce26c0a70c5e53cfefe37946d248
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_account_posts_by_comments.sql
@@ -0,0 +1,65 @@
+DROP FUNCTION IF EXISTS bridge_get_account_posts_by_comments;
+
+CREATE FUNCTION bridge_get_account_posts_by_comments( in _account VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __account_id INT;
+  __post_id INT;
+BEGIN
+  __account_id = find_account_id( _account, True );
+  __post_id = find_comment_id( _author, _permlink, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      NULL
+  FROM
+  (
+    SELECT hp1.id
+    FROM hive_posts hp1 
+    WHERE hp1.author_id = __account_id AND hp1.counter_deleted = 0 AND hp1.depth > 0 AND ( __post_id = 0 OR hp1.id < __post_id )
+    ORDER BY hp1.id DESC
+    LIMIT _limit
+  ) ds
+  JOIN hive_posts_view hp ON ds.id = hp.id
+  ORDER BY hp.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/bridge_get_account_posts_by_payout.sql b/hive/db/sql_scripts/bridge_get_account_posts_by_payout.sql
new file mode 100644
index 0000000000000000000000000000000000000000..5106cb3791cb187580a6732c8ac96f362d2b6ae2
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_account_posts_by_payout.sql
@@ -0,0 +1,65 @@
+DROP FUNCTION IF EXISTS bridge_get_account_posts_by_payout;
+
+CREATE FUNCTION bridge_get_account_posts_by_payout( in _account VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __account_id INT;
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+BEGIN
+  __account_id = find_account_id( _account, True );
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      NULL
+  FROM
+      hive_posts_view hp
+  WHERE
+      hp.author_id = __account_id AND NOT hp.is_paidout
+      AND ( __post_id = 0 OR ( hp.payout + hp.pending_payout ) < __payout_limit OR ( ( hp.payout + hp.pending_payout ) = __payout_limit AND hp.id < __post_id ) )
+  ORDER BY ( hp.payout + hp.pending_payout ) DESC, hp.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/bridge_get_account_posts_by_posts.sql b/hive/db/sql_scripts/bridge_get_account_posts_by_posts.sql
new file mode 100644
index 0000000000000000000000000000000000000000..b0be3c7d361aa01ecb355ba77f1842b66ff3d67b
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_account_posts_by_posts.sql
@@ -0,0 +1,60 @@
+DROP FUNCTION IF EXISTS bridge_get_account_posts_by_posts;
+
+CREATE FUNCTION bridge_get_account_posts_by_posts( in _account VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __account_id INT;
+  __post_id INT;
+BEGIN
+  __account_id = find_account_id( _account, True );
+  __post_id = find_comment_id( _author, _permlink, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      NULL
+  FROM
+      hive_posts_view hp
+  WHERE
+      hp.author_id = __account_id AND hp.depth = 0 AND ( __post_id = 0 OR hp.id < __post_id )
+  ORDER BY hp.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/bridge_get_account_posts_by_replies.sql b/hive/db/sql_scripts/bridge_get_account_posts_by_replies.sql
new file mode 100644
index 0000000000000000000000000000000000000000..c75348efee15e5af454fdf52e40ffb91d4b8f1e3
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_account_posts_by_replies.sql
@@ -0,0 +1,78 @@
+CREATE OR REPLACE FUNCTION bridge_get_account_posts_by_replies( in _account VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _bridge_api BOOLEAN )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __account_id INT;
+  __post_id INT;
+BEGIN
+  IF NOT _bridge_api AND _permlink <> '' THEN
+      -- find the blogger account using the parent author of the post that defines the page
+      __post_id = find_comment_id( _author, _permlink, True );
+      SELECT pp.author_id INTO __account_id
+      FROM hive_posts hp
+      JOIN hive_posts pp ON hp.parent_id = pp.id
+      WHERE hp.id = __post_id;
+      IF __account_id = 0 THEN __account_id = NULL; END IF;
+  ELSE
+      __account_id = find_account_id( _account, True );
+      __post_id = find_comment_id( _author, _permlink, True );
+  END IF;
+  RETURN QUERY SELECT --bridge_get_account_posts_by_replies
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      NULL
+  FROM
+  (
+    WITH ar as (SELECT hpr.id as id
+      FROM hive_posts hpr
+      JOIN hive_posts hp1 on hp1.id = hpr.parent_id
+      WHERE hp1.author_id = __account_id
+        AND (hpr.counter_deleted = 0)
+        AND (__post_id = 0 OR hpr.id < __post_id )
+    )
+    SELECT * FROM ar
+    ORDER BY ar.id DESC
+    LIMIT _limit
+  ) as replies
+  JOIN hive_posts_view hp ON hp.id = replies.id
+  ORDER BY replies.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
\ No newline at end of file
diff --git a/hive/db/sql_scripts/bridge_get_by_feed_with_reblog.sql b/hive/db/sql_scripts/bridge_get_by_feed_with_reblog.sql
new file mode 100644
index 0000000000000000000000000000000000000000..9ee07ae6a8260b9bc1d1116a98e22261ed56a10f
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_by_feed_with_reblog.sql
@@ -0,0 +1,81 @@
+DROP FUNCTION IF EXISTS bridge_get_by_feed_with_reblog;
+
+CREATE OR REPLACE FUNCTION bridge_get_by_feed_with_reblog( IN _account VARCHAR, IN _author VARCHAR, IN _permlink VARCHAR, IN _limit INTEGER)
+    RETURNS SETOF bridge_api_post_reblogs
+    LANGUAGE 'plpgsql'
+    STABLE 
+    ROWS 1000
+AS $BODY$
+DECLARE
+  __post_id INT;
+  __cutoff INT;
+  __account_id INT;
+  __min_date TIMESTAMP;
+BEGIN
+  __account_id = find_account_id( _account, True );
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+    SELECT MIN(hfc.created_at) INTO __min_date
+    FROM hive_feed_cache hfc
+    JOIN hive_follows hf ON hfc.account_id = hf.following
+    WHERE hf.state = 1 AND hf.follower = __account_id AND hfc.post_id = __post_id;
+  END IF;
+
+  __cutoff = block_before_head( '1 month' );
+
+  RETURN QUERY SELECT -- bridge_get_by_feed_with_reblog
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      T.reblogged_by
+    FROM hive_posts_view hp
+    JOIN
+    (
+      SELECT hfc.post_id, MIN(hfc.created_at) as min_created, array_agg(ha.name) AS reblogged_by
+      FROM hive_feed_cache hfc
+      JOIN hive_follows hf ON hfc.account_id = hf.following
+      JOIN hive_accounts ha ON ha.id = hf.following
+      WHERE hfc.block_num > __cutoff AND hf.state = 1 AND hf.follower = __account_id
+      GROUP BY hfc.post_id
+      HAVING __post_id = 0 OR MIN(hfc.created_at) < __min_date OR ( MIN(hfc.created_at) = __min_date AND hfc.post_id < __post_id )
+      ORDER BY min_created DESC, hfc.post_id DESC
+      LIMIT _limit
+    ) T ON hp.id =  T.post_id
+    ORDER BY T.min_created DESC, T.post_id DESC;
+END
+$BODY$
+;
diff --git a/hive/db/sql_scripts/bridge_get_discussion.sql b/hive/db/sql_scripts/bridge_get_discussion.sql
new file mode 100644
index 0000000000000000000000000000000000000000..6644f6c89ef25467164c078e5e0159f34a2d1f49
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_discussion.sql
@@ -0,0 +1,86 @@
+DROP FUNCTION IF EXISTS bridge_get_discussion
+;
+CREATE OR REPLACE FUNCTION bridge_get_discussion(
+    in _author hive_accounts.name%TYPE,
+    in _permlink hive_permlink_data.permlink%TYPE,
+    in _observer VARCHAR
+)
+RETURNS SETOF bridge_api_post_discussion
+LANGUAGE plpgsql
+AS
+$function$
+DECLARE
+    __post_id INT;
+    __observer_id INT;
+BEGIN
+    __post_id = find_comment_id( _author, _permlink, True );
+    __observer_id = find_account_id( _observer, True );
+    RETURN QUERY
+    SELECT -- bridge_get_discussion
+        hpv.id,
+        hpv.author,
+        hpv.parent_author,
+        hpv.author_rep,
+        hpv.root_title,
+        hpv.beneficiaries,
+        hpv.max_accepted_payout,
+        hpv.percent_hbd,
+        hpv.url,
+        hpv.permlink,
+        hpv.parent_permlink_or_category,
+        hpv.title,
+        hpv.body,
+        hpv.category,
+        hpv.depth,
+        hpv.promoted,
+        hpv.payout,
+        hpv.pending_payout,
+        hpv.payout_at,
+        hpv.is_paidout,
+        hpv.children,
+        hpv.votes,
+        hpv.created_at,
+        hpv.updated_at,
+        hpv.rshares,
+        hpv.abs_rshares,
+        hpv.json,
+        hpv.is_hidden,
+        hpv.is_grayed,
+        hpv.total_votes,
+        hpv.sc_trend,
+        hpv.role_title,
+        hpv.community_title,
+        hpv.role_id,
+        hpv.is_pinned,
+        hpv.curator_payout_value,
+        hpv.is_muted,
+        hpv.parent_id,
+        ds.source
+    FROM
+    (
+        WITH RECURSIVE child_posts (id, parent_id) AS
+        (
+            SELECT hp.id, hp.parent_id, blacklisted_by_observer_view.source as source
+            FROM hive_posts hp left outer join blacklisted_by_observer_view on (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp.author_id)
+            WHERE hp.id = __post_id
+            AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp.author_id))
+            UNION ALL
+            SELECT children.id, children.parent_id, blacklisted_by_observer_view.source as source
+            FROM hive_posts children left outer join blacklisted_by_observer_view on (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = children.author_id)
+            JOIN child_posts ON children.parent_id = child_posts.id
+            JOIN hive_accounts ON children.author_id = hive_accounts.id
+            WHERE children.counter_deleted = 0
+            AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = children.author_id))
+        )
+        SELECT hp2.id, cp.source
+        FROM hive_posts hp2
+        JOIN child_posts cp ON cp.id = hp2.id
+        ORDER BY hp2.id
+    ) ds
+    JOIN hive_posts_view hpv ON ds.id = hpv.id
+    ORDER BY ds.id
+    LIMIT 2000
+    ;
+END
+$function$
+;
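+-- Illustrative usage only (the author/permlink/observer values below are hypothetical):
+--   SELECT * FROM bridge_get_discussion( 'alice', 'my-post', 'bob' );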
diff --git a/hive/db/sql_scripts/bridge_get_post.sql b/hive/db/sql_scripts/bridge_get_post.sql
new file mode 100644
index 0000000000000000000000000000000000000000..3ad4667a14e61bb07f0b50c171160d9a109b625c
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_post.sql
@@ -0,0 +1,56 @@
+DROP FUNCTION IF EXISTS bridge_get_post;
+
+CREATE FUNCTION bridge_get_post( in _author VARCHAR, in _permlink VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      NULL
+  FROM
+      hive_posts_view hp
+  WHERE
+      hp.id = __post_id;
+END
+$function$
+language plpgsql STABLE;
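+
+-- Illustrative usage only (the author/permlink values below are hypothetical):
+--   SELECT * FROM bridge_get_post( 'alice', 'my-post' );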
diff --git a/hive/db/sql_scripts/bridge_get_ranked_post_for_all.sql b/hive/db/sql_scripts/bridge_get_ranked_post_for_all.sql
new file mode 100644
index 0000000000000000000000000000000000000000..8104dbfb6b273a2b08c34d6b026a8a4bea1fcfa5
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_ranked_post_for_all.sql
@@ -0,0 +1,528 @@
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_created;
+CREATE FUNCTION bridge_get_ranked_post_by_created( in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      created.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          blacklisted_by_observer_view.source as source
+      FROM hive_posts hp1
+          JOIN hive_accounts_view ha ON hp1.author_id = ha.id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.counter_deleted = 0 AND hp1.depth = 0 AND NOT ha.is_grayed AND ( __post_id = 0 OR hp1.id < __post_id )
+      AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.id DESC
+      LIMIT _limit
+  ) as created
+  JOIN hive_posts_view hp ON hp.id = created.id
+  ORDER BY created.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_hot;
+CREATE FUNCTION bridge_get_ranked_post_by_hot( in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __hot_limit FLOAT;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.sc_hot INTO __hot_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      hot.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          hp1.sc_hot as hot,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.depth = 0
+          AND ( __post_id = 0 OR hp1.sc_hot < __hot_limit OR ( hp1.sc_hot = __hot_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.sc_hot DESC, hp1.id DESC
+      LIMIT _limit
+  ) as hot
+  JOIN hive_posts_view hp ON hp.id = hot.id
+  ORDER BY hot.hot DESC, hot.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_muted;
+CREATE FUNCTION bridge_get_ranked_post_by_muted( in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id(_observer, True);
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      payout.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          ( hp1.payout + hp1.pending_payout ) as all_payout,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          JOIN hive_accounts_view ha ON hp1.author_id = ha.id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND ha.is_grayed AND ( hp1.payout + hp1.pending_payout ) > 0
+          AND ( __post_id = 0 OR ( hp1.payout + hp1.pending_payout ) < __payout_limit OR ( ( hp1.payout + hp1.pending_payout ) = __payout_limit AND hp1.id < __post_id ) )
+      ORDER BY ( hp1.payout + hp1.pending_payout ) DESC, hp1.id DESC
+      LIMIT _limit
+  ) as payout
+  JOIN hive_posts_view hp ON hp.id = payout.id
+  ORDER BY payout.all_payout DESC, payout.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_payout_comments;
+CREATE FUNCTION bridge_get_ranked_post_by_payout_comments( in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      payout.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          ( hp1.payout + hp1.pending_payout ) as all_payout,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.depth > 0
+          AND ( __post_id = 0 OR ( hp1.payout + hp1.pending_payout ) < __payout_limit OR ( ( hp1.payout + hp1.pending_payout ) = __payout_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY ( hp1.payout + hp1.pending_payout ) DESC, hp1.id DESC
+      LIMIT _limit
+  ) as payout
+  JOIN hive_posts_view hp ON hp.id = payout.id
+  ORDER BY payout.all_payout DESC, payout.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_payout;
+CREATE FUNCTION bridge_get_ranked_post_by_payout( in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _bridge_api BOOLEAN, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __head_block_time TIMESTAMP;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __head_block_time = head_block_time();
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      payout.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          ( hp1.payout + hp1.pending_payout ) as all_payout,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.counter_deleted = 0 AND NOT hp1.is_paidout
+          AND ( ( NOT _bridge_api AND hp1.depth = 0 ) OR ( _bridge_api AND hp1.payout_at BETWEEN __head_block_time + interval '12 hours' AND __head_block_time + interval '36 hours' ) )
+          AND ( __post_id = 0 OR ( hp1.payout + hp1.pending_payout ) < __payout_limit OR ( ( hp1.payout + hp1.pending_payout ) = __payout_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY ( hp1.payout + hp1.pending_payout ) DESC, hp1.id DESC
+      LIMIT _limit
+  ) as payout
+  JOIN hive_posts_view hp ON hp.id = payout.id
+  ORDER BY payout.all_payout DESC, payout.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_promoted;
+CREATE FUNCTION bridge_get_ranked_post_by_promoted( in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __promoted_limit hive_posts.promoted%TYPE;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.promoted INTO __promoted_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      promoted.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          hp1.promoted as promoted,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.promoted > 0
+          AND ( __post_id = 0 OR hp1.promoted < __promoted_limit OR ( hp1.promoted = __promoted_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.promoted DESC, hp1.id DESC
+      LIMIT _limit
+  ) as promoted
+  JOIN hive_posts_view hp ON hp.id = promoted.id
+  ORDER BY promoted.promoted DESC, promoted.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_trends;
+CREATE FUNCTION bridge_get_ranked_post_by_trends( in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __trending_limit FLOAT;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.sc_trend INTO __trending_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      trends.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          hp1.sc_trend as trend,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.depth = 0
+          AND ( __post_id = 0 OR hp1.sc_trend < __trending_limit OR ( hp1.sc_trend = __trending_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.sc_trend DESC, hp1.id DESC
+      LIMIT _limit
+  ) as trends
+  JOIN hive_posts_view hp ON hp.id = trends.id
+  ORDER BY trends.trend DESC, trends.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/bridge_get_ranked_post_for_communities.sql b/hive/db/sql_scripts/bridge_get_ranked_post_for_communities.sql
new file mode 100644
index 0000000000000000000000000000000000000000..eed6a15e6ea9fc291106525f4cf34653767e0803
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_ranked_post_for_communities.sql
@@ -0,0 +1,597 @@
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_pinned_for_community;
+CREATE FUNCTION bridge_get_ranked_post_pinned_for_community( in _community VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __observer_id INT;
+BEGIN
+  __observer_id = find_account_id( _observer, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      blacklisted_by_observer_view.source
+  FROM
+      hive_posts_view hp
+      JOIN hive_communities hc ON hc.id = hp.community_id
+      LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp.author_id)
+  WHERE hc.name = _community AND hp.is_pinned
+  AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp.author_id))
+  ORDER BY hp.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_trends_for_community;
+CREATE FUNCTION bridge_get_ranked_post_by_trends_for_community( in _community VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _bridge_api BOOLEAN, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __trending_limit FLOAT;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.sc_trend INTO __trending_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      trends.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          hp1.sc_trend as trend,
+          blacklisted_by_observer_view.source as source
+      FROM
+         hive_posts hp1
+         JOIN hive_communities hc ON hp1.community_id = hc.id
+         LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hc.name = _community AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.depth = 0
+         AND ( NOT _bridge_api OR NOT hp1.is_pinned ) -- concatenated with bridge_get_ranked_post_pinned_for_community when called for bridge_api
+         AND ( __post_id = 0 OR hp1.sc_trend < __trending_limit OR ( hp1.sc_trend = __trending_limit AND hp1.id < __post_id ) )
+         AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.sc_trend DESC, hp1.id DESC
+      LIMIT _limit
+  ) as trends
+  JOIN hive_posts_view hp ON hp.id = trends.id
+  ORDER BY trends.trend DESC, trends.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_promoted_for_community;
+CREATE FUNCTION bridge_get_ranked_post_by_promoted_for_community( in _community VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __promoted_limit hive_posts.promoted%TYPE;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.promoted INTO __promoted_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      promoted.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          hp1.promoted as promoted,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          JOIN hive_communities hc ON hp1.community_id = hc.id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hc.name = _community AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.promoted > 0
+          AND ( __post_id = 0 OR hp1.promoted < __promoted_limit OR ( hp1.promoted = __promoted_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.promoted DESC, hp1.id DESC
+      LIMIT _limit
+  ) as promoted
+  JOIN hive_posts_view hp ON hp.id = promoted.id
+  ORDER BY promoted.promoted DESC, promoted.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_payout_for_community;
+CREATE FUNCTION bridge_get_ranked_post_by_payout_for_community(in _community VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __head_block_time TIMESTAMP;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __head_block_time = head_block_time();
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      payout.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          ( hp1.payout + hp1.pending_payout ) as all_payout,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          JOIN hive_communities hc ON hp1.community_id = hc.id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hc.name = _community AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.payout_at BETWEEN __head_block_time + interval '12 hours' AND __head_block_time + interval '36 hours'
+          AND ( __post_id = 0 OR ( hp1.payout + hp1.pending_payout ) < __payout_limit OR ( ( hp1.payout + hp1.pending_payout ) = __payout_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY ( hp1.payout + hp1.pending_payout ) DESC, hp1.id DESC
+      LIMIT _limit
+  ) as payout
+  JOIN hive_posts_view hp ON hp.id = payout.id
+  ORDER BY payout.all_payout DESC, payout.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_payout_comments_for_community;
+CREATE FUNCTION bridge_get_ranked_post_by_payout_comments_for_community( in _community VARCHAR,  in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      payout.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          ( hp1.payout + hp1.pending_payout ) as all_payout,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          JOIN hive_communities hc ON hp1.community_id = hc.id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hc.name = _community AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.depth > 0
+          AND ( __post_id = 0 OR ( hp1.payout + hp1.pending_payout ) < __payout_limit OR ( ( hp1.payout + hp1.pending_payout ) = __payout_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY ( hp1.payout + hp1.pending_payout ) DESC, hp1.id DESC
+      LIMIT _limit
+  ) as payout
+  JOIN hive_posts_view hp ON hp.id = payout.id
+  ORDER BY payout.all_payout DESC, payout.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_muted_for_community;
+CREATE FUNCTION bridge_get_ranked_post_by_muted_for_community( in _community VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __observer_id = find_account_id(_observer, True);
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      payout.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          ( hp1.payout + hp1.pending_payout ) as all_payout,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          JOIN hive_communities hc ON hp1.community_id = hc.id
+          JOIN hive_accounts_view ha ON hp1.author_id = ha.id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hc.name = _community AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND ha.is_grayed AND ( hp1.payout + hp1.pending_payout ) > 0
+          AND ( __post_id = 0 OR ( hp1.payout + hp1.pending_payout ) < __payout_limit OR ( ( hp1.payout + hp1.pending_payout ) = __payout_limit AND hp1.id < __post_id ) )
+      ORDER BY ( hp1.payout + hp1.pending_payout ) DESC, hp1.id DESC
+      LIMIT _limit
+  ) as payout
+  JOIN hive_posts_view hp ON hp.id = payout.id
+  ORDER BY payout.all_payout DESC, payout.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_hot_for_community;
+CREATE FUNCTION bridge_get_ranked_post_by_hot_for_community( in _community VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __hot_limit FLOAT;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.sc_hot INTO __hot_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      hot.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          hp1.sc_hot as hot,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          JOIN hive_communities hc ON hp1.community_id = hc.id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hc.name = _community AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.depth = 0
+          AND ( __post_id = 0 OR hp1.sc_hot < __hot_limit OR ( hp1.sc_hot = __hot_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.sc_hot DESC, hp1.id DESC
+      LIMIT _limit
+  ) as hot
+  JOIN hive_posts_view hp ON hp.id = hot.id
+  ORDER BY hot.hot DESC, hot.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
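+
+-- Illustrative call (community and observer names are placeholders); pass the author/permlink of the
+-- last post from the previous page to fetch the next page, or empty strings for the first page:
+--   SELECT * FROM bridge_get_ranked_post_by_hot_for_community( 'hive-123456', '', '', 20::SMALLINT, 'alice' );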
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_created_for_community;
+CREATE FUNCTION bridge_get_ranked_post_by_created_for_community( in _community VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _bridge_api BOOLEAN, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __observer_id = find_account_id( _observer, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      created.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          JOIN hive_communities hc ON hp1.community_id = hc.id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hc.name = _community AND hp1.counter_deleted = 0 AND hp1.depth = 0
+          AND ( NOT _bridge_api OR NOT hp1.is_pinned ) -- concatenated with bridge_get_ranked_post_pinned_for_community when called for bridge_api
+          AND ( __post_id = 0 OR hp1.id < __post_id )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.id DESC
+      LIMIT _limit
+  ) as created
+  JOIN hive_posts_view hp ON hp.id = created.id
+  ORDER BY created.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
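+
+-- Illustrative call (names are placeholders); with _bridge_api = TRUE pinned posts are left out here,
+-- since the bridge API fetches them separately (bridge_get_ranked_post_pinned_for_community) and
+-- concatenates them in front of this result:
+--   SELECT * FROM bridge_get_ranked_post_by_created_for_community( 'hive-123456', '', '', 20::SMALLINT, TRUE, 'alice' );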
diff --git a/hive/db/sql_scripts/bridge_get_ranked_post_for_observer_communities.sql b/hive/db/sql_scripts/bridge_get_ranked_post_for_observer_communities.sql
new file mode 100644
index 0000000000000000000000000000000000000000..02c7aed27d0b42d268a0e2d96d8e60704746d4b4
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_ranked_post_for_observer_communities.sql
@@ -0,0 +1,501 @@
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_created_for_observer_communities;
+CREATE FUNCTION bridge_get_ranked_post_by_created_for_observer_communities( in _observer VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __account_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __account_id = find_account_id( _observer, True );
+  RETURN QUERY
+    with post_ids as (select posts.id
+                      from (select community_id
+                            from hive_subscriptions
+                            where account_id = __account_id) communities
+                      cross join lateral (select hive_posts.id
+                                          from hive_posts
+                                          join hive_accounts on (hive_posts.author_id = hive_accounts.id)
+                                          where hive_posts.community_id = communities.community_id
+                                            and hive_posts.depth = 0
+                                            and hive_posts.counter_deleted = 0
+                                            and (__post_id = 0 OR hive_posts.id < __post_id)
+                                            and hive_accounts.reputation > '-464800000000'::bigint
+                                            and (not exists (select 1 from muted_accounts_by_id_view where observer_id = __account_id and muted_id = hive_posts.author_id))
+                                          order by id desc
+                                          limit _limit) posts
+                      order by id desc
+                      limit _limit)
+      SELECT
+          hp.id,
+          hp.author,
+          hp.parent_author,
+          hp.author_rep,
+          hp.root_title,
+          hp.beneficiaries,
+          hp.max_accepted_payout,
+          hp.percent_hbd,
+          hp.url,
+          hp.permlink,
+          hp.parent_permlink_or_category,
+          hp.title,
+          hp.body,
+          hp.category,
+          hp.depth,
+          hp.promoted,
+          hp.payout,
+          hp.pending_payout,
+          hp.payout_at,
+          hp.is_paidout,
+          hp.children,
+          hp.votes,
+          hp.created_at,
+          hp.updated_at,
+          hp.rshares,
+          hp.abs_rshares,
+          hp.json,
+          hp.is_hidden,
+          hp.is_grayed,
+          hp.total_votes,
+          hp.sc_trend,
+          hp.role_title,
+          hp.community_title,
+          hp.role_id,
+          hp.is_pinned,
+          hp.curator_payout_value,
+          hp.is_muted,
+          blacklisted_by_observer_view.source
+      from post_ids
+      join hive_posts_view hp using (id)
+      LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __account_id AND blacklisted_by_observer_view.blacklisted_id = hp.author_id)
+      order by id desc;
+END
+$function$
+language plpgsql STABLE;
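+
+-- Illustrative call (observer name is a placeholder): newest root posts from the communities the observer subscribes to:
+--   SELECT * FROM bridge_get_ranked_post_by_created_for_observer_communities( 'alice', '', '', 20::SMALLINT );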
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_hot_for_observer_communities;
+CREATE FUNCTION bridge_get_ranked_post_by_hot_for_observer_communities( in _observer VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __hot_limit FLOAT;
+  __account_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.sc_hot INTO __hot_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __account_id = find_account_id( _observer, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      blacklisted_by_observer_view.source
+  FROM
+      hive_posts_view hp
+      JOIN hive_subscriptions hs ON hp.community_id = hs.community_id
+      LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __account_id AND blacklisted_by_observer_view.blacklisted_id = hp.author_id)
+  WHERE hs.account_id = __account_id AND NOT hp.is_paidout AND hp.depth = 0
+      AND ( __post_id = 0 OR hp.sc_hot < __hot_limit OR ( hp.sc_hot = __hot_limit AND hp.id < __post_id ) )
+      AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __account_id AND muted_id = hp.author_id))
+  ORDER BY hp.sc_hot DESC, hp.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_payout_comments_for_observer_communities;
+CREATE FUNCTION bridge_get_ranked_post_by_payout_comments_for_observer_communities( in _observer VARCHAR,  in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __account_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __account_id = find_account_id( _observer, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      payout.blacklist_source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          ( hp1.payout + hp1.pending_payout ) as all_payout,
+          blacklisted_by_observer_view.source as blacklist_source
+      FROM
+          hive_posts hp1
+          JOIN hive_subscriptions hs ON hp1.community_id = hs.community_id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __account_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hs.account_id = __account_id AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.depth > 0
+          AND ( __post_id = 0 OR ( hp1.payout + hp1.pending_payout ) < __payout_limit OR ( ( hp1.payout + hp1.pending_payout ) = __payout_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __account_id AND muted_id = hp1.author_id))
+      ORDER BY ( hp1.payout + hp1.pending_payout ) DESC, hp1.id DESC
+      LIMIT _limit
+  ) as payout
+  JOIN hive_posts_view hp ON hp.id = payout.id
+  ORDER BY payout.all_payout DESC, payout.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_payout_for_observer_communities;
+CREATE FUNCTION bridge_get_ranked_post_by_payout_for_observer_communities( in _observer VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __head_block_time TIMESTAMP;
+  __account_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __account_id = find_account_id( _observer, True );
+  __head_block_time = head_block_time();
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      blacklisted_by_observer_view.source
+  FROM
+      hive_posts_view hp
+      JOIN hive_subscriptions hs ON hp.community_id = hs.community_id
+      LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __account_id AND blacklisted_by_observer_view.blacklisted_id = hp.author_id)
+  WHERE hs.account_id = __account_id AND NOT hp.is_paidout AND hp.payout_at BETWEEN __head_block_time + interval '12 hours' AND __head_block_time + interval '36 hours'
+      AND ( __post_id = 0 OR ( hp.payout + hp.pending_payout ) < __payout_limit OR ( ( hp.payout + hp.pending_payout ) = __payout_limit AND hp.id < __post_id ) )
+      AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __account_id AND muted_id = hp.author_id))
+  ORDER BY ( hp.payout + hp.pending_payout ) DESC, hp.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_promoted_for_observer_communities;
+CREATE FUNCTION bridge_get_ranked_post_by_promoted_for_observer_communities( in _observer VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __promoted_limit hive_posts.promoted%TYPE;
+  __account_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.promoted INTO __promoted_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __account_id = find_account_id( _observer, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      blacklisted_by_observer_view.source
+  FROM
+      hive_posts_view hp
+      JOIN hive_subscriptions hs ON hp.community_id = hs.community_id
+      LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __account_id AND blacklisted_by_observer_view.blacklisted_id = hp.author_id)
+  WHERE hs.account_id = __account_id AND NOT hp.is_paidout AND hp.promoted > 0
+      AND ( __post_id = 0 OR hp.promoted < __promoted_limit OR ( hp.promoted = __promoted_limit AND hp.id < __post_id ) )
+      AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __account_id AND muted_id = hp.author_id))
+  ORDER BY hp.promoted DESC, hp.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_trends_for_observer_communities;
+CREATE OR REPLACE FUNCTION bridge_get_ranked_post_by_trends_for_observer_communities( in _observer VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __account_id INT;
+  __trending_limit FLOAT := 0;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __account_id = find_account_id( _observer, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.sc_trend INTO __trending_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      trending.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          hp1.sc_trend,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          JOIN hive_subscriptions hs ON hp1.community_id = hs.community_id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __account_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE
+          hs.account_id = __account_id AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.depth = 0
+          AND ( __post_id = 0 OR hp1.sc_trend < __trending_limit OR ( hp1.sc_trend = __trending_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __account_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.sc_trend DESC, hp1.id DESC
+      LIMIT _limit
+  ) trending
+  JOIN hive_posts_view hp ON trending.id = hp.id
+  ORDER BY trending.sc_trend DESC, trending.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_muted_for_observer_communities;
+CREATE FUNCTION bridge_get_ranked_post_by_muted_for_observer_communities( in _observer VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __account_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __account_id = find_account_id( _observer, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      blacklisted_by_observer_view.source
+  FROM
+      hive_posts_view hp
+      JOIN hive_subscriptions hs ON hp.community_id = hs.community_id
+      JOIN hive_accounts_view ha ON ha.id = hp.author_id
+      LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __account_id AND blacklisted_by_observer_view.blacklisted_id = hp.author_id)
+  WHERE hs.account_id = __account_id AND NOT hp.is_paidout AND ha.is_grayed AND ( hp.payout + hp.pending_payout ) > 0
+      AND ( __post_id = 0 OR ( hp.payout + hp.pending_payout ) < __payout_limit OR ( ( hp.payout + hp.pending_payout ) = __payout_limit AND hp.id < __post_id ) )
+  ORDER BY ( hp.payout + hp.pending_payout ) DESC, hp.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/bridge_get_ranked_post_for_tag.sql b/hive/db/sql_scripts/bridge_get_ranked_post_for_tag.sql
new file mode 100644
index 0000000000000000000000000000000000000000..ec0e3d62914b0344ecdebf49b7d461d85fa2eaf7
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_ranked_post_for_tag.sql
@@ -0,0 +1,544 @@
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_created_for_tag;
+CREATE FUNCTION bridge_get_ranked_post_by_created_for_tag( in _tag VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __hive_tag INT[];
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  __hive_tag = ARRAY_APPEND( __hive_tag, find_tag_id( _tag, True ));
+  __observer_id = find_account_id(_observer, True);
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      created.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          JOIN hive_accounts_view ha ON hp1.author_id = ha.id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.tags_ids @> __hive_tag AND hp1.counter_deleted = 0 AND hp1.depth = 0 AND NOT ha.is_grayed AND ( __post_id = 0 OR hp1.id < __post_id )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      --ORDER BY hp1.id + 0 DESC -- this workaround helped the planner pick better indexes, but over time it began to slow the query down significantly
+      ORDER BY hp1.id DESC
+      LIMIT _limit
+  ) as created
+  JOIN hive_posts_view hp ON hp.id = created.id
+  ORDER BY created.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_hot_for_tag;
+CREATE FUNCTION bridge_get_ranked_post_by_hot_for_tag( in _tag VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __hot_limit FLOAT;
+  __hive_tag INT[];
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.sc_hot INTO __hot_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __hive_tag = ARRAY_APPEND( __hive_tag, find_tag_id( _tag, True ));
+  __observer_id = find_account_id(_observer, True);
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      hot.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          hp1.sc_hot as hot,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.tags_ids @> __hive_tag AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.depth = 0
+          AND ( __post_id = 0 OR hp1.sc_hot < __hot_limit OR ( hp1.sc_hot = __hot_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.sc_hot DESC, hp1.id DESC
+      LIMIT _limit
+  ) as hot
+  JOIN hive_posts_view hp ON hp.id = hot.id
+  ORDER BY hot.hot DESC, hot.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_muted_for_tag;
+CREATE FUNCTION bridge_get_ranked_post_by_muted_for_tag( in _tag VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __hive_tag INT[];
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __hive_tag = ARRAY_APPEND( __hive_tag, find_tag_id( _tag, True ) );
+  __observer_id = find_account_id(_observer, True);
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      payout.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          ( hp1.payout + hp1.pending_payout ) as all_payout,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          JOIN hive_accounts_view ha ON hp1.author_id = ha.id
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.tags_ids @> __hive_tag AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND ha.is_grayed AND ( hp1.payout + hp1.pending_payout ) > 0
+          AND ( __post_id = 0 OR ( hp1.payout + hp1.pending_payout ) < __payout_limit OR ( ( hp1.payout + hp1.pending_payout ) = __payout_limit AND hp1.id < __post_id ) )
+      ORDER BY ( hp1.payout + hp1.pending_payout ) DESC, hp1.id DESC
+      LIMIT _limit
+  ) as payout
+  JOIN hive_posts_view hp ON hp.id = payout.id
+  ORDER BY payout.all_payout DESC, payout.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_payout_comments_for_category;
+CREATE FUNCTION bridge_get_ranked_post_by_payout_comments_for_category( in _category VARCHAR,  in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __hive_category INT;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __hive_category = find_category_id( _category, True );
+  __observer_id = find_account_id(_observer, True);
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      payout.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          ( hp1.payout + hp1.pending_payout ) as all_payout,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.category_id = __hive_category AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.depth > 0
+          AND ( __post_id = 0 OR ( hp1.payout + hp1.pending_payout ) < __payout_limit OR ( ( hp1.payout + hp1.pending_payout ) = __payout_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY ( hp1.payout + hp1.pending_payout ) DESC, hp1.id DESC
+      LIMIT _limit
+  ) as payout
+  JOIN hive_posts_view hp ON hp.id = payout.id
+  ORDER BY payout.all_payout DESC, payout.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_payout_for_category;
+CREATE FUNCTION bridge_get_ranked_post_by_payout_for_category( in _category VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _bridge_api BOOLEAN, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+  __head_block_time TIMESTAMP;
+  __hive_category INT;
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT ( hp.payout + hp.pending_payout ) INTO __payout_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __hive_category = find_category_id( _category, True );
+  __head_block_time = head_block_time();
+  __observer_id = find_account_id(_observer, True);
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      payout.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          ( hp1.payout + hp1.pending_payout ) as all_payout,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.category_id = __hive_category AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout
+          AND ( ( NOT _bridge_api AND hp1.depth = 0 ) OR ( _bridge_api AND hp1.payout_at BETWEEN __head_block_time + interval '12 hours' AND __head_block_time + interval '36 hours' ) )
+          AND ( __post_id = 0 OR ( hp1.payout + hp1.pending_payout ) < __payout_limit OR ( ( hp1.payout + hp1.pending_payout ) = __payout_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY ( hp1.payout + hp1.pending_payout ) DESC, hp1.id DESC
+      LIMIT _limit
+  ) as payout
+  JOIN hive_posts_view hp ON hp.id = payout.id
+  ORDER BY payout.all_payout DESC, payout.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
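+
+-- Illustrative call (category and observer are placeholders); with _bridge_api = TRUE the payout window
+-- filter (12-36 hours from the head block) is applied instead of restricting to top-level posts:
+--   SELECT * FROM bridge_get_ranked_post_by_payout_for_category( 'photography', '', '', 20::SMALLINT, TRUE, 'alice' );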
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_promoted_for_tag;
+CREATE FUNCTION bridge_get_ranked_post_by_promoted_for_tag( in _tag VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __promoted_limit hive_posts.promoted%TYPE;
+  __hive_tag INT[];
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.promoted INTO __promoted_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __hive_tag = ARRAY_APPEND( __hive_tag,  find_tag_id( _tag, True ) );
+  __observer_id = find_account_id(_observer, True);
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      promoted.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          hp1.promoted as promoted,
+          blacklisted_by_observer_view.source as source
+      FROM
+          hive_posts hp1
+          LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.tags_ids @> __hive_tag AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.promoted > 0
+          AND ( __post_id = 0 OR hp1.promoted < __promoted_limit OR ( hp1.promoted = __promoted_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.promoted DESC, hp1.id DESC
+      LIMIT _limit
+  ) as promoted
+  JOIN hive_posts_view hp ON hp.id = promoted.id
+  ORDER BY promoted.promoted DESC, promoted.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS bridge_get_ranked_post_by_trends_for_tag;
+CREATE FUNCTION bridge_get_ranked_post_by_trends_for_tag( in _tag VARCHAR, in _author VARCHAR, in _permlink VARCHAR, in _limit SMALLINT, in _observer VARCHAR )
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __trending_limit FLOAT;
+  __hive_tag INT[];
+  __observer_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  IF __post_id <> 0 THEN
+      SELECT hp.sc_trend INTO __trending_limit FROM hive_posts hp WHERE hp.id = __post_id;
+  END IF;
+  __hive_tag = ARRAY_APPEND( __hive_tag, find_tag_id( _tag, True ));
+  __observer_id = find_account_id(_observer, True);
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      trends.source
+  FROM
+  (
+      SELECT
+          hp1.id,
+          hp1.sc_trend as trend,
+          blacklisted_by_observer_view.source as source
+      FROM
+         hive_posts hp1
+         LEFT OUTER JOIN blacklisted_by_observer_view ON (blacklisted_by_observer_view.observer_id = __observer_id AND blacklisted_by_observer_view.blacklisted_id = hp1.author_id)
+      WHERE hp1.tags_ids @> __hive_tag AND hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.depth = 0
+          AND ( __post_id = 0 OR hp1.sc_trend < __trending_limit OR ( hp1.sc_trend = __trending_limit AND hp1.id < __post_id ) )
+          AND (NOT EXISTS (SELECT 1 FROM muted_accounts_by_id_view WHERE observer_id = __observer_id AND muted_id = hp1.author_id))
+      ORDER BY hp1.sc_trend DESC, hp1.id DESC
+      LIMIT _limit
+  ) as trends
+  JOIN hive_posts_view hp ON hp.id = trends.id
+  ORDER BY trends.trend DESC, trends.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
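+
+-- Illustrative call (tag and observer names are placeholders): trending root posts carrying a given tag:
+--   SELECT * FROM bridge_get_ranked_post_by_trends_for_tag( 'photography', '', '', 20::SMALLINT, 'alice' );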
diff --git a/hive/db/sql_scripts/bridge_get_ranked_post_type.sql b/hive/db/sql_scripts/bridge_get_ranked_post_type.sql
new file mode 100644
index 0000000000000000000000000000000000000000..eee1ca6a0b762517a65b15567300ce0806b4a8fc
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_ranked_post_type.sql
@@ -0,0 +1,128 @@
+DROP TYPE IF EXISTS bridge_api_post CASCADE;
+CREATE TYPE bridge_api_post AS (
+    id INTEGER,
+    author VARCHAR,
+    parent_author VARCHAR,
+    author_rep BIGINT,
+    root_title VARCHAR,
+    beneficiaries JSON,
+    max_accepted_payout VARCHAR,
+    percent_hbd INTEGER,
+    url TEXT,
+    permlink VARCHAR,
+    parent_permlink_or_category VARCHAR,
+    title VARCHAR,
+    body TEXT,
+    category VARCHAR,
+    depth SMALLINT,
+    promoted DECIMAL(10,3),
+    payout DECIMAL(10,3),
+    pending_payout DECIMAL(10,3),
+    payout_at TIMESTAMP,
+    is_paidout BOOLEAN,
+    children INTEGER,
+    votes INTEGER,
+    created_at TIMESTAMP,
+    updated_at TIMESTAMP,
+    rshares NUMERIC,
+    abs_rshares NUMERIC,
+    json TEXT,
+    is_hidden BOOLEAN,
+    is_grayed BOOLEAN,
+    total_votes BIGINT,
+    sc_trend FLOAT4,
+    role_title VARCHAR,
+    community_title VARCHAR,
+    role_id SMALLINT,
+    is_pinned BOOLEAN,
+    curator_payout_value VARCHAR,
+    is_muted BOOLEAN,
+    blacklists TEXT
+);
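+
+-- The bridge_get_ranked_post_* functions above return SETOF bridge_api_post, so callers can project
+-- individual columns directly; an illustrative projection (arguments are placeholders):
+--   SELECT id, author, permlink, pending_payout, blacklists
+--   FROM bridge_get_ranked_post_by_created_for_observer_communities( 'alice', '', '', 20::SMALLINT );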
+
+DROP TYPE IF EXISTS bridge_api_post_reblogs CASCADE;
+-- extension of bridge_api_post (same fields/types/order plus extras at the end)
+CREATE TYPE bridge_api_post_reblogs AS (
+    id INTEGER,
+    author VARCHAR,
+    parent_author VARCHAR,
+    author_rep BIGINT,
+    root_title VARCHAR,
+    beneficiaries JSON,
+    max_accepted_payout VARCHAR,
+    percent_hbd INTEGER,
+    url TEXT,
+    permlink VARCHAR,
+    parent_permlink_or_category VARCHAR,
+    title VARCHAR,
+    body TEXT,
+    category VARCHAR,
+    depth SMALLINT,
+    promoted DECIMAL(10,3),
+    payout DECIMAL(10,3),
+    pending_payout DECIMAL(10,3),
+    payout_at TIMESTAMP,
+    is_paidout BOOLEAN,
+    children INTEGER,
+    votes INTEGER,
+    created_at TIMESTAMP,
+    updated_at TIMESTAMP,
+    rshares NUMERIC,
+    abs_rshares NUMERIC,
+    json TEXT,
+    is_hidden BOOLEAN,
+    is_grayed BOOLEAN,
+    total_votes BIGINT,
+    sc_trend FLOAT4,
+    role_title VARCHAR,
+    community_title VARCHAR,
+    role_id SMALLINT,
+    is_pinned BOOLEAN,
+    curator_payout_value VARCHAR,
+    is_muted BOOLEAN,
+    reblogged_by VARCHAR[]
+);
+
+DROP TYPE IF EXISTS bridge_api_post_discussion CASCADE;
+-- extension of bridge_api_post (same fields/types/order plus extras at the end)
+CREATE TYPE bridge_api_post_discussion AS (
+    id INTEGER,
+    author VARCHAR,
+    parent_author VARCHAR,
+    author_rep BIGINT,
+    root_title VARCHAR,
+    beneficiaries JSON,
+    max_accepted_payout VARCHAR,
+    percent_hbd INTEGER,
+    url TEXT,
+    permlink VARCHAR,
+    parent_permlink_or_category VARCHAR,
+    title VARCHAR,
+    body TEXT,
+    category VARCHAR,
+    depth SMALLINT,
+    promoted DECIMAL(10,3),
+    payout DECIMAL(10,3),
+    pending_payout DECIMAL(10,3),
+    payout_at TIMESTAMP,
+    is_paidout BOOLEAN,
+    children INTEGER,
+    votes INTEGER,
+    created_at TIMESTAMP,
+    updated_at TIMESTAMP,
+    rshares NUMERIC,
+    abs_rshares NUMERIC,
+    json TEXT,
+    is_hidden BOOLEAN,
+    is_grayed BOOLEAN,
+    total_votes BIGINT,
+    sc_trend FLOAT4,
+    role_title VARCHAR,
+    community_title VARCHAR,
+    role_id SMALLINT,
+    is_pinned BOOLEAN,
+    curator_payout_value VARCHAR,
+    is_muted BOOLEAN,
+    parent_id INTEGER,
+    blacklists TEXT
+);
\ No newline at end of file
diff --git a/hive/db/sql_scripts/bridge_get_relationship_between_accounts.sql b/hive/db/sql_scripts/bridge_get_relationship_between_accounts.sql
new file mode 100644
index 0000000000000000000000000000000000000000..651b7248283534e361946c31bd19d20cb23102d8
--- /dev/null
+++ b/hive/db/sql_scripts/bridge_get_relationship_between_accounts.sql
@@ -0,0 +1,29 @@
+DROP FUNCTION IF EXISTS bridge_get_relationship_between_accounts;
+
+CREATE FUNCTION bridge_get_relationship_between_accounts( in _account1 VARCHAR, in _account2 VARCHAR )
+RETURNS TABLE(
+    state hive_follows.state%TYPE,
+    blacklisted hive_follows.blacklisted%TYPE,
+    follow_blacklists hive_follows.follow_blacklists%TYPE,
+    follow_muted hive_follows.follow_muted%TYPE
+)
+AS
+$function$
+DECLARE
+  __account1_id INT;
+  __account2_id INT;
+BEGIN
+  __account1_id = find_account_id( _account1, True );
+  __account2_id = find_account_id( _account2, True );
+  RETURN QUERY SELECT
+      hf.state,
+      hf.blacklisted,
+      hf.follow_blacklists,
+      hf.follow_muted
+  FROM
+      hive_follows hf
+  WHERE
+      hf.follower = __account1_id AND hf.following = __account2_id;
+END
+$function$
+language plpgsql STABLE;
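+
+-- Illustrative call (account names are placeholders): the follow/mute/blacklist state of _account1 toward _account2:
+--   SELECT * FROM bridge_get_relationship_between_accounts( 'alice', 'bob' );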
diff --git a/hive/db/sql_scripts/calculate_account_reputations.sql b/hive/db/sql_scripts/calculate_account_reputations.sql
new file mode 100644
index 0000000000000000000000000000000000000000..a00b7c647cb9a46f35b08165258b141a02765a4b
--- /dev/null
+++ b/hive/db/sql_scripts/calculate_account_reputations.sql
@@ -0,0 +1,311 @@
+DROP TYPE IF EXISTS AccountReputation CASCADE;
+
+CREATE TYPE AccountReputation AS (id int, reputation bigint, is_implicit boolean, changed boolean);
+
+DROP FUNCTION IF EXISTS calculate_account_reputations;
+
+--- Bulk version of the account reputation calculation, processing a whole block range at once.
+CREATE OR REPLACE FUNCTION calculate_account_reputations(
+  _first_block_num integer,
+  _last_block_num integer,
+  _tracked_account character varying DEFAULT NULL::character varying)
+    RETURNS SETOF accountreputation 
+    LANGUAGE 'plpgsql'
+    STABLE 
+AS $BODY$
+DECLARE
+  __vote_data RECORD;
+  __account_reputations AccountReputation[];
+  __author_rep bigint;
+  __new_author_rep bigint;
+  __voter_rep bigint;
+  __implicit_voter_rep boolean;
+  __implicit_author_rep boolean;
+  __rshares bigint;
+  __prev_rshares bigint;
+  __rep_delta bigint;
+  __prev_rep_delta bigint;
+  __traced_author int;
+  __account_name varchar;
+BEGIN
+  SELECT INTO __account_reputations ARRAY(SELECT ROW(a.id, a.reputation, a.is_implicit, false)::AccountReputation
+  FROM hive_accounts a
+  WHERE a.id != 0
+  ORDER BY a.id);
+
+--  SELECT COALESCE((SELECT ha.id FROM hive_accounts ha WHERE ha.name = _tracked_account), 0) INTO __traced_author;
+
+  FOR __vote_data IN
+    SELECT rd.id, rd.author_id, rd.voter_id, rd.rshares,
+      COALESCE((SELECT prd.rshares
+                FROM hive_reputation_data prd
+                WHERE prd.author_id = rd.author_id and prd.voter_id = rd.voter_id
+                      and prd.permlink = rd.permlink and prd.id < rd.id
+                        ORDER BY prd.id DESC LIMIT 1), 0) as prev_rshares
+      FROM hive_reputation_data rd 
+      WHERE (_first_block_num IS NULL AND _last_block_num IS NULL) OR (rd.block_num BETWEEN _first_block_num AND _last_block_num)
+      ORDER BY rd.id
+    LOOP
+      __voter_rep := __account_reputations[__vote_data.voter_id].reputation;
+      __implicit_author_rep := __account_reputations[__vote_data.author_id].is_implicit;
+    
+/*      IF __vote_data.author_id = __traced_author THEN
+           raise notice 'Processing vote <%> rshares: %, prev_rshares: %', __vote_data.id, __vote_data.rshares, __vote_data.prev_rshares;
+       select ha.name into __account_name from hive_accounts ha where ha.id = __vote_data.voter_id;
+       raise notice 'Voter `%` (%) reputation: %', __account_name, __vote_data.voter_id,  __voter_rep;
+      END IF;
+*/
+      CONTINUE WHEN __voter_rep < 0;
+
+      __implicit_voter_rep := __account_reputations[__vote_data.voter_id].is_implicit;
+    
+      __author_rep := __account_reputations[__vote_data.author_id].reputation;
+      __rshares := __vote_data.rshares;
+      __prev_rshares := __vote_data.prev_rshares;
+      __prev_rep_delta := (__prev_rshares >> 6)::bigint;
+
+      IF NOT __implicit_author_rep AND --- Author must have an explicitly set reputation to allow its correction
+         (__prev_rshares > 0 OR
+          --- Voter must have an explicitly set reputation to match hived's old conditions
+         (__prev_rshares < 0 AND NOT __implicit_voter_rep AND __voter_rep > __author_rep - __prev_rep_delta)) THEN
+            __author_rep := __author_rep - __prev_rep_delta;
+            __implicit_author_rep := __author_rep = 0;
+            __account_reputations[__vote_data.author_id] := ROW(__vote_data.author_id, __author_rep, __implicit_author_rep, true)::AccountReputation;
+ /*           IF __vote_data.author_id = __traced_author THEN
+             raise notice 'Corrected author_rep by prev_rep_delta: % to have reputation: %', __prev_rep_delta, __author_rep;
+            END IF;
+*/
+      END IF;
+
+      __implicit_voter_rep := __account_reputations[__vote_data.voter_id].is_implicit;
+      --- re-read the voter's reputation, since it can change above when author == voter
+      __voter_rep := __account_reputations[__vote_data.voter_id].reputation;
+    
+      IF __rshares > 0 OR
+         (__rshares < 0 AND NOT __implicit_voter_rep AND __voter_rep > __author_rep) THEN
+
+        __rep_delta := (__rshares >> 6)::bigint;
+        __new_author_rep = __author_rep + __rep_delta;
+        __account_reputations[__vote_data.author_id] := ROW(__vote_data.author_id, __new_author_rep, False, true)::AccountReputation;
+/*        IF __vote_data.author_id = __traced_author THEN
+          raise notice 'Changing account: <%> reputation from % to %', __vote_data.author_id, __author_rep, __new_author_rep;
+        END IF;
+*/
+      ELSE
+/*        IF __vote_data.author_id = __traced_author THEN
+            raise notice 'Ignoring reputation change due to unmet conditions... Author_rep: %, Voter_rep: %', __author_rep, __voter_rep;
+        END IF;
+*/
+      END IF;
+    END LOOP;
+
+    RETURN QUERY
+      SELECT id, reputation, is_implicit, changed
+      FROM unnest(__account_reputations)
+      WHERE reputation IS NOT NULL AND changed;
+END
+$BODY$
+;
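+
+-- Illustrative call (block numbers are placeholders): recompute reputations from votes in a block range;
+-- passing NULL for both bounds processes every row still kept in hive_reputation_data:
+--   SELECT * FROM calculate_account_reputations( 45000000, 45001200 );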
+
+DROP FUNCTION IF EXISTS calculate_account_reputations_for_block;
+
+CREATE OR REPLACE FUNCTION calculate_account_reputations_for_block(_block_num INT, _tracked_account VARCHAR DEFAULT NULL::VARCHAR)
+  RETURNS SETOF accountreputation 
+  LANGUAGE 'plpgsql'
+  VOLATILE
+AS $BODY$
+DECLARE
+  __vote_data RECORD;
+  __author_rep bigint;
+  __new_author_rep bigint;
+  __voter_rep bigint;
+  __implicit_voter_rep boolean;
+  __implicit_author_rep boolean;
+  __author_rep_changed boolean := false;
+  __rshares bigint;
+  __prev_rshares bigint;
+  __rep_delta bigint;
+  __prev_rep_delta bigint;
+  __traced_author int;
+  __account_name varchar;
+BEGIN
+  CREATE UNLOGGED TABLE IF NOT EXISTS __new_reputation_data
+  (
+      id integer,
+      author_id integer,
+      voter_id integer,
+      rshares bigint,
+      prev_rshares bigint
+  );
+
+  TRUNCATE TABLE __new_reputation_data;
+  INSERT INTO __new_reputation_data 
+    SELECT rd.id, rd.author_id, rd.voter_id, rd.rshares,
+      COALESCE((SELECT prd.rshares
+               FROM hive_reputation_data prd
+               WHERE prd.author_id = rd.author_id AND prd.voter_id = rd.voter_id
+                     AND prd.permlink = rd.permlink AND prd.id < rd.id
+                      ORDER BY prd.id DESC LIMIT 1), 0) AS prev_rshares
+    FROM hive_reputation_data rd 
+    WHERE rd.block_num = _block_num
+    ORDER BY rd.id
+    ;
+
+  CREATE UNLOGGED TABLE IF NOT EXISTS __tmp_accounts
+  (
+      id integer,
+      reputation bigint,
+      is_implicit boolean,
+      changed boolean
+  );
+
+  TRUNCATE TABLE __tmp_accounts;
+  INSERT INTO __tmp_accounts
+  SELECT ha.id, ha.reputation, ha.is_implicit, false AS changed
+  FROM __new_reputation_data rd
+  JOIN hive_accounts ha on rd.author_id = ha.id
+  UNION
+  SELECT hv.id, hv.reputation, hv.is_implicit, false as changed
+  FROM __new_reputation_data rd
+  JOIN hive_accounts hv on rd.voter_id = hv.id
+  ;
+
+--  SELECT COALESCE((SELECT ha.id FROM hive_accounts ha WHERE ha.name = _tracked_account), 0) INTO __traced_author;
+
+  FOR __vote_data IN
+      SELECT rd.id, rd.author_id, rd.voter_id, rd.rshares, rd.prev_rshares
+      FROM __new_reputation_data rd 
+      ORDER BY rd.id
+    LOOP
+      SELECT INTO __voter_rep, __implicit_voter_rep ha.reputation, ha.is_implicit 
+      FROM __tmp_accounts ha where ha.id = __vote_data.voter_id;
+      SELECT INTO __author_rep, __implicit_author_rep ha.reputation, ha.is_implicit 
+      FROM __tmp_accounts ha where ha.id = __vote_data.author_id;
+
+/*      IF __vote_data.author_id = __traced_author THEN
+           raise notice 'Processing vote <%> rshares: %, prev_rshares: %', __vote_data.id, __vote_data.rshares, __vote_data.prev_rshares;
+       select ha.name into __account_name from hive_accounts ha where ha.id = __vote_data.voter_id;
+       raise notice 'Voter `%` (%) reputation: %', __account_name, __vote_data.voter_id,  __voter_rep;
+      END IF;
+*/
+      CONTINUE WHEN __voter_rep < 0;
+    
+      __rshares := __vote_data.rshares;
+      __prev_rshares := __vote_data.prev_rshares;
+      __prev_rep_delta := (__prev_rshares >> 6)::bigint;
+
+      IF NOT __implicit_author_rep AND --- Author must have an explicitly set reputation to allow its correction
+         (__prev_rshares > 0 OR
+          --- Voter must have an explicitly set reputation to match hived's old conditions
+         (__prev_rshares < 0 AND NOT __implicit_voter_rep AND __voter_rep > __author_rep - __prev_rep_delta)) THEN
+            __author_rep := __author_rep - __prev_rep_delta;
+            __implicit_author_rep := __author_rep = 0;
+            __author_rep_changed = true;
+            if __vote_data.author_id = __vote_data.voter_id THEN
+              __implicit_voter_rep := __implicit_author_rep;
+              __voter_rep := __author_rep;
+            end if;
+
+ /*           IF __vote_data.author_id = __traced_author THEN
+             raise notice 'Corrected author_rep by prev_rep_delta: % to have reputation: %', __prev_rep_delta, __author_rep;
+            END IF;
+*/
+      END IF;
+    
+      IF __rshares > 0 OR
+         (__rshares < 0 AND NOT __implicit_voter_rep AND __voter_rep > __author_rep) THEN
+
+        __rep_delta := (__rshares >> 6)::bigint;
+        __new_author_rep = __author_rep + __rep_delta;
+        __author_rep_changed = true;
+
+        UPDATE __tmp_accounts
+        SET reputation = __new_author_rep,
+            is_implicit = False,
+            changed = true
+        WHERE id = __vote_data.author_id;
+
+/*        IF __vote_data.author_id = __traced_author THEN
+          raise notice 'Changing account: <%> reputation from % to %', __vote_data.author_id, __author_rep, __new_author_rep;
+        END IF;
+*/
+      ELSE
+/*        IF __vote_data.author_id = __traced_author THEN
+            raise notice 'Ignoring reputation change due to unmet conditions... Author_rep: %, Voter_rep: %', __author_rep, __voter_rep;
+        END IF;
+*/
+      END IF;
+    END LOOP;
+
+    RETURN QUERY SELECT id, reputation, is_implicit, changed
+    FROM __tmp_accounts
+    WHERE reputation IS NOT NULL AND changed;
+END
+$BODY$
+;
+
+DROP FUNCTION IF EXISTS truncate_account_reputation_data;
+
+CREATE OR REPLACE FUNCTION truncate_account_reputation_data(
+  in _day_limit INTERVAL)
+  RETURNS VOID 
+  LANGUAGE 'plpgsql'
+  VOLATILE 
+AS $BODY$
+DECLARE
+  __block_num_limit INT;
+
+BEGIN
+  __block_num_limit = block_before_head(_day_limit);
+  DELETE FROM hive_reputation_data hpd
+  WHERE hpd.block_num < __block_num_limit
+  ;
+END
+$BODY$
+;
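+
+-- Illustrative call (interval is a placeholder): drop reputation vote data older than the given interval:
+--   SELECT truncate_account_reputation_data( '30 days'::interval );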
+
+
+DROP FUNCTION IF EXISTS update_account_reputations;
+
+CREATE OR REPLACE FUNCTION update_account_reputations(
+  in _first_block_num INTEGER,
+  in _last_block_num INTEGER,
+  in _force_data_truncate BOOLEAN)
+  RETURNS VOID 
+  LANGUAGE 'plpgsql'
+  VOLATILE 
+AS $BODY$
+DECLARE
+  __truncate_interval interval := '30 days'::interval;
+  __truncate_block_count INT := 1*24*1200*3; --- 1day
+
+BEGIN
+  UPDATE hive_accounts urs
+  SET reputation = ds.reputation,
+      is_implicit = ds.is_implicit
+  FROM 
+  (
+    SELECT p.id as account_id, p.reputation, p.is_implicit
+    FROM calculate_account_reputations(_first_block_num, _last_block_num) p
+    WHERE _first_block_num IS NULL OR _last_block_num IS NULL OR _first_block_num != _last_block_num
+
+    UNION ALL
+
+    SELECT p.id as account_id, p.reputation, p.is_implicit
+    FROM calculate_account_reputations_for_block(_first_block_num) p
+    WHERE _first_block_num IS NOT NULL AND _last_block_num IS NOT NULL AND _first_block_num = _last_block_num
+
+  ) ds
+  WHERE urs.id = ds.account_id AND (urs.reputation != ds.reputation OR urs.is_implicit != ds.is_implicit)
+  ;
+
+  IF _force_data_truncate or _last_block_num IS NULL OR MOD(_last_block_num, __truncate_block_count) = 0 THEN
+    PERFORM truncate_account_reputation_data(__truncate_interval);
+  END IF
+  ;
+END
+$BODY$
+;
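+
+-- Illustrative call (block numbers are placeholders): equal first/last block numbers take the single-block
+-- path, differing or NULL bounds take the bulk path; the third argument forces truncation of old data when TRUE:
+--   SELECT update_account_reputations( 45000000, 45000000, FALSE );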
+
diff --git a/hive/db/sql_scripts/condenser_api_post_ex_type.sql b/hive/db/sql_scripts/condenser_api_post_ex_type.sql
new file mode 100644
index 0000000000000000000000000000000000000000..74d41637412df3679dff73d14e9a3953b4e446e0
--- /dev/null
+++ b/hive/db/sql_scripts/condenser_api_post_ex_type.sql
@@ -0,0 +1,44 @@
+DROP TYPE IF EXISTS condenser_api_post_ex CASCADE;
+-- type for fat node style post of get_content()
+CREATE TYPE condenser_api_post_ex AS (
+    id INT,
+    author VARCHAR(16),
+    permlink VARCHAR(255),
+    author_rep BIGINT,
+    title VARCHAR(512),
+    body TEXT,
+    category VARCHAR(255),
+    depth SMALLINT,
+    promoted DECIMAL(10,3),
+    payout DECIMAL(10,3),
+    pending_payout DECIMAL(10,3),
+    payout_at TIMESTAMP,
+    is_paidout BOOLEAN,
+    children INT,
+    votes INT,
+    created_at TIMESTAMP,
+    updated_at TIMESTAMP,
+    rshares NUMERIC,
+    abs_rshares NUMERIC,
+    json TEXT,
+    is_hidden BOOLEAN,
+    is_grayed BOOLEAN,
+    total_votes BIGINT,
+    net_votes BIGINT,
+    total_vote_weight NUMERIC,
+    parent_author VARCHAR(16),
+    parent_permlink_or_category VARCHAR(255),
+    curator_payout_value VARCHAR(30),
+    root_author VARCHAR(16),
+    root_permlink VARCHAR(255),
+    max_accepted_payout VARCHAR(30),
+    percent_hbd INT,
+    allow_replies BOOLEAN,
+    allow_votes BOOLEAN,
+    allow_curation_rewards BOOLEAN,
+    beneficiaries JSON,
+    url TEXT,
+    root_title VARCHAR(512),
+    active TIMESTAMP,
+    author_rewards BIGINT
+);
diff --git a/hive/db/sql_scripts/condenser_api_post_type.sql b/hive/db/sql_scripts/condenser_api_post_type.sql
new file mode 100644
index 0000000000000000000000000000000000000000..ece5017675c5ab1b3f3f5c667512527592fc6406
--- /dev/null
+++ b/hive/db/sql_scripts/condenser_api_post_type.sql
@@ -0,0 +1,32 @@
+DROP TYPE IF EXISTS condenser_api_post CASCADE;
+-- type for regular condenser_api posts
+CREATE TYPE condenser_api_post AS (
+    id INT,
+    entry_id INT, -- used for paging with offset (otherwise can be any value)
+    author VARCHAR(16),
+    permlink VARCHAR(255),
+    author_rep BIGINT,
+    title VARCHAR(512),
+    body TEXT,
+    category VARCHAR(255),
+    depth SMALLINT,
+    promoted DECIMAL(10,3),
+    payout DECIMAL(10,3),
+    pending_payout DECIMAL(10,3),
+    payout_at TIMESTAMP,
+    is_paidout BOOLEAN,
+    children INT,
+    created_at TIMESTAMP,
+    updated_at TIMESTAMP,
+    reblogged_at TIMESTAMP, -- used when post data is combined with hive_feed_cache (otherwise can be any date)
+    rshares NUMERIC,
+    json TEXT,
+    parent_author VARCHAR(16),
+    parent_permlink_or_category VARCHAR(255),
+    curator_payout_value VARCHAR(30),
+    max_accepted_payout VARCHAR(30),
+    percent_hbd INT,
+    beneficiaries JSON,
+    url TEXT,
+    root_title VARCHAR(512)
+);
diff --git a/hive/db/sql_scripts/condenser_follows.sql b/hive/db/sql_scripts/condenser_follows.sql
new file mode 100644
index 0000000000000000000000000000000000000000..9c90c4fec640449f2e6fed619e22f5270379c7e7
--- /dev/null
+++ b/hive/db/sql_scripts/condenser_follows.sql
@@ -0,0 +1,71 @@
+DROP FUNCTION IF EXISTS condenser_get_follow_count;
+CREATE FUNCTION condenser_get_follow_count( in _account VARCHAR,
+  out following hive_accounts.following%TYPE, out followers hive_accounts.followers%TYPE )
+AS
+$function$
+DECLARE
+  __account_id INT;
+BEGIN
+  __account_id = find_account_id( _account, True );
+  SELECT ha.following, ha.followers INTO following, followers FROM hive_accounts ha WHERE ha.id = __account_id;
+  -- following equals (SELECT COUNT(*) FROM hive_follows WHERE state = 1 AND following = __account_id)
+  -- followers equals (SELECT COUNT(*) FROM hive_follows WHERE state = 1 AND follower  = __account_id)
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS condenser_get_followers;
+-- list of account names that follow/ignore a given account
+CREATE FUNCTION condenser_get_followers( in _account VARCHAR, in _start VARCHAR, in _type INT, in _limit INT )
+RETURNS SETOF hive_accounts.name%TYPE
+AS
+$function$
+DECLARE
+  __account_id INT;
+  __start_id INT;
+BEGIN
+  __account_id = find_account_id( _account, True );
+  __start_id = find_account_id( _start, True );
+  IF __start_id <> 0 THEN
+      SELECT INTO __start_id ( SELECT id FROM hive_follows WHERE following = __account_id AND follower = __start_id );
+  END IF;
+  RETURN QUERY SELECT
+     ha.name
+  FROM
+     hive_follows hf
+     JOIN hive_accounts ha ON hf.follower = ha.id
+  WHERE
+     hf.following = __account_id AND hf.state = _type AND ( __start_id = 0 OR hf.id < __start_id )
+  ORDER BY hf.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS condenser_get_following;
+-- list of account names followed/ignored by a given account
+CREATE FUNCTION condenser_get_following( in _account VARCHAR, in _start VARCHAR, in _type INT, in _limit INT )
+RETURNS SETOF hive_accounts.name%TYPE
+AS
+$function$
+DECLARE
+  __account_id INT;
+  __start_id INT;
+BEGIN
+  __account_id = find_account_id( _account, True );
+  __start_id = find_account_id( _start, True );
+  IF __start_id <> 0 THEN
+      SELECT INTO __start_id ( SELECT id FROM hive_follows WHERE follower = __account_id AND following = __start_id );
+  END IF;
+  RETURN QUERY SELECT
+     ha.name
+  FROM
+     hive_follows hf
+     JOIN hive_accounts ha ON hf.following = ha.id
+  WHERE
+     hf.follower = __account_id AND hf.state = _type AND ( __start_id = 0 OR hf.id < __start_id )
+  ORDER BY hf.id DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/condenser_get_account_reputations.sql b/hive/db/sql_scripts/condenser_get_account_reputations.sql
new file mode 100644
index 0000000000000000000000000000000000000000..e390998cad46f7a723337f9b80a8dc9739d8cee4
--- /dev/null
+++ b/hive/db/sql_scripts/condenser_get_account_reputations.sql
@@ -0,0 +1,27 @@
+DROP FUNCTION IF EXISTS condenser_get_account_reputations;
+
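+-- accounts with their reputations, ordered by account name, starting at _account_lower_bound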
+CREATE OR REPLACE FUNCTION condenser_get_account_reputations(
+  in _account_lower_bound VARCHAR,
+  in _limit INTEGER
+)
+RETURNS TABLE
+(
+    name hive_accounts.name%TYPE,
+    reputation hive_accounts.reputation%TYPE
+)
+AS
+$function$
+DECLARE
+
+BEGIN
+
+    RETURN QUERY SELECT
+      ha.name, ha.reputation
+    FROM hive_accounts ha
+    WHERE ha.name >= _account_lower_bound AND ha.id != 0 -- don't include artificial empty account
+    ORDER BY ha.name
+    LIMIT _limit;
+
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/condenser_get_blog.sql b/hive/db/sql_scripts/condenser_get_blog.sql
new file mode 100644
index 0000000000000000000000000000000000000000..f59dfe6c90ecef8c7187a0294610717c00ca90a2
--- /dev/null
+++ b/hive/db/sql_scripts/condenser_get_blog.sql
@@ -0,0 +1,129 @@
+DROP FUNCTION IF EXISTS condenser_get_blog_helper CASCADE;
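+-- converts the (_last, _limit) paging arguments into an (account_id, offset, limit) window over the blogger's feed cache entries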
+CREATE FUNCTION condenser_get_blog_helper( in _blogger VARCHAR, in _last INT, in _limit INT,
+                                           out _account_id INT, out _offset INT, out _new_limit INT )
+AS
+$function$
+BEGIN
+  _account_id = find_account_id( _blogger, True );
+  IF _last < 0 THEN -- caller wants "most recent" page
+      SELECT INTO _last ( SELECT COUNT(1) - 1 FROM hive_feed_cache hfc WHERE hfc.account_id = _account_id );
+      _offset = _last - _limit + 1;
+      IF _offset < 0 THEN
+        _offset = 0;
+      END IF;
+      _new_limit = _limit;
+  ELSIF _last + 1 < _limit THEN -- bad call, but recoverable
+      _offset = 0;
+      _new_limit = _last + 1;
+  ELSE -- normal call
+      _offset = _last - _limit + 1;
+      _new_limit = _limit;
+  END IF;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS condenser_get_blog;
+-- blog posts [ _last - _limit + 1, _last ] oldest first (reversed by the caller)
+CREATE FUNCTION condenser_get_blog( in _blogger VARCHAR, in _last INT, in _limit INT )
+RETURNS SETOF condenser_api_post
+AS
+$function$
+DECLARE
+  __account_id INT;
+  __offset INT;
+BEGIN
+  SELECT h.* INTO __account_id, __offset, _limit FROM condenser_get_blog_helper( _blogger, _last, _limit ) h;
+  RETURN QUERY SELECT
+      hp.id,
+      blog.entry_id::INT,
+      hp.author,
+      hp.permlink,
+      hp.author_rep,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.created_at,
+      hp.updated_at,
+      (
+        CASE hp.author_id = __account_id
+          WHEN True THEN '1970-01-01T00:00:00'::timestamp
+          ELSE blog.created_at
+        END
+      ) as reblogged_at,
+      hp.rshares,
+      hp.json,
+      hp.parent_author,
+      hp.parent_permlink_or_category,
+      hp.curator_payout_value,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.beneficiaries,
+      hp.url,
+      hp.root_title
+  FROM
+  (
+      SELECT
+          hfc.created_at, hfc.post_id, row_number() over (ORDER BY hfc.created_at ASC, hfc.post_id ASC) - 1 as entry_id
+      FROM
+          hive_feed_cache hfc
+      WHERE
+          hfc.account_id = __account_id
+      ORDER BY hfc.created_at ASC, hfc.post_id ASC
+      LIMIT _limit
+      OFFSET __offset
+  ) as blog
+  JOIN hive_posts_view hp ON hp.id = blog.post_id
+  ORDER BY blog.created_at ASC, blog.post_id ASC;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS condenser_get_blog_entries;
+-- blog entries [ _last - _limit + 1, _last ] oldest first (reversed by the caller)
+CREATE FUNCTION condenser_get_blog_entries( in _blogger VARCHAR, in _last INT, in _limit INT )
+RETURNS TABLE( entry_id INT, author hive_accounts.name%TYPE, permlink hive_permlink_data.permlink%TYPE, reblogged_at TIMESTAMP )
+AS
+$function$
+DECLARE
+  __account_id INT;
+  __offset INT;
+BEGIN
+  SELECT h.* INTO __account_id, __offset, _limit FROM condenser_get_blog_helper( _blogger, _last, _limit ) h;
+  RETURN QUERY SELECT
+      blog.entry_id::INT,
+      ha.name as author,
+      hpd.permlink,
+      (
+        CASE hp.author_id = __account_id
+          WHEN True THEN '1970-01-01T00:00:00'::timestamp
+          ELSE blog.created_at
+        END
+      ) as reblogged_at
+  FROM
+  (
+      SELECT
+          hfc.created_at, hfc.post_id, row_number() over (ORDER BY hfc.created_at ASC, hfc.post_id ASC) - 1 as entry_id
+      FROM
+          hive_feed_cache hfc
+      WHERE
+          hfc.account_id = __account_id
+      ORDER BY hfc.created_at ASC, hfc.post_id ASC
+      LIMIT _limit
+      OFFSET __offset
+  ) as blog
+  JOIN hive_posts hp ON hp.id = blog.post_id
+  JOIN hive_accounts ha ON ha.id = hp.author_id
+  JOIN hive_permlink_data hpd ON hpd.id = hp.permlink_id
+  ORDER BY blog.created_at ASC, blog.post_id ASC;
+END
+$function$
+language plpgsql STABLE;
+
diff --git a/hive/db/sql_scripts/condenser_get_by_account_comments.sql b/hive/db/sql_scripts/condenser_get_by_account_comments.sql
new file mode 100644
index 0000000000000000000000000000000000000000..e553dc4fe31683e5825022bdabd7142365d2ee67
--- /dev/null
+++ b/hive/db/sql_scripts/condenser_get_by_account_comments.sql
@@ -0,0 +1,64 @@
+DROP FUNCTION IF EXISTS condenser_get_by_account_comments;
+
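+-- comments written by _author, newest first; a non-empty _permlink marks the comment to start paging from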
+CREATE OR REPLACE FUNCTION condenser_get_by_account_comments(
+  in _author VARCHAR,
+  in _permlink VARCHAR,
+  in _limit INTEGER
+)
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INTEGER := 0;
+BEGIN
+
+  IF _permlink <> '' THEN
+    __post_id = find_comment_id( _author, _permlink, True );
+  END IF;
+
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      NULL
+    FROM hive_posts_view hp
+    WHERE ( hp.author = _author ) AND ( ( __post_id = 0 ) OR ( hp.id <= __post_id ) ) AND hp.depth > 0
+    ORDER BY hp.id DESC
+    LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/condenser_get_by_blog.sql b/hive/db/sql_scripts/condenser_get_by_blog.sql
new file mode 100644
index 0000000000000000000000000000000000000000..8939d1b56773efe73ffef7a6afa4395beeca2e2c
--- /dev/null
+++ b/hive/db/sql_scripts/condenser_get_by_blog.sql
@@ -0,0 +1,76 @@
+DROP FUNCTION IF EXISTS condenser_get_by_blog;
+
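+-- blog of _account (own posts and reblogs, via hive_feed_cache), newest first; _author/_permlink mark the entry to start paging from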
+CREATE OR REPLACE FUNCTION condenser_get_by_blog(
+  in _account VARCHAR,
+  in _author VARCHAR,
+  in _permlink VARCHAR,
+  in _limit INTEGER
+)
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __post_id INTEGER := 0;
+  __account_id INTEGER := find_account_id( _account, True );
+  __created_at TIMESTAMP;
+BEGIN
+
+  IF _permlink <> '' THEN
+    __post_id = find_comment_id( _author, _permlink, True );
+    __created_at = 
+    (
+      SELECT created_at
+      FROM hive_feed_cache
+      WHERE account_id = __account_id
+      AND post_id = __post_id
+    );
+  END IF;
+
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      NULL
+    FROM hive_posts_view hp
+    JOIN hive_feed_cache hfc ON hp.id = hfc.post_id
+    WHERE hfc.account_id = __account_id AND ( ( __post_id = 0 ) OR ( hfc.created_at <= __created_at ) )
+    ORDER BY hfc.created_at DESC, hp.id DESC
+    LIMIT _limit;
+
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/condenser_get_by_blog_without_reblog.sql b/hive/db/sql_scripts/condenser_get_by_blog_without_reblog.sql
new file mode 100644
index 0000000000000000000000000000000000000000..41d8ce36eef58924dc9dee8c59d7dd242c9b9d0d
--- /dev/null
+++ b/hive/db/sql_scripts/condenser_get_by_blog_without_reblog.sql
@@ -0,0 +1,62 @@
+DROP FUNCTION IF EXISTS condenser_get_by_blog_without_reblog;
+
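+-- top-level posts authored by _author (reblogs excluded), newest first; a non-empty _permlink marks the post to start paging from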
+CREATE OR REPLACE FUNCTION condenser_get_by_blog_without_reblog(
+  in _author VARCHAR,
+  in _permlink VARCHAR,
+  in _limit INTEGER
+)
+RETURNS SETOF bridge_api_post
+AS
+$function$
+DECLARE
+  __author_id INT;
+  __post_id INT;
+BEGIN
+  __author_id = find_account_id( _author, True );
+  __post_id = find_comment_id( _author, _permlink, _permlink <> '' );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.parent_author,
+      hp.author_rep,
+      hp.root_title,
+      hp.beneficiaries,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.url,
+      hp.permlink,
+      hp.parent_permlink_or_category,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.sc_trend,
+      hp.role_title,
+      hp.community_title,
+      hp.role_id,
+      hp.is_pinned,
+      hp.curator_payout_value,
+      hp.is_muted,
+      NULL
+    FROM hive_posts_view hp
+    WHERE hp.author_id = __author_id AND hp.depth = 0 AND ( ( __post_id = 0 ) OR ( hp.id < __post_id ) )
+    ORDER BY hp.id DESC
+    LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/condenser_get_content.sql b/hive/db/sql_scripts/condenser_get_content.sql
new file mode 100644
index 0000000000000000000000000000000000000000..a5a32240714e40b0bc0f420212d4160943291f6d
--- /dev/null
+++ b/hive/db/sql_scripts/condenser_get_content.sql
@@ -0,0 +1,113 @@
+DROP FUNCTION IF EXISTS condenser_get_content;
+CREATE FUNCTION condenser_get_content( in _author VARCHAR, in _permlink VARCHAR )
+RETURNS SETOF condenser_api_post_ex
+AS
+$function$
+DECLARE
+  __post_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.permlink,
+      hp.author_rep,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.net_votes,
+      hp.total_vote_weight,
+      hp.parent_author,
+      hp.parent_permlink_or_category,
+      hp.curator_payout_value,
+      hp.root_author,
+      hp.root_permlink,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.allow_replies,
+      hp.allow_votes,
+      hp.allow_curation_rewards,
+      hp.beneficiaries,
+      hp.url,
+      hp.root_title,
+      hp.active,
+      hp.author_rewards
+    FROM hive_posts_view hp
+    WHERE hp.id = __post_id;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS condenser_get_content_replies;
+CREATE FUNCTION condenser_get_content_replies( in _author VARCHAR, in _permlink VARCHAR )
+RETURNS SETOF condenser_api_post_ex
+AS
+$function$
+DECLARE
+  __post_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+  RETURN QUERY SELECT
+      hp.id,
+      hp.author,
+      hp.permlink,
+      hp.author_rep,
+      hp.title,
+      hp.body,
+      hp.category,
+      hp.depth,
+      hp.promoted,
+      hp.payout,
+      hp.pending_payout,
+      hp.payout_at,
+      hp.is_paidout,
+      hp.children,
+      hp.votes,
+      hp.created_at,
+      hp.updated_at,
+      hp.rshares,
+      hp.abs_rshares,
+      hp.json,
+      hp.is_hidden,
+      hp.is_grayed,
+      hp.total_votes,
+      hp.net_votes,
+      hp.total_vote_weight,
+      hp.parent_author,
+      hp.parent_permlink_or_category,
+      hp.curator_payout_value,
+      hp.root_author,
+      hp.root_permlink,
+      hp.max_accepted_payout,
+      hp.percent_hbd,
+      hp.allow_replies,
+      hp.allow_votes,
+      hp.allow_curation_rewards,
+      hp.beneficiaries,
+      hp.url,
+      hp.root_title,
+      hp.active,
+      hp.author_rewards
+    FROM hive_posts_view hp
+    WHERE hp.parent_id = __post_id
+    ORDER BY hp.id;
+END
+$function$
+language plpgsql STABLE;
+
diff --git a/hive/db/sql_scripts/condenser_get_names_by_reblogged.sql b/hive/db/sql_scripts/condenser_get_names_by_reblogged.sql
new file mode 100644
index 0000000000000000000000000000000000000000..4ff82dd365fa69432cc973de10c99a9a08277cc1
--- /dev/null
+++ b/hive/db/sql_scripts/condenser_get_names_by_reblogged.sql
@@ -0,0 +1,24 @@
+DROP FUNCTION IF EXISTS condenser_get_names_by_reblogged;
+
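+-- names of accounts having the given post in their feed cache (the author plus rebloggers)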
+CREATE FUNCTION condenser_get_names_by_reblogged( in _author VARCHAR, in _permlink VARCHAR )
+RETURNS TABLE(
+    names hive_accounts.name%TYPE
+)
+AS
+$function$
+DECLARE
+  __post_id INT;
+BEGIN
+  __post_id = find_comment_id( _author, _permlink, True );
+
+  RETURN QUERY SELECT
+    ha.name
+  FROM hive_accounts ha
+  JOIN hive_feed_cache hfc ON ha.id = hfc.account_id
+  WHERE hfc.post_id = __post_id
+  ORDER BY ha.name
+  ;
+
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/condenser_tags.sql b/hive/db/sql_scripts/condenser_tags.sql
new file mode 100644
index 0000000000000000000000000000000000000000..684da1c61df6f754a746b40de74e09e15265119d
--- /dev/null
+++ b/hive/db/sql_scripts/condenser_tags.sql
@@ -0,0 +1,50 @@
+DROP FUNCTION IF EXISTS condenser_get_top_trending_tags_summary;
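+-- categories of not-yet-paid-out posts, ordered by total (pending) payout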
+CREATE FUNCTION condenser_get_top_trending_tags_summary( in _limit INT )
+RETURNS SETOF VARCHAR
+AS
+$function$
+BEGIN
+  RETURN QUERY SELECT
+      hcd.category
+  FROM
+      hive_category_data hcd
+      JOIN hive_posts hp ON hp.category_id = hcd.id
+  WHERE hp.counter_deleted = 0 AND NOT hp.is_paidout
+  GROUP BY hcd.category
+  ORDER BY SUM(hp.payout + hp.pending_payout) DESC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
+
+DROP FUNCTION IF EXISTS condenser_get_trending_tags;
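+-- trending categories with their post counts and payout sums; a non-empty _category marks where paging resumes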
+CREATE FUNCTION condenser_get_trending_tags( in _category VARCHAR, in _limit INT )
+RETURNS TABLE( category VARCHAR, total_posts BIGINT, top_posts BIGINT, total_payouts hive_posts.payout%TYPE )
+AS
+$function$
+DECLARE
+  __category_id INT;
+  __payout_limit hive_posts.payout%TYPE;
+BEGIN
+  __category_id = find_category_id( _category, True );
+  IF __category_id <> 0 THEN
+      SELECT SUM(hp.payout + hp.pending_payout) INTO __payout_limit
+      FROM hive_posts hp
+      WHERE hp.category_id = __category_id AND hp.counter_deleted = 0 AND NOT hp.is_paidout;
+  END IF;
+  RETURN QUERY SELECT
+      hcd.category,
+      COUNT(*) AS total_posts,
+      SUM(CASE WHEN hp.depth = 0 THEN 1 ELSE 0 END) AS top_posts,
+      SUM(hp.payout + hp.pending_payout) AS total_payouts
+  FROM
+      hive_posts hp
+      JOIN hive_category_data hcd ON hcd.id = hp.category_id
+  WHERE NOT hp.is_paidout AND hp.counter_deleted = 0
+  GROUP BY hcd.category
+  HAVING __category_id = 0 OR SUM(hp.payout + hp.pending_payout) < __payout_limit OR ( SUM(hp.payout + hp.pending_payout) = __payout_limit AND hcd.category > _category )
+  ORDER BY SUM(hp.payout + hp.pending_payout) DESC, hcd.category ASC
+  LIMIT _limit;
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/database_api_list_comments.sql b/hive/db/sql_scripts/database_api_list_comments.sql
new file mode 100644
index 0000000000000000000000000000000000000000..2087c1a22ecd997718283f6b1be726702ab50dfe
--- /dev/null
+++ b/hive/db/sql_scripts/database_api_list_comments.sql
@@ -0,0 +1,360 @@
+DROP TYPE IF EXISTS database_api_post CASCADE;
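+-- row type returned by the database_api list_comments_* functions in this file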
+CREATE TYPE database_api_post AS (
+  id INT,
+  community_id INT,
+  author VARCHAR(16),
+  permlink VARCHAR(255),
+  title VARCHAR(512),
+  body TEXT,
+  category VARCHAR(255),
+  depth SMALLINT,
+  promoted DECIMAL(10,3),
+  payout DECIMAL(10,3),
+  last_payout_at TIMESTAMP,
+  cashout_time TIMESTAMP,
+  is_paidout BOOLEAN,
+  children INT,
+  votes INT,
+  created_at TIMESTAMP,
+  updated_at TIMESTAMP,
+  rshares NUMERIC,
+  json TEXT,
+  is_hidden BOOLEAN,
+  is_grayed BOOLEAN,
+  total_votes BIGINT,
+  net_votes BIGINT,
+  total_vote_weight NUMERIC,
+  parent_author VARCHAR(16),
+  parent_permlink_or_category VARCHAR(255),
+  curator_payout_value VARCHAR(30),
+  root_author VARCHAR(16),
+  root_permlink VARCHAR(255),
+  max_accepted_payout VARCHAR(30),
+  percent_hbd INT,
+  allow_replies BOOLEAN,
+  allow_votes BOOLEAN,
+  allow_curation_rewards BOOLEAN,
+  beneficiaries JSON,
+  url TEXT,
+  root_title VARCHAR(512),
+  abs_rshares NUMERIC,
+  active TIMESTAMP,
+  author_rewards BIGINT
+)
+;
+
+DROP FUNCTION IF EXISTS list_comments_by_permlink(character varying, character varying, int)
+;
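+-- comments ordered by author then permlink, starting at the given _author/_permlink pair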
+CREATE OR REPLACE FUNCTION list_comments_by_permlink(
+  in _author hive_accounts.name%TYPE,
+  in _permlink hive_permlink_data.permlink%TYPE,
+  in _limit INT)
+  RETURNS SETOF database_api_post
+  LANGUAGE sql
+  STABLE
+  AS
+  $function$
+    SELECT
+        hp.id, hp.community_id, hp.author, hp.permlink, hp.title, hp.body,
+        hp.category, hp.depth, hp.promoted, hp.payout, hp.last_payout_at, hp.cashout_time, hp.is_paidout,
+        hp.children, hp.votes, hp.created_at, hp.updated_at, hp.rshares, hp.json,
+        hp.is_hidden, hp.is_grayed, hp.total_votes, hp.net_votes, hp.total_vote_weight,
+        hp.parent_author, hp.parent_permlink_or_category, hp.curator_payout_value, hp.root_author, hp.root_permlink,
+        hp.max_accepted_payout, hp.percent_hbd, hp.allow_replies, hp.allow_votes,
+        hp.allow_curation_rewards, hp.beneficiaries, hp.url, hp.root_title, hp.abs_rshares,
+        hp.active, hp.author_rewards
+    FROM
+        hive_posts_view hp
+    INNER JOIN
+    (
+        SELECT hp1.id
+        FROM
+            hive_posts_api_helper hp1
+        INNER JOIN hive_posts hp2 ON hp2.id = hp1.id
+        WHERE
+            hp2.counter_deleted = 0 AND NOT hp2.is_muted AND hp1.id != 0
+            AND hp1.author_s_permlink >= _author || '/' || _permlink
+        ORDER BY
+            hp1.author_s_permlink
+        LIMIT
+            _limit
+    ) ds ON ds.id = hp.id
+    ORDER BY
+      hp.author, hp.permlink
+  $function$
+;
+
+DROP FUNCTION IF EXISTS list_comments_by_cashout_time(timestamp, character varying, character varying, int)
+;
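+-- comments ordered by cashout time then id, starting at (_cashout_time, _author/_permlink)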
+CREATE OR REPLACE FUNCTION list_comments_by_cashout_time(
+  in _cashout_time timestamp,
+  in _author hive_accounts.name%TYPE,
+  in _permlink hive_permlink_data.permlink%TYPE,
+  in _limit INT)
+  RETURNS SETOF database_api_post
+  AS
+  $function$
+  DECLARE
+    __post_id INT;
+  BEGIN
+    __post_id = find_comment_id(_author,_permlink, True);
+    RETURN QUERY
+    SELECT
+        hp.id, hp.community_id, hp.author, hp.permlink, hp.title, hp.body,
+        hp.category, hp.depth, hp.promoted, hp.payout, hp.last_payout_at, hp.cashout_time, hp.is_paidout,
+        hp.children, hp.votes, hp.created_at, hp.updated_at, hp.rshares, hp.json,
+        hp.is_hidden, hp.is_grayed, hp.total_votes, hp.net_votes, hp.total_vote_weight,
+        hp.parent_author, hp.parent_permlink_or_category, hp.curator_payout_value, hp.root_author, hp.root_permlink,
+        hp.max_accepted_payout, hp.percent_hbd, hp.allow_replies, hp.allow_votes,
+        hp.allow_curation_rewards, hp.beneficiaries, hp.url, hp.root_title, hp.abs_rshares,
+        hp.active, hp.author_rewards
+    FROM
+        hive_posts_view hp
+    INNER JOIN
+    (
+        SELECT
+            hp1.id
+        FROM
+            hive_posts hp1
+        WHERE
+            hp1.counter_deleted = 0
+            AND NOT hp1.is_muted
+            AND hp1.id != 0
+            AND (
+              hp1.cashout_time > _cashout_time
+              OR ( hp1.cashout_time = _cashout_time AND hp1.id >= __post_id )
+            )
+        ORDER BY
+            hp1.cashout_time ASC,
+            hp1.id ASC
+        LIMIT
+            _limit
+    ) ds ON ds.id = hp.id
+    ORDER BY
+        hp.cashout_time ASC,
+        hp.id ASC
+    ;
+  END
+  $function$
+  LANGUAGE plpgsql
+;
+
+DROP FUNCTION IF EXISTS list_comments_by_root(character varying, character varying, character varying, character varying, int)
+;
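+-- comments sharing the given root post, ordered by id, starting at the given start post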
+CREATE OR REPLACE FUNCTION list_comments_by_root(
+  in _root_author hive_accounts.name%TYPE,
+  in _root_permlink hive_permlink_data.permlink%TYPE,
+  in _start_post_author hive_accounts.name%TYPE,
+  in _start_post_permlink hive_permlink_data.permlink%TYPE,
+  in _limit INT)
+  RETURNS SETOF database_api_post
+  AS
+  $function$
+  DECLARE
+    __root_id INT;
+    __post_id INT;
+  BEGIN
+    __root_id = find_comment_id(_root_author, _root_permlink, True);
+    __post_id = find_comment_id(_start_post_author, _start_post_permlink, True);
+    RETURN QUERY
+    SELECT
+      hp.id, hp.community_id, hp.author, hp.permlink, hp.title, hp.body,
+      hp.category, hp.depth, hp.promoted, hp.payout, hp.last_payout_at, hp.cashout_time, hp.is_paidout,
+      hp.children, hp.votes, hp.created_at, hp.updated_at, hp.rshares, hp.json,
+      hp.is_hidden, hp.is_grayed, hp.total_votes, hp.net_votes, hp.total_vote_weight,
+      hp.parent_author, hp.parent_permlink_or_category, hp.curator_payout_value, hp.root_author, hp.root_permlink,
+      hp.max_accepted_payout, hp.percent_hbd, hp.allow_replies, hp.allow_votes,
+      hp.allow_curation_rewards, hp.beneficiaries, hp.url, hp.root_title, hp.abs_rshares,
+      hp.active, hp.author_rewards
+    FROM
+      hive_posts_view hp
+    INNER JOIN
+    (
+      SELECT
+        hp2.id
+      FROM
+        hive_posts hp2
+      WHERE
+        hp2.counter_deleted = 0
+        AND NOT hp2.is_muted
+        AND hp2.root_id = __root_id
+        AND hp2.id >= __post_id
+      ORDER BY
+        hp2.id ASC
+      LIMIT _limit
+    ) ds on hp.id = ds.id
+    ORDER BY
+      hp.id
+    ;
+  END
+  $function$
+  LANGUAGE plpgsql
+;
+
+DROP FUNCTION IF EXISTS list_comments_by_parent(character varying, character varying, character varying, character varying, int)
+;
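+-- direct replies to the given parent post, ordered by id, starting at the given start post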
+CREATE OR REPLACE FUNCTION list_comments_by_parent(
+  in _parent_author hive_accounts.name%TYPE,
+  in _parent_permlink hive_permlink_data.permlink%TYPE,
+  in _start_post_author hive_accounts.name%TYPE,
+  in _start_post_permlink hive_permlink_data.permlink%TYPE,
+  in _limit INT)
+  RETURNS SETOF database_api_post
+AS $function$
+DECLARE
+  __post_id INT;
+  __parent_id INT;
+BEGIN
+  __parent_id = find_comment_id(_parent_author, _parent_permlink, True);
+  __post_id = find_comment_id(_start_post_author, _start_post_permlink, True);
+  RETURN QUERY
+  SELECT
+    hp.id, hp.community_id, hp.author, hp.permlink, hp.title, hp.body,
+    hp.category, hp.depth, hp.promoted, hp.payout, hp.last_payout_at, hp.cashout_time, hp.is_paidout,
+    hp.children, hp.votes, hp.created_at, hp.updated_at, hp.rshares, hp.json,
+    hp.is_hidden, hp.is_grayed, hp.total_votes, hp.net_votes, hp.total_vote_weight,
+    hp.parent_author, hp.parent_permlink_or_category, hp.curator_payout_value, hp.root_author, hp.root_permlink,
+    hp.max_accepted_payout, hp.percent_hbd, hp.allow_replies, hp.allow_votes,
+    hp.allow_curation_rewards, hp.beneficiaries, hp.url, hp.root_title, hp.abs_rshares,
+    hp.active, hp.author_rewards
+  FROM
+    hive_posts_view hp
+  INNER JOIN
+  (
+    SELECT hp1.id FROM
+      hive_posts hp1
+    WHERE
+      hp1.counter_deleted = 0
+      AND NOT hp1.is_muted
+      AND hp1.parent_id = __parent_id
+      AND hp1.id >= __post_id
+    ORDER BY
+      hp1.id ASC
+    LIMIT
+      _limit
+  ) ds ON ds.id = hp.id
+  ORDER BY
+    hp.id
+  ;
+END
+$function$
+LANGUAGE plpgsql
+;
+
+DROP FUNCTION IF EXISTS list_comments_by_last_update(character varying, timestamp, character varying, character varying, int)
+;
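+-- replies to posts of _parent_author, most recently updated first, starting at (_updated_at, start post)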
+CREATE OR REPLACE FUNCTION list_comments_by_last_update(
+  in _parent_author hive_accounts.name%TYPE,
+  in _updated_at hive_posts.updated_at%TYPE,
+  in _start_post_author hive_accounts.name%TYPE,
+  in _start_post_permlink hive_permlink_data.permlink%TYPE,
+  in _limit INT)
+  RETURNS SETOF database_api_post
+  AS
+  $function$
+  DECLARE
+    __post_id INT;
+    __parent_author_id INT;
+  BEGIN
+    __parent_author_id = find_account_id(_parent_author, True);
+    __post_id = find_comment_id(_start_post_author, _start_post_permlink, True);
+    RETURN QUERY
+    SELECT
+        hp.id, hp.community_id, hp.author, hp.permlink, hp.title, hp.body,
+        hp.category, hp.depth, hp.promoted, hp.payout, hp.last_payout_at, hp.cashout_time, hp.is_paidout,
+        hp.children, hp.votes, hp.created_at, hp.updated_at, hp.rshares, hp.json,
+        hp.is_hidden, hp.is_grayed, hp.total_votes, hp.net_votes, hp.total_vote_weight,
+        hp.parent_author, hp.parent_permlink_or_category, hp.curator_payout_value, hp.root_author, hp.root_permlink,
+        hp.max_accepted_payout, hp.percent_hbd, hp.allow_replies, hp.allow_votes,
+        hp.allow_curation_rewards, hp.beneficiaries, hp.url, hp.root_title, hp.abs_rshares,
+        hp.active, hp.author_rewards
+    FROM
+        hive_posts_view hp
+    INNER JOIN
+    (
+        SELECT
+          hp1.id
+        FROM
+          hive_posts hp1
+        JOIN
+          hive_posts hp2 ON hp1.parent_id = hp2.id
+        WHERE
+          hp1.counter_deleted = 0
+          AND NOT hp1.is_muted
+          AND hp2.author_id = __parent_author_id
+          AND (
+            hp1.updated_at < _updated_at
+            OR ( hp1.updated_at = _updated_at AND hp1.id >= __post_id )
+          )
+        ORDER BY
+          hp1.updated_at DESC,
+          hp1.id ASC
+        LIMIT
+          _limit
+    ) ds ON ds.id = hp.id
+    ORDER BY
+      hp.updated_at DESC,
+      hp.id ASC
+    ;
+  END
+  $function$
+  LANGUAGE plpgsql
+;
+
+DROP FUNCTION IF EXISTS list_comments_by_author_last_update(character varying, timestamp, character varying, character varying, int)
+;
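+-- posts and comments authored by _author, most recently updated first, starting at (_updated_at, start post)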
+CREATE OR REPLACE FUNCTION list_comments_by_author_last_update(
+  in _author hive_accounts.name%TYPE,
+  in _updated_at hive_posts.updated_at%TYPE,
+  in _start_post_author hive_accounts.name%TYPE,
+  in _start_post_permlink hive_permlink_data.permlink%TYPE,
+  in _limit INT)
+  RETURNS SETOF database_api_post
+  AS
+  $function$
+  DECLARE
+    __author_id INT;
+    __post_id INT;
+  BEGIN
+    __author_id = find_account_id(_author, True);
+    __post_id = find_comment_id(_start_post_author, _start_post_permlink, True);
+    RETURN QUERY
+    SELECT
+        hp.id, hp.community_id, hp.author, hp.permlink, hp.title, hp.body,
+        hp.category, hp.depth, hp.promoted, hp.payout, hp.last_payout_at, hp.cashout_time, hp.is_paidout,
+        hp.children, hp.votes, hp.created_at, hp.updated_at, hp.rshares, hp.json,
+        hp.is_hidden, hp.is_grayed, hp.total_votes, hp.net_votes, hp.total_vote_weight,
+        hp.parent_author, hp.parent_permlink_or_category, hp.curator_payout_value, hp.root_author, hp.root_permlink,
+        hp.max_accepted_payout, hp.percent_hbd, hp.allow_replies, hp.allow_votes,
+        hp.allow_curation_rewards, hp.beneficiaries, hp.url, hp.root_title, hp.abs_rshares,
+        hp.active, hp.author_rewards
+    FROM
+        hive_posts_view hp
+    INNER JOIN
+    (
+      SELECT
+        hp1.id
+      FROM
+        hive_posts hp1
+      WHERE
+        hp1.counter_deleted = 0
+        AND NOT hp1.is_muted
+        AND hp1.author_id = __author_id
+        AND (
+          hp1.updated_at < _updated_at
+          OR ( hp1.updated_at = _updated_at AND hp1.id >= __post_id )
+        )
+      ORDER BY
+        hp1.updated_at DESC,
+        hp1.id ASC
+      LIMIT
+        _limit
+    ) ds ON ds.id = hp.id
+    ORDER BY
+        hp.updated_at DESC,
+        hp.id ASC
+    ;
+  END
+  $function$
+  LANGUAGE plpgsql
+;
diff --git a/hive/db/sql_scripts/database_api_list_votes.sql b/hive/db/sql_scripts/database_api_list_votes.sql
new file mode 100644
index 0000000000000000000000000000000000000000..df591ebf1ef229179c2488845f3a8ec2ffd7badf
--- /dev/null
+++ b/hive/db/sql_scripts/database_api_list_votes.sql
@@ -0,0 +1,147 @@
+DROP TYPE IF EXISTS database_api_vote CASCADE;
+
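+-- row type returned by the vote listing functions in this file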
+CREATE TYPE database_api_vote AS (
+  id BIGINT,
+  voter VARCHAR(16),
+  author VARCHAR(16),
+  permlink VARCHAR(255),
+  weight NUMERIC,
+  rshares BIGINT,
+  percent INT,
+  last_update TIMESTAMP,
+  num_changes INT,
+  reputation BIGINT
+);
+
+DROP FUNCTION IF EXISTS find_votes( character varying, character varying, int )
+;
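+-- all votes on the given post, ordered by voter id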
+CREATE OR REPLACE FUNCTION public.find_votes
+(
+  in _AUTHOR hive_accounts.name%TYPE,
+  in _PERMLINK hive_permlink_data.permlink%TYPE,
+  in _LIMIT INT
+)
+RETURNS SETOF database_api_vote
+LANGUAGE 'plpgsql'
+AS
+$function$
+DECLARE _POST_ID INT;
+BEGIN
+_POST_ID = find_comment_id( _AUTHOR, _PERMLINK, True);
+
+RETURN QUERY
+(
+    SELECT
+        v.id,
+        v.voter,
+        v.author,
+        v.permlink,
+        v.weight,
+        v.rshares,
+        v.percent,
+        v.last_update,
+        v.num_changes,
+        v.reputation
+    FROM
+        hive_votes_view v
+    WHERE
+        v.post_id = _POST_ID
+    ORDER BY
+        voter_id
+    LIMIT _LIMIT
+);
+
+END
+$function$;
+
+DROP FUNCTION IF EXISTS list_votes_by_voter_comment( character varying, character varying, character varying, int )
+;
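+-- votes cast by _VOTER, ordered by post id, starting at the given post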
+CREATE OR REPLACE FUNCTION public.list_votes_by_voter_comment
+(
+  in _VOTER hive_accounts.name%TYPE,
+  in _AUTHOR hive_accounts.name%TYPE,
+  in _PERMLINK hive_permlink_data.permlink%TYPE,
+  in _LIMIT INT
+)
+RETURNS SETOF database_api_vote
+LANGUAGE 'plpgsql'
+AS
+$function$
+DECLARE __voter_id INT;
+DECLARE __post_id INT;
+BEGIN
+
+__voter_id = find_account_id( _VOTER, True );
+__post_id = find_comment_id( _AUTHOR, _PERMLINK, True );
+
+RETURN QUERY
+(
+    SELECT
+        v.id,
+        v.voter,
+        v.author,
+        v.permlink,
+        v.weight,
+        v.rshares,
+        v.percent,
+        v.last_update,
+        v.num_changes,
+        v.reputation
+    FROM
+        hive_votes_view v
+    WHERE
+        v.voter_id = __voter_id
+        AND v.post_id >= __post_id
+    ORDER BY
+        v.post_id
+    LIMIT _LIMIT
+);
+
+END
+$function$;
+
+DROP FUNCTION IF EXISTS list_votes_by_comment_voter( character varying, character varying, character varying, int )
+;
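+-- votes on the given post, ordered by voter id, starting at _VOTER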
+CREATE OR REPLACE FUNCTION public.list_votes_by_comment_voter
+(
+  in _VOTER hive_accounts.name%TYPE,
+  in _AUTHOR hive_accounts.name%TYPE,
+  in _PERMLINK hive_permlink_data.permlink%TYPE,
+  in _LIMIT INT
+)
+RETURNS SETOF database_api_vote
+LANGUAGE 'plpgsql'
+AS
+$function$
+DECLARE __voter_id INT;
+DECLARE __post_id INT;
+BEGIN
+
+__voter_id = find_account_id( _VOTER, True );
+__post_id = find_comment_id( _AUTHOR, _PERMLINK, True );
+
+RETURN QUERY
+(
+    SELECT
+        v.id,
+        v.voter,
+        v.author,
+        v.permlink,
+        v.weight,
+        v.rshares,
+        v.percent,
+        v.last_update,
+        v.num_changes,
+        v.reputation
+    FROM
+        hive_votes_view v
+    WHERE
+        v.post_id = __post_id
+        AND v.voter_id >= __voter_id
+    ORDER BY
+        v.voter_id
+    LIMIT _LIMIT
+);
+
+END
+$function$;
diff --git a/hive/db/sql_scripts/db_upgrade.sh b/hive/db/sql_scripts/db_upgrade.sh
new file mode 100755
index 0000000000000000000000000000000000000000..caf1097f6d74bcb03779e8941affe7c66496e92a
--- /dev/null
+++ b/hive/db/sql_scripts/db_upgrade.sh
@@ -0,0 +1,74 @@
+#!/bin/bash 
+
+set -e 
+set -o pipefail 
+
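+# Applies the Hivemind SQL scripts listed below, in dependency order, to the given database,
+# logging all output to upgrade.log.
+# Example invocation (user and database names are placeholders): ./db_upgrade.sh hive hive_db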
+echo "Usage ./db_upgrade.sh <user-name> <db-name>"
+rm -f ./upgrade.log
+
+for sql in postgres_handle_view_changes.sql \
+          upgrade/upgrade_table_schema.sql \
+          utility_functions.sql \
+          hive_accounts_view.sql \
+          hive_accounts_info_view.sql \
+          hive_posts_base_view.sql \
+          hive_posts_view.sql \
+          hive_votes_view.sql \
+          hive_muted_accounts_view.sql \
+          hive_muted_accounts_by_id_view.sql \
+          hive_blacklisted_accounts_by_observer_view.sql \
+          hive_post_operations.sql \
+          head_block_time.sql \
+          update_feed_cache.sql \
+          payout_stats_view.sql \
+          update_hive_posts_mentions.sql \
+          mutes.sql \
+          bridge_get_ranked_post_type.sql \
+          bridge_get_ranked_post_for_communities.sql \
+          bridge_get_ranked_post_for_observer_communities.sql \
+          bridge_get_ranked_post_for_tag.sql \
+          bridge_get_ranked_post_for_all.sql \
+          calculate_account_reputations.sql \
+          update_communities_rank.sql \
+          delete_hive_posts_mentions.sql \
+          notifications_view.sql \
+          notifications_api.sql \
+          bridge_get_account_posts_by_comments.sql \
+          bridge_get_account_posts_by_payout.sql \
+          bridge_get_account_posts_by_posts.sql \
+          bridge_get_account_posts_by_replies.sql \
+          bridge_get_relationship_between_accounts.sql \
+          bridge_get_post.sql \
+          bridge_get_discussion.sql \
+          condenser_api_post_type.sql \
+          condenser_api_post_ex_type.sql \
+          condenser_get_blog.sql \
+          condenser_get_content.sql \
+          condenser_tags.sql \
+          condenser_follows.sql \
+          hot_and_trends.sql \
+          update_hive_posts_children_count.sql \
+          update_hive_posts_api_helper.sql \
+          database_api_list_comments.sql \
+          database_api_list_votes.sql \
+          update_posts_rshares.sql \
+          update_hive_post_root_id.sql \
+          condenser_get_by_account_comments.sql \
+          condenser_get_by_blog_without_reblog.sql \
+          bridge_get_by_feed_with_reblog.sql \
+          condenser_get_by_blog.sql \
+          bridge_get_account_posts_by_blog.sql \
+          condenser_get_names_by_reblogged.sql \
+          condenser_get_account_reputations.sql \
+          update_follow_count.sql
+
+do
+    echo "Executing psql -U $1 -d $2 -f $sql"
+    time psql -a -1 -v "ON_ERROR_STOP=1" -U "$1" -d "$2" -c '\timing' -f "$sql" 2>&1 | tee -a -i upgrade.log
+    echo $?
+done
+
+time psql -a -v "ON_ERROR_STOP=1" -U "$1" -d "$2" -c '\timing' -f upgrade/upgrade_runtime_migration.sql 2>&1 | tee -a -i upgrade.log
+
+time psql -a -v "ON_ERROR_STOP=1" -U "$1" -d "$2" -c '\timing' -f upgrade/do_conditional_vacuum.sql 2>&1 | tee -a -i upgrade.log
+
diff --git a/hive/db/sql_scripts/delete_hive_posts_mentions.sql b/hive/db/sql_scripts/delete_hive_posts_mentions.sql
new file mode 100644
index 0000000000000000000000000000000000000000..ff4d12f58b33fb579715269451acdf05d3d66e6e
--- /dev/null
+++ b/hive/db/sql_scripts/delete_hive_posts_mentions.sql
@@ -0,0 +1,19 @@
+DROP FUNCTION IF EXISTS delete_hive_posts_mentions();
+
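+-- deletes hive_mentions entries older than 90 days (measured in blocks before the head block)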
+CREATE OR REPLACE FUNCTION delete_hive_posts_mentions()
+RETURNS VOID
+LANGUAGE 'plpgsql'
+AS
+$function$
+DECLARE
+  __90_days_beyond_head_block_number INTEGER;
+BEGIN
+
+  __90_days_beyond_head_block_number = block_before_head('90 days'::interval);
+
+  DELETE FROM hive_mentions
+  WHERE block_num < __90_days_beyond_head_block_number;
+
+END
+$function$
+;
diff --git a/hive/db/sql_scripts/delete_reblog_feed_cache.sql b/hive/db/sql_scripts/delete_reblog_feed_cache.sql
new file mode 100644
index 0000000000000000000000000000000000000000..39ae1e1edd5969df65670e448428d70264b5a2c1
--- /dev/null
+++ b/hive/db/sql_scripts/delete_reblog_feed_cache.sql
@@ -0,0 +1,34 @@
+
+DROP FUNCTION IF EXISTS delete_reblog_feed_cache(character varying,character varying,character varying)
+;
+
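+-- removes the reblog of the given post by _account together with its feed cache entry; returns 0 when the post does not exist, 1 otherwise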
+CREATE OR REPLACE FUNCTION delete_reblog_feed_cache(
+  in _author hive_accounts.name%TYPE,
+  in _permlink hive_permlink_data.permlink%TYPE,
+  in _account hive_accounts.name%TYPE)
+RETURNS INTEGER
+LANGUAGE plpgsql
+AS
+$function$
+DECLARE
+  __account_id INT;
+  __post_id INT;
+BEGIN
+
+  __account_id = find_account_id( _account, False );
+  __post_id = find_comment_id( _author, _permlink, False );
+
+  IF __post_id = 0 THEN
+    RETURN 0;
+  END IF;
+
+  DELETE FROM hive_reblogs
+  WHERE blogger_id = __account_id AND post_id = __post_id;
+
+  DELETE FROM hive_feed_cache
+  WHERE account_id = __account_id AND post_id = __post_id;
+
+  RETURN 1;
+END
+$function$
+;
diff --git a/hive/db/sql_scripts/head_block_time.sql b/hive/db/sql_scripts/head_block_time.sql
new file mode 100644
index 0000000000000000000000000000000000000000..f1f1462b67c89ddc4cb61be398eafb11507a345a
--- /dev/null
+++ b/hive/db/sql_scripts/head_block_time.sql
@@ -0,0 +1,19 @@
+DROP FUNCTION IF EXISTS head_block_time CASCADE;
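+-- creation time of the most recent block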
+CREATE OR REPLACE FUNCTION head_block_time()
+RETURNS TIMESTAMP
+LANGUAGE 'sql' STABLE
+AS
+$BODY$
+SELECT hb.created_at FROM hive_blocks hb ORDER BY hb.num DESC LIMIT 1
+$BODY$
+;
+
+
+DROP FUNCTION IF EXISTS block_before_head CASCADE;
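+-- block number roughly _time before the head block, assuming 3-second block intervals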
+CREATE OR REPLACE FUNCTION block_before_head( in _time INTERVAL )
+RETURNS hive_blocks.num%TYPE
+LANGUAGE 'sql' STABLE
+AS
+$BODY$
+SELECT MAX(hb1.num) - CAST( extract(epoch from _time)/3 as INTEGER ) FROM hive_blocks hb1
+$BODY$
+;
diff --git a/hive/db/sql_scripts/hive_accounts_info_view.sql b/hive/db/sql_scripts/hive_accounts_info_view.sql
new file mode 100644
index 0000000000000000000000000000000000000000..eb378d233ecd3b4072d24abb874cc7f934c58d01
--- /dev/null
+++ b/hive/db/sql_scripts/hive_accounts_info_view.sql
@@ -0,0 +1,58 @@
+DROP VIEW IF EXISTS hive_accounts_info_view_lite CASCADE;
+CREATE OR REPLACE VIEW public.hive_accounts_info_view_lite
+ AS
+ SELECT ha.id,
+    ha.name,
+    COALESCE(posts.post_count, 0::bigint) AS post_count,
+    ha.created_at,
+    ha.reputation,
+    ha.rank,
+    ha.following,
+    ha.followers,
+    ha.lastread_at,
+    ha.posting_json_metadata,
+    ha.json_metadata
+   FROM hive_accounts ha
+   LEFT JOIN LATERAL
+   ( 
+     SELECT COUNT(1) AS post_count
+     FROM hive_posts hp
+     WHERE hp.counter_deleted = 0 and hp.author_id = ha.id
+   ) posts ON true
+   ;
+
+DROP VIEW IF EXISTS hive_accounts_info_view;
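+-- extends the lite view with active_at: the latest of account creation, last post and last vote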
+CREATE OR REPLACE VIEW public.hive_accounts_info_view
+ AS
+ SELECT ha.id,
+    ha.name,
+    ha.post_count,
+    ha.created_at,
+    GREATEST(ha.created_at,
+             COALESCE(latest_post.latest_post, '1970-01-01 00:00:00'::timestamp without time zone),
+             COALESCE(whole_votes.latest_vote, '1970-01-01 00:00:00'::timestamp without time zone)
+            ) AS active_at,
+    ha.reputation,
+    ha.rank,
+    ha.following,
+    ha.followers,
+    ha.lastread_at,
+    ha.posting_json_metadata,
+    ha.json_metadata
+   FROM hive_accounts_info_view_lite ha
+   LEFT JOIN lateral 
+   (
+      SELECT hp1.created_at AS latest_post
+      FROM hive_posts hp1
+      WHERE hp1.counter_deleted = 0 and hp1.author_id = ha.id
+      ORDER BY hp1.created_at DESC, hp1.author_id DESC LIMIT 1
+   ) latest_post on true
+   LEFT JOIN LATERAL
+   (
+     SELECT hvf.last_update AS latest_vote
+     FROM hive_votes hvf
+     WHERE hvf.voter_id = ha.id
+     ORDER BY hvf.voter_id DESC, hvf.last_update DESC LIMIT 1
+   ) whole_votes ON true
+   ;
diff --git a/hive/db/sql_scripts/hive_accounts_view.sql b/hive/db/sql_scripts/hive_accounts_view.sql
new file mode 100644
index 0000000000000000000000000000000000000000..63966fc3ef17fad49f9b643761358d7160afe4ee
--- /dev/null
+++ b/hive/db/sql_scripts/hive_accounts_view.sql
@@ -0,0 +1,18 @@
+DROP VIEW IF EXISTS public.hive_accounts_view CASCADE;
+
+CREATE OR REPLACE VIEW public.hive_accounts_view
+AS
+SELECT id,
+  name,
+  created_at,
+  reputation,
+  is_implicit,
+  followers,
+  following,
+  rank,
+  lastread_at,
+  posting_json_metadata,
+  json_metadata,
+  ( reputation <= -464800000000 ) is_grayed -- biggest number where rep_log10 gives < 1.0
+  FROM hive_accounts
+  ;
diff --git a/hive/db/sql_scripts/hive_blacklisted_accounts_by_observer_view.sql b/hive/db/sql_scripts/hive_blacklisted_accounts_by_observer_view.sql
new file mode 100644
index 0000000000000000000000000000000000000000..c6b94bdc09c4901d12bd0e2aa02dbdd6e97ad67a
--- /dev/null
+++ b/hive/db/sql_scripts/hive_blacklisted_accounts_by_observer_view.sql
@@ -0,0 +1,22 @@
+DROP VIEW IF EXISTS blacklisted_by_observer_view;
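+-- accounts blacklisted by an observer, either directly or through blacklists the observer follows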
+CREATE OR REPLACE VIEW blacklisted_by_observer_view AS
+SELECT observer_accounts.id AS observer_id,
+    following_accounts.id AS blacklisted_id,
+    following_accounts.name AS blacklisted_name,
+    'my blacklist'::text AS source
+   FROM ((hive_follows
+     JOIN hive_accounts following_accounts ON ((hive_follows.following = following_accounts.id)))
+     JOIN hive_accounts observer_accounts ON ((hive_follows.follower = observer_accounts.id)))
+  WHERE hive_follows.blacklisted
+UNION ALL
+ SELECT observer_accounts.id AS observer_id,
+    following_accounts.id AS blacklisted_id,
+    following_accounts.name AS blacklisted_name,
+    string_agg(('blacklisted by '::text || (indirect_accounts.name)::text), ','::text ORDER BY indirect_accounts.name) AS source
+   FROM (((hive_follows hive_follows_direct
+     JOIN hive_follows hive_follows_indirect ON ((hive_follows_direct.following = hive_follows_indirect.follower)))
+     JOIN hive_accounts following_accounts ON ((hive_follows_indirect.following = following_accounts.id)))
+     JOIN hive_accounts observer_accounts ON ((hive_follows_direct.follower = observer_accounts.id)))
+     JOIN hive_accounts indirect_accounts ON ((hive_follows_indirect.follower = indirect_accounts.id))
+  WHERE (hive_follows_direct.follow_blacklists AND hive_follows_indirect.blacklisted)
+  GROUP BY observer_accounts.id, following_accounts.id;
\ No newline at end of file
diff --git a/hive/db/sql_scripts/hive_muted_accounts_by_id_view.sql b/hive/db/sql_scripts/hive_muted_accounts_by_id_view.sql
new file mode 100644
index 0000000000000000000000000000000000000000..1fddfa778ee99410c2bfea5b76803408e72a1f96
--- /dev/null
+++ b/hive/db/sql_scripts/hive_muted_accounts_by_id_view.sql
@@ -0,0 +1,16 @@
+DROP VIEW IF EXISTS muted_accounts_by_id_view CASCADE;
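+-- ids of accounts muted by an observer, directly or through followed mute lists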
+CREATE OR REPLACE VIEW muted_accounts_by_id_view AS
+SELECT observer_accounts.id AS observer_id,
+    following_accounts.id AS muted_id
+   FROM ((hive_follows
+     JOIN hive_accounts following_accounts ON ((hive_follows.following = following_accounts.id)))
+     JOIN hive_accounts observer_accounts ON ((hive_follows.follower = observer_accounts.id)))
+  WHERE (hive_follows.state = 2)
+UNION
+ SELECT observer_accounts.id AS observer_id,
+    following_accounts.id AS muted_id
+   FROM (((hive_follows hive_follows_direct
+     JOIN hive_follows hive_follows_indirect ON ((hive_follows_direct.following = hive_follows_indirect.follower)))
+     JOIN hive_accounts following_accounts ON ((hive_follows_indirect.following = following_accounts.id)))
+     JOIN hive_accounts observer_accounts ON ((hive_follows_direct.follower = observer_accounts.id)))
+  WHERE (hive_follows_direct.follow_muted AND (hive_follows_indirect.state = 2));
\ No newline at end of file
diff --git a/hive/db/sql_scripts/hive_muted_accounts_view.sql b/hive/db/sql_scripts/hive_muted_accounts_view.sql
new file mode 100644
index 0000000000000000000000000000000000000000..9189a157ce4375c0f8e1f3e3dddc76be4f8718aa
--- /dev/null
+++ b/hive/db/sql_scripts/hive_muted_accounts_view.sql
@@ -0,0 +1,17 @@
+ DROP VIEW IF EXISTS muted_accounts_view;
+ CREATE OR REPLACE VIEW muted_accounts_view AS
+ (
+   SELECT observer_accounts.name AS observer, following_accounts.name AS muted
+   FROM hive_follows JOIN hive_accounts following_accounts ON hive_follows.following = following_accounts.id
+                     JOIN hive_accounts observer_accounts ON hive_follows.follower = observer_accounts.id
+   WHERE hive_follows.state = 2
+
+   UNION
+
+   SELECT observer_accounts.name AS observer, following_accounts.name AS muted
+   FROM hive_follows hive_follows_direct JOIN hive_follows hive_follows_indirect ON hive_follows_direct.following = hive_follows_indirect.follower
+                                         JOIN hive_accounts following_accounts ON hive_follows_indirect.following = following_accounts.id
+                                         JOIN hive_accounts observer_accounts ON hive_follows_direct.follower = observer_accounts.id
+   WHERE hive_follows_direct.follow_muted AND hive_follows_indirect.state = 2
+ );
+ 
\ No newline at end of file
diff --git a/hive/db/sql_scripts/hive_post_operations.sql b/hive/db/sql_scripts/hive_post_operations.sql
new file mode 100644
index 0000000000000000000000000000000000000000..1e6ee77fe8b91b4fc13fa78c1b17a3d17850b76c
--- /dev/null
+++ b/hive/db/sql_scripts/hive_post_operations.sql
@@ -0,0 +1,190 @@
+DROP FUNCTION IF EXISTS prepare_tags;
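+-- normalizes raw tags (lowercase, strip '#' and whitespace, truncate to 32 chars, dedup) and upserts them into hive_tag_data, returning their ids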
+CREATE OR REPLACE FUNCTION prepare_tags( in _raw_tags VARCHAR[] )
+RETURNS SETOF hive_tag_data.id%TYPE
+LANGUAGE 'plpgsql'
+VOLATILE
+AS
+$function$
+DECLARE
+   __i INTEGER;
+   __tags VARCHAR[];
+   __tag VARCHAR;
+BEGIN
+  FOR __i IN 1 .. ARRAY_UPPER( _raw_tags, 1)
+  LOOP
+    __tag = CAST( LEFT(LOWER(REGEXP_REPLACE( _raw_tags[ __i ], '[#\s]', '', 'g' )),32) as VARCHAR);
+    CONTINUE WHEN __tag = '' OR __tag = ANY(__tags);
+    __tags = ARRAY_APPEND( __tags, __tag );
+  END LOOP;
+
+  RETURN QUERY INSERT INTO
+     hive_tag_data AS htd(tag)
+  SELECT UNNEST( __tags )
+  ON CONFLICT("tag") DO UPDATE SET tag=EXCLUDED.tag --trick to always return id
+  RETURNING htd.id;
+END
+$function$;
+
+DROP FUNCTION IF EXISTS process_hive_post_operation;
+;
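+-- inserts a new post/comment row or updates an existing one for the given author/permlink; returns the key attributes of the affected row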
+CREATE OR REPLACE FUNCTION process_hive_post_operation(
+  in _author hive_accounts.name%TYPE,
+  in _permlink hive_permlink_data.permlink%TYPE,
+  in _parent_author hive_accounts.name%TYPE,
+  in _parent_permlink hive_permlink_data.permlink%TYPE,
+  in _date hive_posts.created_at%TYPE,
+  in _community_support_start_block hive_posts.block_num%TYPE,
+  in _block_num hive_posts.block_num%TYPE,
+  in _metadata_tags VARCHAR[])
+RETURNS TABLE (is_new_post boolean, id hive_posts.id%TYPE, author_id hive_posts.author_id%TYPE, permlink_id hive_posts.permlink_id%TYPE,
+                post_category hive_category_data.category%TYPE, parent_id hive_posts.parent_id%TYPE, community_id hive_posts.community_id%TYPE,
+                is_valid hive_posts.is_valid%TYPE, is_muted hive_posts.is_muted%TYPE, depth hive_posts.depth%TYPE)
+LANGUAGE plpgsql
+AS
+$function$
+BEGIN
+
+INSERT INTO hive_permlink_data
+(permlink)
+values
+(
+_permlink
+)
+ON CONFLICT DO NOTHING
+;
+if _parent_author != '' THEN
+  RETURN QUERY INSERT INTO hive_posts as hp
+  (parent_id, depth, community_id, category_id,
+    root_id, is_muted, is_valid,
+    author_id, permlink_id, created_at, updated_at, sc_hot, sc_trend, active, payout_at, cashout_time, counter_deleted, block_num, block_num_created)
+  SELECT php.id AS parent_id, php.depth + 1 AS depth,
+      (CASE
+          WHEN _block_num > _community_support_start_block THEN
+            COALESCE(php.community_id, (select hc.id from hive_communities hc where hc.name = _parent_permlink))
+          ELSE NULL
+      END) AS community_id,
+      COALESCE(php.category_id, (select hcg.id from hive_category_data hcg where hcg.category = _parent_permlink)) AS category_id,
+      (CASE(php.root_id)
+          WHEN 0 THEN php.id
+          ELSE php.root_id
+        END) AS root_id,
+      php.is_muted AS is_muted, php.is_valid AS is_valid,
+      ha.id AS author_id, hpd.id AS permlink_id, _date AS created_at,
+      _date AS updated_at,
+      calculate_time_part_of_hot(_date) AS sc_hot,
+      calculate_time_part_of_trending(_date) AS sc_trend,
+      _date AS active, (_date + INTERVAL '7 days') AS payout_at, (_date + INTERVAL '7 days') AS cashout_time, 0,
+        _block_num as block_num, _block_num as block_num_created
+  FROM hive_accounts ha,
+        hive_permlink_data hpd,
+        hive_posts php
+  INNER JOIN hive_accounts pha ON pha.id = php.author_id
+  INNER JOIN hive_permlink_data phpd ON phpd.id = php.permlink_id
+  WHERE pha.name = _parent_author AND phpd.permlink = _parent_permlink AND
+          ha.name = _author AND hpd.permlink = _permlink AND php.counter_deleted = 0
+
+  ON CONFLICT ON CONSTRAINT hive_posts_ux1 DO UPDATE SET
+    --- During a post update the parent post, category and community id may not change,
+    --- so depth, is_valid and is_muted cannot change either.
+    --- post edit part
+    updated_at = _date,
+    active = _date,
+    block_num = _block_num
+  RETURNING (xmax = 0) as is_new_post, hp.id, hp.author_id, hp.permlink_id, (SELECT hcd.category FROM hive_category_data hcd WHERE hcd.id = hp.category_id) as post_category, hp.parent_id, hp.community_id, hp.is_valid, hp.is_muted, hp.depth
+;
+ELSE
+  INSERT INTO hive_category_data
+  (category)
+  VALUES (_parent_permlink)
+  ON CONFLICT (category) DO NOTHING
+  ;
+
+  RETURN QUERY INSERT INTO hive_posts as hp
+  (parent_id, depth, community_id, category_id,
+    root_id, is_muted, is_valid,
+    author_id, permlink_id, created_at, updated_at, sc_hot, sc_trend,
+    active, payout_at, cashout_time, counter_deleted, block_num, block_num_created,
+    tags_ids)
+  SELECT 0 AS parent_id, 0 AS depth,
+      (CASE
+        WHEN _block_num > _community_support_start_block THEN
+          (select hc.id FROM hive_communities hc WHERE hc.name = _parent_permlink)
+        ELSE NULL
+      END) AS community_id,
+      (SELECT hcg.id FROM hive_category_data hcg WHERE hcg.category = _parent_permlink) AS category_id,
+      0 as root_id, -- the post's own id is used as the root when it has no parent
+      false AS is_muted, true AS is_valid,
+      ha.id AS author_id, hpd.id AS permlink_id, _date AS created_at,
+      _date AS updated_at,
+      calculate_time_part_of_hot(_date) AS sc_hot,
+      calculate_time_part_of_trending(_date) AS sc_trend,
+      _date AS active, (_date + INTERVAL '7 days') AS payout_at, (_date + INTERVAL '7 days') AS cashout_time, 0
+      , _block_num as block_num, _block_num as block_num_created
+      , (
+          SELECT ARRAY_AGG( prepare_tags )
+          FROM prepare_tags( ARRAY_APPEND(_metadata_tags, _parent_permlink ) )
+        ) as tags_ids
+  FROM hive_accounts ha,
+        hive_permlink_data hpd
+  WHERE ha.name = _author and hpd.permlink = _permlink
+
+  ON CONFLICT ON CONSTRAINT hive_posts_ux1 DO UPDATE SET
+    --- During a post update the parent post, category and community id may not change,
+    --- so depth, is_valid and is_muted cannot change either.
+    --- post edit part
+    updated_at = _date,
+    active = _date,
+    block_num = _block_num,
+    tags_ids = EXCLUDED.tags_ids
+
+  RETURNING (xmax = 0) as is_new_post, hp.id, hp.author_id, hp.permlink_id, _parent_permlink as post_category, hp.parent_id, hp.community_id, hp.is_valid, hp.is_muted, hp.depth
+  ;
+END IF;
+END
+$function$
+;
+
+DROP FUNCTION if exists delete_hive_post(character varying,character varying,character varying, integer, timestamp)
+;
+CREATE OR REPLACE FUNCTION delete_hive_post(
+  in _author hive_accounts.name%TYPE,
+  in _permlink hive_permlink_data.permlink%TYPE,
+  in _block_num hive_blocks.num%TYPE,
+  in _date hive_posts.active%TYPE)
+RETURNS VOID
+LANGUAGE plpgsql
+AS
+$function$
+DECLARE
+  __account_id INT;
+  __post_id INT;
+BEGIN
+
+  __account_id = find_account_id( _author, False );
+  __post_id = find_comment_id( _author, _permlink, False );
+
+  IF __post_id = 0 THEN
+    RETURN;
+  END IF;
+
+  UPDATE hive_posts
+  SET counter_deleted =
+  (
+      SELECT max( hps.counter_deleted ) + 1
+      FROM hive_posts hps
+      INNER JOIN hive_permlink_data hpd ON hps.permlink_id = hpd.id
+      WHERE hps.author_id = __account_id AND hpd.permlink = _permlink
+  )
+  ,block_num = _block_num
+  ,active = _date
+  WHERE id = __post_id;
+
+  DELETE FROM hive_reblogs
+  WHERE post_id = __post_id;
+
+  DELETE FROM hive_feed_cache
+  WHERE post_id = __post_id AND account_id = __account_id;
+
+END
+$function$
+;
diff --git a/hive/db/sql_scripts/hive_posts_base_view.sql b/hive/db/sql_scripts/hive_posts_base_view.sql
new file mode 100644
index 0000000000000000000000000000000000000000..d0a0e29783719380df11daa7f8965b3eee235c59
--- /dev/null
+++ b/hive/db/sql_scripts/hive_posts_base_view.sql
@@ -0,0 +1,67 @@
+DROP VIEW IF EXISTS public.hive_posts_base_view cascade;
+CREATE OR REPLACE VIEW public.hive_posts_base_view
+AS
+SELECT
+      hp.block_num
+    , hp.id
+    , hp.author_id
+    , hp.permlink_id
+    , hp.payout
+    , hp.pending_payout
+    , hp.abs_rshares
+    , hp.vote_rshares AS rshares
+FROM hive_posts hp
+;
+
+DROP VIEW IF EXISTS public.hive_posts_pp_view CASCADE;
+
+CREATE OR REPLACE VIEW public.hive_posts_pp_view
+ AS
+ SELECT hp.id,
+    hp.community_id,
+    hp.root_id,
+    hp.parent_id,
+    hp.active,
+    hp.author_rewards,
+    hp.author_id,
+    hp.category_id,
+    hp.depth,
+    hp.promoted,
+    hp.payout,
+    hp.pending_payout,
+    hp.payout_at,
+    hp.last_payout_at,
+    hp.cashout_time,
+    hp.is_paidout,
+    hp.children,
+    0 AS votes,
+    0 AS active_votes,
+    hp.created_at,
+    hp.updated_at,
+    hp.is_hidden,
+    hp.total_vote_weight,
+    pp.author_id AS parent_author_id,
+        CASE hp.depth > 0
+            WHEN true THEN hpd_pp.permlink
+            ELSE hcd.category
+        END AS parent_permlink_or_category,
+    hp.curator_payout_value,
+    hp.max_accepted_payout,
+    hp.percent_hbd,
+    true AS allow_replies,
+    hp.allow_votes,
+    hp.allow_curation_rewards,
+    hp.beneficiaries,
+    hp.sc_trend,
+    hp.sc_hot,
+    hp.is_pinned,
+    hp.is_muted,
+    hp.is_nsfw,
+    hp.is_valid,
+    hp.block_num
+   FROM hive_posts hp
+     JOIN hive_posts pp ON pp.id = hp.parent_id
+     JOIN hive_permlink_data hpd_pp ON hpd_pp.id = pp.permlink_id
+     JOIN hive_category_data hcd ON hcd.id = hp.category_id
+  WHERE hp.counter_deleted = 0 AND hp.id <> 0
+  ;
diff --git a/hive/db/sql_scripts/hive_posts_view.sql b/hive/db/sql_scripts/hive_posts_view.sql
new file mode 100644
index 0000000000000000000000000000000000000000..008a0423069994c8f46b3ac74ca6e4677dc9f94b
--- /dev/null
+++ b/hive/db/sql_scripts/hive_posts_view.sql
@@ -0,0 +1,92 @@
+DROP VIEW IF EXISTS public.hive_posts_view;
+
+CREATE OR REPLACE VIEW public.hive_posts_view
+AS
+SELECT hp.id,
+  hp.community_id,
+  hp.root_id,
+  hp.parent_id,
+  ha_a.name AS author,
+  hp.active,
+  hp.author_rewards,
+  hp.author_id,
+  hpd_p.permlink,
+  hpd.title,
+  hpd.body,
+  hpd.img_url,
+  hpd.preview,
+  hcd.category,
+  hp.category_id,
+  hp.depth,
+  hp.promoted,
+  hp.payout,
+  hp.pending_payout,
+  hp.payout_at,
+  hp.last_payout_at,
+  hp.cashout_time,
+  hp.is_paidout,
+  hp.children,
+  0 AS votes,
+  0 AS active_votes,
+  hp.created_at,
+  hp.updated_at,
+  hp.vote_rshares AS rshares,
+  hp.abs_rshares AS abs_rshares,
+  hp.total_votes AS total_votes,
+  hp.net_votes as net_votes,
+  hpd.json,
+  ha_a.reputation AS author_rep,
+  hp.is_hidden,
+  ha_a.is_grayed,
+  hp.total_vote_weight,
+  ha_pp.name AS parent_author,
+  ha_pp.id AS parent_author_id,
+    ( CASE hp.depth > 0
+      WHEN True THEN hpd_pp.permlink
+      ELSE hcd.category
+    END ) AS parent_permlink_or_category,
+  hp.curator_payout_value,
+  ha_rp.name AS root_author,
+  hpd_rp.permlink AS root_permlink,
+  rcd.category as root_category,
+  hp.max_accepted_payout,
+  hp.percent_hbd,
+    True AS allow_replies,
+  hp.allow_votes,
+  hp.allow_curation_rewards,
+  hp.beneficiaries,
+    CONCAT('/', rcd.category, '/@', ha_rp.name, '/', hpd_rp.permlink,
+      CASE (rp.id)
+        WHEN hp.id THEN ''
+        ELSE CONCAT('#@', ha_a.name, '/', hpd_p.permlink)
+      END
+    ) AS url,
+  rpd.title AS root_title,
+  hp.sc_trend,
+  hp.sc_hot,
+  hp.is_pinned,
+  hp.is_muted,
+  hp.is_nsfw,
+  hp.is_valid,
+  hr.title AS role_title,
+  hr.role_id AS role_id,
+  hc.title AS community_title,
+  hc.name AS community_name,
+  hp.block_num
+  FROM hive_posts hp
+    JOIN hive_posts pp ON pp.id = hp.parent_id
+    JOIN hive_posts rp ON rp.id = hp.root_id
+    JOIN hive_accounts_view ha_a ON ha_a.id = hp.author_id
+    JOIN hive_permlink_data hpd_p ON hpd_p.id = hp.permlink_id
+    JOIN hive_post_data hpd ON hpd.id = hp.id
+    JOIN hive_accounts ha_pp ON ha_pp.id = pp.author_id
+    JOIN hive_permlink_data hpd_pp ON hpd_pp.id = pp.permlink_id
+    JOIN hive_accounts ha_rp ON ha_rp.id = rp.author_id
+    JOIN hive_permlink_data hpd_rp ON hpd_rp.id = rp.permlink_id
+    JOIN hive_post_data rpd ON rpd.id = rp.id
+    JOIN hive_category_data hcd ON hcd.id = hp.category_id
+    JOIN hive_category_data rcd ON rcd.id = rp.category_id
+    LEFT JOIN hive_communities hc ON hp.community_id = hc.id
+    LEFT JOIN hive_roles hr ON hp.author_id = hr.account_id AND hp.community_id = hr.community_id
+  WHERE hp.counter_deleted = 0
+  ;
diff --git a/hive/db/sql_scripts/hive_votes_view.sql b/hive/db/sql_scripts/hive_votes_view.sql
new file mode 100644
index 0000000000000000000000000000000000000000..913d9b1273c9739c658de427b62b8858565e53d4
--- /dev/null
+++ b/hive/db/sql_scripts/hive_votes_view.sql
@@ -0,0 +1,25 @@
+DROP VIEW IF EXISTS hive_votes_view
+;
+CREATE OR REPLACE VIEW hive_votes_view
+AS
+SELECT
+    hv.id,
+    hv.voter_id as voter_id,
+    ha_a.name as author,
+    hpd.permlink as permlink,
+    vote_percent as percent,
+    ha_v.reputation as reputation,
+    rshares,
+    last_update,
+    ha_v.name as voter,
+    weight,
+    num_changes,
+    hv.permlink_id as permlink_id,
+    post_id,
+    is_effective
+FROM
+    hive_votes hv
+INNER JOIN hive_accounts ha_v ON ha_v.id = hv.voter_id
+INNER JOIN hive_accounts ha_a ON ha_a.id = hv.author_id
+INNER JOIN hive_permlink_data hpd ON hpd.id = hv.permlink_id
+;
diff --git a/hive/db/sql_scripts/hot_and_trends.sql b/hive/db/sql_scripts/hot_and_trends.sql
new file mode 100644
index 0000000000000000000000000000000000000000..4ec64009a148a06e3f9b2b079d4324f8318ed90d
--- /dev/null
+++ b/hive/db/sql_scripts/hot_and_trends.sql
@@ -0,0 +1,126 @@
+DROP FUNCTION IF EXISTS date_diff() CASCADE;
+CREATE OR REPLACE FUNCTION date_diff (units VARCHAR(30), start_t TIMESTAMP, end_t TIMESTAMP)
+  RETURNS INT AS $$
+DECLARE
+  diff_interval INTERVAL;
+  diff INT = 0;
+  years_diff INT = 0;
+BEGIN
+  IF units IN ('yy', 'yyyy', 'year', 'mm', 'm', 'month') THEN
+    years_diff = DATE_PART('year', end_t) - DATE_PART('year', start_t);
+    IF units IN ('yy', 'yyyy', 'year') THEN
+      -- SQL Server does not count full years passed (only difference between year parts)
+      RETURN years_diff;
+    ELSE
+      -- If end month is less than start month it will be subtracted
+      RETURN years_diff * 12 + (DATE_PART('month', end_t) - DATE_PART('month', start_t));
+    END IF;
+  END IF;
+  -- Minus operator returns interval 'DDD days HH:MI:SS'
+  diff_interval = end_t - start_t;
+  diff = diff + DATE_PART('day', diff_interval);
+  IF units IN ('wk', 'ww', 'week') THEN
+    diff = diff/7;
+    RETURN diff;
+  END IF;
+  IF units IN ('dd', 'd', 'day') THEN
+    RETURN diff;
+  END IF;
+  diff = diff * 24 + DATE_PART('hour', diff_interval);
+  IF units IN ('hh', 'hour') THEN
+     RETURN diff;
+  END IF;
+  diff = diff * 60 + DATE_PART('minute', diff_interval);
+  IF units IN ('mi', 'n', 'minute') THEN
+     RETURN diff;
+  END IF;
+  diff = diff * 60 + DATE_PART('second', diff_interval);
+  RETURN diff;
+END;
+$$ LANGUAGE plpgsql IMMUTABLE
+;
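+
+-- Illustrative usage sketch (hypothetical inputs), following the SQL Server DATEDIFF semantics noted above:
+--   SELECT date_diff('day',   '2020-01-01'::TIMESTAMP, '2020-01-08'::TIMESTAMP);             -- 7
+--   SELECT date_diff('hour',  '2020-01-01 00:00'::TIMESTAMP, '2020-01-01 05:30'::TIMESTAMP); -- 5
+--   SELECT date_diff('month', '2019-12-15'::TIMESTAMP, '2020-01-01'::TIMESTAMP);             -- 1 (year/month parts only)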
+
+
+DROP FUNCTION IF EXISTS public.calculate_time_part_of_trending(_post_created_at hive_posts.created_at%TYPE ) CASCADE;
+CREATE OR REPLACE FUNCTION public.calculate_time_part_of_trending(
+  _post_created_at hive_posts.created_at%TYPE)
+    RETURNS double precision
+    LANGUAGE 'plpgsql'
+    IMMUTABLE
+AS $BODY$
+DECLARE
+  result double precision;
+  sec_from_epoch INT = 0;
+BEGIN
+  sec_from_epoch  = date_diff( 'second', CAST('19700101' AS TIMESTAMP), _post_created_at );
+  result = sec_from_epoch/240000.0;
+  return result;
+END;
+$BODY$
+;
+
+
+DROP FUNCTION IF EXISTS public.calculate_time_part_of_hot(_post_created_at hive_posts.created_at%TYPE ) CASCADE;
+CREATE OR REPLACE FUNCTION public.calculate_time_part_of_hot(
+  _post_created_at hive_posts.created_at%TYPE)
+    RETURNS double precision
+    LANGUAGE 'plpgsql'
+    IMMUTABLE
+AS $BODY$
+DECLARE
+  result double precision;
+  sec_from_epoch INT = 0;
+BEGIN
+  sec_from_epoch  = date_diff( 'second', CAST('19700101' AS TIMESTAMP), _post_created_at );
+  result = sec_from_epoch/10000.0;
+  return result;
+END;
+$BODY$
+;
+
+DROP FUNCTION IF EXISTS public.calculate_rhsares_part_of_hot_and_trend(_rshares hive_posts.vote_rshares%TYPE) CASCADE;
+CREATE OR REPLACE FUNCTION public.calculate_rhsares_part_of_hot_and_trend(_rshares hive_posts.vote_rshares%TYPE)
+RETURNS double precision
+LANGUAGE 'plpgsql'
+IMMUTABLE
+AS $BODY$
+DECLARE
+    mod_score double precision;
+BEGIN
+    mod_score := _rshares / 10000000.0;
+    IF ( mod_score > 0 )
+    THEN
+        return log( greatest( abs(mod_score), 1 ) );
+    END IF;
+    return  -1.0 * log( greatest( abs(mod_score), 1 ) );
+END;
+$BODY$
+;
+
+DROP FUNCTION IF EXISTS public.calculate_hot(hive_posts.vote_rshares%TYPE, hive_posts.created_at%TYPE);
+CREATE OR REPLACE FUNCTION public.calculate_hot(
+    _rshares hive_posts.vote_rshares%TYPE,
+    _post_created_at hive_posts.created_at%TYPE)
+RETURNS hive_posts.sc_hot%TYPE
+LANGUAGE 'plpgsql'
+IMMUTABLE
+AS $BODY$
+BEGIN
+    return calculate_rhsares_part_of_hot_and_trend(_rshares) + calculate_time_part_of_hot( _post_created_at );
+END;
+$BODY$
+;
+
+DROP FUNCTION IF EXISTS public.calculate_tranding(hive_posts.vote_rshares%TYPE, hive_posts.created_at%TYPE);
+CREATE OR REPLACE FUNCTION public.calculate_tranding(
+    _rshares hive_posts.vote_rshares%TYPE,
+    _post_created_at hive_posts.created_at%TYPE)
+RETURNS hive_posts.sc_trend%TYPE
+LANGUAGE 'plpgsql'
+IMMUTABLE
+AS $BODY$
+BEGIN
+    return calculate_rhsares_part_of_hot_and_trend(_rshares) + calculate_time_part_of_trending( _post_created_at );
+END;
+$BODY$
+;
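+
+-- Illustrative sketch of how the scores compose, assuming the functions above are installed:
+-- the rshares part is a signed log10 of abs(rshares / 1e7), floored at 1, and the time part is
+-- seconds since epoch scaled by 1/10000 (hot) or 1/240000 (trending), e.g.:
+--   SELECT calculate_hot(1000000000::BIGINT, '2020-01-01'::TIMESTAMP);      -- log10(100) + epoch_seconds / 10000
+--   SELECT calculate_tranding(1000000000::BIGINT, '2020-01-01'::TIMESTAMP); -- log10(100) + epoch_seconds / 240000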
diff --git a/hive/db/sql_scripts/mutes.sql b/hive/db/sql_scripts/mutes.sql
new file mode 100644
index 0000000000000000000000000000000000000000..a65d86e284f799f1253449a1dd012029c20c9590
--- /dev/null
+++ b/hive/db/sql_scripts/mutes.sql
@@ -0,0 +1,106 @@
+DROP FUNCTION IF EXISTS mutes_get_blacklisted_for_observer;
+CREATE FUNCTION mutes_get_blacklisted_for_observer( in _observer VARCHAR, in _flags INTEGER )
+RETURNS TABLE(
+    account hive_accounts.name%TYPE,
+    source VARCHAR,
+    is_blacklisted BOOLEAN -- False means muted
+)
+AS
+$function$
+DECLARE
+  __observer_id INT;
+BEGIN
+  __observer_id = find_account_id( _observer, True );
+  IF (_flags & 1)::BOOLEAN THEN
+    RETURN QUERY SELECT -- mutes_get_blacklisted_for_observer (local observer blacklist)
+        ha.name AS account,
+        _observer AS source,
+        True
+    FROM
+        hive_follows hf
+        JOIN hive_accounts ha ON ha.id = hf.following
+    WHERE
+        hf.follower = __observer_id AND hf.blacklisted
+    ORDER BY account, source;
+  END IF;
+  IF (_flags & 2)::BOOLEAN THEN
+    RETURN QUERY SELECT -- mutes_get_blacklisted_for_observer (indirect observer blacklists)
+        ha_i.name AS account,
+        ha.name AS source,
+        True
+    FROM
+        hive_follows hf
+        JOIN hive_follows hf_i ON hf_i.follower = hf.following
+        JOIN hive_accounts ha_i ON ha_i.id = hf_i.following
+        JOIN hive_accounts ha ON ha.id = hf.following
+    WHERE
+        hf.follower = __observer_id AND hf.follow_blacklists AND hf_i.blacklisted
+    ORDER BY account, source;
+  END IF;
+  IF (_flags & 4)::BOOLEAN THEN
+    RETURN QUERY SELECT-- mutes_get_blacklisted_for_observer (local observer mute list)
+        ha.name AS account,
+        _observer AS source,
+        False
+    FROM
+        hive_follows hf
+        JOIN hive_accounts ha ON ha.id = hf.following
+    WHERE
+        hf.follower = __observer_id AND hf.state = 2
+    ORDER BY account, source;
+  END IF;
+  IF (_flags & 8)::BOOLEAN THEN
+    RETURN QUERY SELECT-- mutes_get_blacklisted_for_observer (indirect observer mute list)
+        ha_i.name AS account,
+        ha.name AS source,
+        False
+    FROM
+        hive_follows hf
+        JOIN hive_follows hf_i ON hf_i.follower = hf.following
+        JOIN hive_accounts ha_i ON ha_i.id = hf_i.following
+        JOIN hive_accounts ha ON ha.id = hf.following
+    WHERE
+        hf.follower = __observer_id AND hf.follow_muted AND hf_i.state = 2
+    ORDER BY account, source;
+  END IF;
+END
+$function$
+language plpgsql STABLE;
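+
+-- Illustrative call sketch (the observer name is hypothetical). _flags is a bitmask selecting the sources:
+-- 1 = observer's own blacklist, 2 = blacklists followed by the observer,
+-- 4 = observer's own mute list, 8 = mute lists followed by the observer; 15 returns all four:
+--   SELECT * FROM mutes_get_blacklisted_for_observer('alice', 15);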
+
+DROP FUNCTION IF EXISTS mutes_get_blacklists_for_observer;
+CREATE FUNCTION mutes_get_blacklists_for_observer( in _observer VARCHAR, in _follow_blacklist BOOLEAN, in _follow_muted BOOLEAN )
+RETURNS TABLE(
+    list hive_accounts.name%TYPE,
+    is_blacklist BOOLEAN -- False means mute list
+)
+AS
+$function$
+DECLARE
+  __observer_id INT;
+BEGIN
+  __observer_id = find_account_id( _observer, True );
+  IF _follow_blacklist THEN
+    RETURN QUERY SELECT -- mutes_get_blacklists_for_observer (observer blacklists)
+        ha.name AS list,
+        True
+    FROM
+        hive_follows hf
+        JOIN hive_accounts ha ON ha.id = hf.following
+    WHERE
+        hf.follower = __observer_id AND hf.follow_blacklists
+    ORDER BY list;
+  END IF;
+  IF _follow_muted THEN
+    RETURN QUERY SELECT -- mutes_get_blacklists_for_observer (observer mute lists)
+        ha.name AS list,
+        False
+    FROM
+        hive_follows hf
+        JOIN hive_accounts ha ON ha.id = hf.following
+    WHERE
+        hf.follower = __observer_id AND hf.follow_muted
+    ORDER BY list;
+  END IF;
+END
+$function$
+language plpgsql STABLE;
diff --git a/hive/db/sql_scripts/notifications_api.sql b/hive/db/sql_scripts/notifications_api.sql
new file mode 100644
index 0000000000000000000000000000000000000000..f51c50d10d6520277c19758726731e67dc4f8183
--- /dev/null
+++ b/hive/db/sql_scripts/notifications_api.sql
@@ -0,0 +1,169 @@
+DROP TYPE IF EXISTS notification CASCADE
+;
+CREATE TYPE notification AS
+(
+  id BIGINT
+, type_id SMALLINT
+, created_at TIMESTAMP
+, src VARCHAR
+, dst VARCHAR
+, author VARCHAR
+, permlink VARCHAR
+, community VARCHAR
+, community_title VARCHAR
+, payload VARCHAR
+, score SMALLINT
+);
+
+DROP FUNCTION IF EXISTS get_number_of_unread_notifications;
+CREATE OR REPLACE FUNCTION get_number_of_unread_notifications(in _account VARCHAR, in _minimum_score SMALLINT)
+RETURNS TABLE( lastread_at TIMESTAMP, unread BIGINT )
+LANGUAGE 'plpgsql' STABLE
+AS
+$BODY$
+DECLARE
+    __account_id INT := 0;
+    __last_read_at TIMESTAMP;
+    __last_read_at_block hive_blocks.num%TYPE;
+    __limit_block hive_blocks.num%TYPE = block_before_head( '90 days' );
+BEGIN
+  __account_id = find_account_id( _account, True );
+
+  SELECT ha.lastread_at INTO __last_read_at
+  FROM hive_accounts ha
+  WHERE ha.id = __account_id;
+
+  --- Warning: the given account may have no lastread_at set, so fall back to the block limit to avoid comparison to NULL.
+  SELECT COALESCE((SELECT hb.num
+                   FROM hive_blocks hb
+                   WHERE hb.created_at <= __last_read_at
+                   ORDER by hb.created_at desc
+                   LIMIT 1), __limit_block)
+    INTO __last_read_at_block;
+
+  RETURN QUERY SELECT
+    __last_read_at as lastread_at,
+    count(1) as unread
+  FROM hive_notification_cache hnv
+  WHERE hnv.dst = __account_id  AND hnv.block_num > __limit_block AND hnv.block_num > __last_read_at_block AND hnv.score >= _minimum_score
+  ;
+END
+$BODY$
+;
+
+DROP FUNCTION IF EXISTS account_notifications;
+
+CREATE OR REPLACE FUNCTION public.account_notifications(
+  _account character varying,
+  _min_score smallint,
+  _last_id bigint,
+  _limit smallint)
+    RETURNS SETOF notification
+    LANGUAGE 'plpgsql'
+    STABLE
+AS $BODY$
+DECLARE
+  __account_id INT;
+  __limit_block hive_blocks.num%TYPE = block_before_head( '90 days' );
+BEGIN
+  __account_id = find_account_id( _account, True );
+  RETURN QUERY SELECT
+      hnv.id
+    , CAST( hnv.type_id as SMALLINT) as type_id
+    , hnv.created_at
+    , hs.name as src
+    , hd.name as dst
+    , ha.name as author
+    , hpd.permlink
+    , hnv.community
+    , hnv.community_title
+    , hnv.payload
+    , CAST( hnv.score as SMALLINT) as score
+  FROM
+  (
+    select nv.id, nv.type_id, nv.created_at, nv.src, nv.dst, nv.dst_post_id, nv.score, nv.community, nv.community_title, nv.payload
+      from hive_notification_cache nv
+  WHERE nv.dst = __account_id  AND nv.block_num > __limit_block AND nv.score >= _min_score AND ( _last_id = 0 OR nv.id < _last_id )
+  ORDER BY nv.id DESC
+  LIMIT _limit
+  ) hnv
+  join hive_posts hp on hnv.dst_post_id = hp.id
+  join hive_accounts ha on hp.author_id = ha.id
+  join hive_accounts hs on hs.id = hnv.src
+  join hive_accounts hd on hd.id = hnv.dst
+  join hive_permlink_data hpd on hp.permlink_id = hpd.id
+  ORDER BY hnv.id DESC
+  LIMIT _limit;
+END
+$BODY$;
+
+DROP FUNCTION IF EXISTS post_notifications
+;
+CREATE OR REPLACE FUNCTION post_notifications(in _author VARCHAR, in _permlink VARCHAR, in _min_score SMALLINT, in _last_id BIGINT, in _limit SMALLINT)
+RETURNS SETOF notification
+AS
+$function$
+DECLARE
+  __post_id INT;
+  __limit_block hive_blocks.num%TYPE = block_before_head( '90 days' );
+BEGIN
+  __post_id = find_comment_id(_author, _permlink, True);
+  RETURN QUERY SELECT
+      hnv.id
+    , CAST( hnv.type_id as SMALLINT) as type_id
+    , hnv.created_at
+    , hs.name as src
+    , hd.name as dst
+    , ha.name as author
+    , hpd.permlink
+    , hnv.community
+    , hnv.community_title
+    , hnv.payload
+    , CAST( hnv.score as SMALLINT) as score
+  FROM
+  (
+    SELECT nv.id, nv.type_id, nv.created_at, nv.src, nv.dst, nv.dst_post_id, nv.score, nv.community, nv.community_title, nv.payload
+    FROM hive_notification_cache nv
+    WHERE nv.post_id = __post_id AND nv.block_num > __limit_block AND nv.score >= _min_score AND ( _last_id = 0 OR nv.id < _last_id )
+    ORDER BY nv.id DESC
+    LIMIT _limit
+  ) hnv
+  JOIN hive_posts hp ON hnv.dst_post_id = hp.id
+  JOIN hive_accounts ha ON hp.author_id = ha.id
+  JOIN hive_accounts hs ON hs.id = hnv.src
+  JOIN hive_accounts hd ON hd.id = hnv.dst
+  JOIN hive_permlink_data hpd ON hp.permlink_id = hpd.id
+  ORDER BY hnv.id DESC
+  LIMIT _limit;
+END
+$function$
+LANGUAGE plpgsql STABLE
+;
+
+DROP FUNCTION IF EXISTS update_notification_cache;
+;
+CREATE OR REPLACE FUNCTION update_notification_cache(in _first_block_num INT, in _last_block_num INT, in _prune_old BOOLEAN)
+RETURNS VOID
+AS
+$function$
+DECLARE
+  __limit_block hive_blocks.num%TYPE = block_before_head( '90 days' );
+BEGIN
+  IF _first_block_num IS NULL THEN
+    TRUNCATE TABLE hive_notification_cache;
+  	ALTER SEQUENCE hive_notification_cache_id_seq RESTART WITH 1;
+  ELSE
+    DELETE FROM hive_notification_cache nc WHERE _prune_old AND nc.block_num <= __limit_block;
+  END IF;
+
+  INSERT INTO hive_notification_cache
+  (block_num, type_id, created_at, src, dst, dst_post_id, post_id, score, payload, community, community_title)
+  SELECT nv.block_num, nv.type_id, nv.created_at, nv.src, nv.dst, nv.dst_post_id, nv.post_id, nv.score, nv.payload, nv.community, nv.community_title
+  FROM hive_raw_notifications_view nv
+  WHERE nv.block_num > __limit_block AND (_first_block_num IS NULL OR nv.block_num BETWEEN _first_block_num AND _last_block_num)
+  ORDER BY nv.block_num, nv.type_id, nv.created_at, nv.src, nv.dst, nv.dst_post_id, nv.post_id
+  ;
+END
+$function$
+LANGUAGE plpgsql VOLATILE
+;
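+
+-- Illustrative call sketches (account name, score and limits are hypothetical); explicit casts are used
+-- because the functions take SMALLINT/BIGINT arguments:
+--   SELECT * FROM get_number_of_unread_notifications('alice', 25::SMALLINT);
+--   SELECT * FROM account_notifications('alice', 25::SMALLINT, 0::BIGINT, 50::SMALLINT);
+--   SELECT update_notification_cache(NULL, NULL, False); -- NULL range truncates and rebuilds the whole cache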
diff --git a/hive/db/sql_scripts/notifications_view.sql b/hive/db/sql_scripts/notifications_view.sql
new file mode 100644
index 0000000000000000000000000000000000000000..e479f431245a2044fe307d309b8d0471b26a0b4a
--- /dev/null
+++ b/hive/db/sql_scripts/notifications_view.sql
@@ -0,0 +1,277 @@
+DROP VIEW IF EXISTS public.hive_accounts_rank_view CASCADE;
+
+CREATE OR REPLACE VIEW public.hive_accounts_rank_view
+AS
+SELECT rank.id,
+CASE
+  WHEN rank."position" < 200 THEN 70
+  WHEN rank."position" < 1000 THEN 60
+  WHEN rank."position" < 6500 THEN 50
+  WHEN rank."position" < 25000 THEN 40
+  WHEN rank."position" < 100000 THEN 30
+  ELSE 20
+END AS score
+FROM
+(
+  SELECT
+    ha.id as id
+    , CASE WHEN ha2.rank ISNULL THEN 10e6 ELSE ha2.rank END AS "position"
+  FROM
+   hive_accounts ha
+  LEFT JOIN
+  (
+    SELECT
+      ha3.id
+    , rank() OVER(order by ha3.reputation DESC) as rank
+    FROM  hive_accounts ha3
+    ORDER BY ha3.reputation DESC LIMIT 150000
+    -- The conditions above (related to rank.position) eliminate all records having rank > 100k, so with some
+    -- additional headroom for redundant accounts (those sharing the same reputation) we limit this to 150k.
+    -- In practice only about 2% of accounts share a reputation (roughly 2000 in 100000), so 150000 is generous
+    -- even if 50% of the positions repeated.
+  ) as ha2 ON ha2.id = ha.id
+) rank
+;
+
+DROP FUNCTION IF EXISTS public.calculate_notify_vote_score(_payout hive_posts.payout%TYPE, _abs_rshares hive_posts_view.abs_rshares%TYPE, _rshares hive_votes.rshares%TYPE) CASCADE
+;
+CREATE OR REPLACE FUNCTION public.calculate_notify_vote_score(_payout hive_posts.payout%TYPE, _abs_rshares hive_posts_view.abs_rshares%TYPE, _rshares hive_votes.rshares%TYPE)
+RETURNS INT
+LANGUAGE 'sql'
+IMMUTABLE
+AS $BODY$
+    SELECT CASE
+        WHEN ((( _payout )/_abs_rshares) * 1000 * _rshares < 20 ) THEN -1
+            ELSE LEAST(100, (LENGTH(CAST( CAST( ( (( _payout )/_abs_rshares) * 1000 * _rshares ) as BIGINT) as text)) - 1) * 25)
+    END;
+$BODY$;
+
+DROP FUNCTION IF EXISTS notification_id CASCADE;
+;
+CREATE OR REPLACE FUNCTION notification_id(in _block_number INTEGER, in _notifyType INTEGER, in _id INTEGER)
+RETURNS BIGINT
+AS
+$function$
+BEGIN
+RETURN CAST( _block_number as BIGINT ) << 36
+       | ( _notifyType << 28 )
+       | ( _id & CAST( x'0FFFFFFF' as BIGINT) );
+END
+$function$
+LANGUAGE plpgsql IMMUTABLE
+;
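+
+-- Intended id layout, as read from the expression above: the block number occupies the bits above bit 36,
+-- the notification type sits above bit 28, and the low 28 bits come from the row id (masked with x'0FFFFFFF');
+-- e.g. the follow notifications defined later in this file use:
+--   SELECT notification_id(hf.block_num, 15, hf.id) FROM hive_follows hf LIMIT 1;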
+
+DROP FUNCTION IF EXISTS public.calculate_value_of_vote_on_post CASCADE;
+CREATE OR REPLACE FUNCTION public.calculate_value_of_vote_on_post(
+    _post_payout hive_posts.payout%TYPE
+  , _post_rshares hive_posts_view.rshares%TYPE
+  , _vote_rshares hive_votes.rshares%TYPE)
+RETURNS FLOAT
+LANGUAGE 'sql'
+IMMUTABLE
+AS $BODY$
+    SELECT CASE _post_rshares != 0
+              WHEN TRUE THEN CAST( ( _post_payout/_post_rshares ) * _vote_rshares as FLOAT)
+           ELSE
+              CAST(0 AS FLOAT)
+           END
+$BODY$;
+
+
+-- View: public.hive_raw_notifications_as_view
+
+DROP VIEW IF EXISTS public.hive_raw_notifications_as_view CASCADE;
+CREATE OR REPLACE VIEW public.hive_raw_notifications_as_view
+ AS
+ SELECT notifs.block_num,
+    notifs.id,
+    notifs.post_id,
+    notifs.type_id,
+    notifs.created_at,
+    notifs.src,
+    notifs.dst,
+    notifs.dst_post_id,
+    notifs.community,
+    notifs.community_title,
+    notifs.payload,
+    harv.score
+   FROM ( SELECT hpv.block_num,
+            notification_id(hpv.block_num,
+                CASE hpv.depth
+                    WHEN 1 THEN 12
+                    ELSE 13
+                END, hpv.id) AS id,
+            hpv.parent_id AS post_id,
+                CASE hpv.depth
+                    WHEN 1 THEN 12
+                    ELSE 13
+                END AS type_id,
+            hpv.created_at,
+            hpv.author_id AS src,
+            hpv.parent_author_id AS dst,
+            hpv.parent_id as dst_post_id,
+            ''::character varying(16) AS community,
+            ''::character varying AS community_title,
+            ''::character varying AS payload
+           FROM hive_posts_pp_view hpv
+                  WHERE hpv.depth > 0 AND
+                        NOT EXISTS (SELECT NULL::text
+                                    FROM hive_follows hf
+                                    WHERE hf.follower = hpv.parent_author_id AND hf.following = hpv.author_id AND hf.state = 2)
+UNION ALL
+ SELECT hf.block_num,
+    notification_id(hf.block_num, 15, hf.id) AS id,
+    0 AS post_id,
+    15 AS type_id,
+    hb.created_at,
+    hf.follower AS src,
+    hf.following AS dst,
+    0 as dst_post_id,
+    ''::character varying(16) AS community,
+    ''::character varying AS community_title,
+    ''::character varying AS payload
+   FROM hive_follows hf
+   JOIN hive_blocks hb ON hb.num = hf.block_num - 1 -- use time of previous block to match head_block_time behavior at given block
+   WHERE hf.state = 1 --only follow blog
+
+UNION ALL
+ SELECT hr.block_num,
+    notification_id(hr.block_num, 14, hr.id) AS id,
+    hp.id AS post_id,
+    14 AS type_id,
+    hr.created_at,
+    hr.blogger_id AS src,
+    hp.author_id AS dst,
+    hr.post_id as dst_post_id,
+    ''::character varying(16) AS community,
+    ''::character varying AS community_title,
+    ''::character varying AS payload
+   FROM hive_reblogs hr
+   JOIN hive_posts hp ON hr.post_id = hp.id
+UNION ALL
+ SELECT hs.block_num,
+    notification_id(hs.block_num, 11, hs.id) AS id,
+    0 AS post_id,
+    11 AS type_id,
+    hs.created_at,
+    hs.account_id AS src,
+    hs.community_id AS dst,
+    0 as dst_post_id,
+    hc.name AS community,
+    hc.title AS community_title,
+    ''::character varying AS payload
+   FROM hive_subscriptions hs
+   JOIN hive_communities hc ON hs.community_id = hc.id
+UNION ALL
+ SELECT hm.block_num,
+    notification_id(hm.block_num, 16, hm.id) AS id,
+    hm.post_id,
+    16 AS type_id,
+    hp.created_at,
+    hp.author_id AS src,
+    hm.account_id AS dst,
+    hm.post_id as dst_post_id,
+    ''::character varying(16) AS community,
+    ''::character varying AS community_title,
+    ''::character varying AS payload
+   FROM hive_mentions hm
+   JOIN hive_posts hp ON hm.post_id = hp.id
+) notifs
+JOIN hive_accounts_rank_view harv ON harv.id = notifs.src
+;
+
+DROP VIEW IF EXISTS hive_raw_notifications_view_noas cascade;
+CREATE OR REPLACE VIEW hive_raw_notifications_view_noas
+AS
+SELECT -- votes
+      vn.block_num
+    , vn.id
+    , vn.post_id
+    , vn.type_id
+    , vn.created_at
+    , vn.src
+    , vn.dst
+    , vn.dst_post_id
+    , vn.community
+    , vn.community_title
+    , CASE
+        WHEN vn.vote_value < 0.01 THEN ''::VARCHAR
+        ELSE CAST( to_char(vn.vote_value, '($FM99990.00)') AS VARCHAR )
+      END as payload
+    , vn.score
+FROM
+  (
+    SELECT
+        hv1.block_num
+      , notification_id(hv1.block_num, 17, hv1.id::integer) AS id
+      , hpv.id AS post_id
+      , 17 AS type_id
+      , hv1.last_update AS created_at
+      , hv1.voter_id AS src
+      , hpv.author_id AS dst
+      , hpv.id AS dst_post_id
+      , ''::VARCHAR(16) AS community
+      , ''::VARCHAR AS community_title
+      , calculate_value_of_vote_on_post(hpv.payout + hpv.pending_payout, hpv.rshares, hv1.rshares) AS vote_value
+      , calculate_notify_vote_score(hpv.payout + hpv.pending_payout, hpv.abs_rshares, hv1.rshares) AS score
+    FROM hive_votes hv1
+    JOIN
+      (
+        SELECT
+            hpvi.id
+          , hpvi.author_id
+          , hpvi.payout
+          , hpvi.pending_payout
+          , hpvi.abs_rshares
+          , hpvi.vote_rshares as rshares
+         FROM hive_posts hpvi
+         WHERE hpvi.block_num > block_before_head('97 days'::interval)
+       ) hpv ON hv1.post_id = hpv.id
+    WHERE hv1.rshares >= 10e9
+  ) as vn
+  WHERE vn.vote_value >= 0.02
+UNION ALL
+  SELECT -- new community
+      hc.block_num as block_num
+      , notification_id(hc.block_num, 11, hc.id) as id
+      , 0 as post_id
+      , 1 as type_id
+      , hc.created_at as created_at
+      , 0 as src
+      , hc.id as dst
+      , 0 as dst_post_id
+      , hc.name as community
+      , ''::VARCHAR as community_title
+      , ''::VARCHAR as payload
+      , 35 as score
+  FROM
+      hive_communities hc
+UNION ALL
+  SELECT --persistent notifs
+       hn.block_num
+     , notification_id(hn.block_num, hn.type_id, CAST( hn.id as INT) ) as id
+     , hn.post_id as post_id
+     , hn.type_id as type_id
+     , hn.created_at as created_at
+     , hn.src_id as src
+     , hn.dst_id as dst
+     , hn.post_id as dst_post_id
+     , hc.name as community
+     , hc.title as community_title
+     , hn.payload as payload
+     , hn.score as score
+  FROM hive_notifs hn
+  JOIN hive_communities hc ON hn.community_id = hc.id
+;
+
+DROP VIEW IF EXISTS hive_raw_notifications_view CASCADE;
+CREATE OR REPLACE VIEW hive_raw_notifications_view
+AS
+SELECT *
+FROM
+  (
+  SELECT * FROM hive_raw_notifications_as_view
+  UNION ALL
+  SELECT * FROM hive_raw_notifications_view_noas
+  ) as notifs
+WHERE notifs.score >= 0;
diff --git a/hive/db/sql_scripts/payout_stats_view.sql b/hive/db/sql_scripts/payout_stats_view.sql
new file mode 100644
index 0000000000000000000000000000000000000000..0eac00acfd9dcabef666e2e75aae13e66437e0bc
--- /dev/null
+++ b/hive/db/sql_scripts/payout_stats_view.sql
@@ -0,0 +1,31 @@
+DROP MATERIALIZED VIEW IF EXISTS payout_stats_view;
+
+CREATE MATERIALIZED VIEW payout_stats_view AS
+  SELECT
+      hp1.community_id,
+      ha.name AS author,
+      SUM( hp1.payout + hp1.pending_payout ) AS payout,
+      COUNT(*) AS posts,
+      NULL AS authors
+  FROM hive_posts hp1
+      JOIN hive_accounts ha ON ha.id = hp1.author_id
+  WHERE hp1.counter_deleted = 0 AND NOT hp1.is_paidout AND hp1.id != 0
+  GROUP BY community_id, author
+
+  UNION ALL
+
+  SELECT
+        hp2.community_id,
+        NULL AS author,
+        SUM( hp2.payout + hp2.pending_payout ) AS payout,
+        COUNT(*) AS posts,
+        COUNT(DISTINCT(author_id)) AS authors
+  FROM hive_posts hp2
+  WHERE hp2.counter_deleted = 0 AND NOT hp2.is_paidout AND hp2.id != 0
+  GROUP BY community_id
+
+WITH DATA
+;
+
+CREATE UNIQUE INDEX IF NOT EXISTS payout_stats_view_ix1 ON payout_stats_view (community_id, author );
+CREATE INDEX IF NOT EXISTS payout_stats_view_ix2 ON payout_stats_view (community_id, author, payout);
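+
+-- The UNIQUE index above also makes a non-blocking refresh possible; an illustrative refresh call
+-- (a plain REFRESH works as well but locks readers):
+--   REFRESH MATERIALIZED VIEW CONCURRENTLY payout_stats_view;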
diff --git a/hive/db/sql_scripts/postgres_handle_view_changes.sql b/hive/db/sql_scripts/postgres_handle_view_changes.sql
new file mode 100644
index 0000000000000000000000000000000000000000..5bd7f8b42b5d9265a832f608d78428cccecfefe3
--- /dev/null
+++ b/hive/db/sql_scripts/postgres_handle_view_changes.sql
@@ -0,0 +1,197 @@
+/**
+Easy way to drop and recreate table or view dependencies, when you need to alter
+something in them.
+See http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/.
+Enhanced by Wojciech Barcik wbarcik@syncad.com (handling of rules).
+*/
+
+DROP TABLE IF EXISTS deps_saved_ddl;
+DROP SEQUENCE IF EXISTS deps_saved_ddl_deps_id_seq;
+
+-- SEQUENCE: deps_saved_ddl_deps_id_seq
+
+-- DROP SEQUENCE deps_saved_ddl_deps_id_seq;
+
+CREATE SEQUENCE if not exists deps_saved_ddl_deps_id_seq
+    INCREMENT 1
+    START 1
+    MINVALUE 1
+    MAXVALUE 9223372036854775807
+    CACHE 1;
+
+
+-- Table: deps_saved_ddl
+
+-- DROP TABLE deps_saved_ddl;
+
+CREATE TABLE if not exists deps_saved_ddl
+(
+    deps_id integer NOT NULL DEFAULT nextval('deps_saved_ddl_deps_id_seq'::regclass),
+    deps_view_schema character varying(255),
+    deps_view_name character varying(255),
+    deps_ddl_to_run text,
+    CONSTRAINT deps_saved_ddl_pkey PRIMARY KEY (deps_id)
+)
+;
+
+-- FUNCTION: deps_save_and_drop_dependencies(character varying, character varying, boolean)
+
+-- DROP FUNCTION deps_save_and_drop_dependencies(character varying, character varying, boolean);
+
+CREATE OR REPLACE FUNCTION deps_save_and_drop_dependencies(
+    p_view_schema character varying,
+    p_view_name character varying,
+    drop_relation boolean DEFAULT true
+  )
+  RETURNS void
+  LANGUAGE 'plpgsql'
+  COST 100
+  VOLATILE
+AS $BODY$
+/**
+From http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/
+@wojtek added DDL for rules.
+
+Drops dependencies of view, but saves them into table `deps_saved_ddl`, for
+future restoration. Use function `deps_restore_dependencies` to restore
+dependencies dropped by this function.
+*/
+declare
+  v_curr record;
+begin
+for v_curr in
+(
+  select obj_schema, obj_name, obj_type from
+  (
+  with recursive recursive_deps(obj_schema, obj_name, obj_type, depth) as
+  (
+    select p_view_schema, p_view_name, null::varchar, 0
+    union
+    select dep_schema::varchar, dep_name::varchar, dep_type::varchar,
+        recursive_deps.depth + 1 from
+    (
+      select ref_nsp.nspname ref_schema, ref_cl.relname ref_name,
+          rwr_cl.relkind dep_type, rwr_nsp.nspname dep_schema,
+          rwr_cl.relname dep_name
+      from pg_depend dep
+      join pg_class ref_cl on dep.refobjid = ref_cl.oid
+      join pg_namespace ref_nsp on ref_cl.relnamespace = ref_nsp.oid
+      join pg_rewrite rwr on dep.objid = rwr.oid
+      join pg_class rwr_cl on rwr.ev_class = rwr_cl.oid
+      join pg_namespace rwr_nsp on rwr_cl.relnamespace = rwr_nsp.oid
+      where dep.deptype = 'n'
+      and dep.classid = 'pg_rewrite'::regclass
+    ) deps
+    join recursive_deps on deps.ref_schema = recursive_deps.obj_schema
+        and deps.ref_name = recursive_deps.obj_name
+    where (deps.ref_schema != deps.dep_schema or deps.ref_name != deps.dep_name)
+  )
+  select obj_schema, obj_name, obj_type, depth
+  from recursive_deps
+  where depth > 0
+  ) t
+  group by obj_schema, obj_name, obj_type
+  order by max(depth) desc
+) loop
+
+  insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+  select p_view_schema, p_view_name, 'COMMENT ON ' ||
+  case
+    when c.relkind = 'v' then 'VIEW'
+    when c.relkind = 'm' then 'MATERIALIZED VIEW'
+  else ''
+  end
+  || ' ' || n.nspname || '.' || c.relname || ' IS '''
+      || replace(d.description, '''', '''''') || ''';'
+  from pg_class c
+  join pg_namespace n on n.oid = c.relnamespace
+  join pg_description d on d.objoid = c.oid and d.objsubid = 0
+  where n.nspname = v_curr.obj_schema and c.relname = v_curr.obj_name
+      and d.description is not null;
+
+  insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+  select p_view_schema, p_view_name, 'COMMENT ON COLUMN ' || n.nspname || '.'
+      || c.relname || '.' || a.attname || ' IS '''
+      || replace(d.description, '''', '''''') || ''';'
+  from pg_class c
+  join pg_attribute a on c.oid = a.attrelid
+  join pg_namespace n on n.oid = c.relnamespace
+  join pg_description d on d.objoid = c.oid and d.objsubid = a.attnum
+  where n.nspname = v_curr.obj_schema and c.relname = v_curr.obj_name
+      and d.description is not null;
+
+  insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+  select p_view_schema, p_view_name, 'GRANT ' || privilege_type || ' ON '
+      || table_schema || '.' || table_name || ' TO ' || grantee
+  from information_schema.role_table_grants
+  where table_schema = v_curr.obj_schema and table_name = v_curr.obj_name;
+
+  if v_curr.obj_type = 'v' then
+
+    insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+    select p_view_schema, p_view_name, definition
+    from pg_catalog.pg_rules
+    where schemaname = v_curr.obj_schema and tablename = v_curr.obj_name;
+
+    insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+    select p_view_schema, p_view_name, 'CREATE VIEW '
+        || v_curr.obj_schema || '.' || v_curr.obj_name || ' AS ' || view_definition
+    from information_schema.views
+    where table_schema = v_curr.obj_schema and table_name = v_curr.obj_name;
+
+  elsif v_curr.obj_type = 'm' then
+    insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+    select p_view_schema, p_view_name, 'CREATE MATERIALIZED VIEW '
+        || v_curr.obj_schema || '.' || v_curr.obj_name || ' AS ' || definition
+    from pg_matviews
+    where schemaname = v_curr.obj_schema and matviewname = v_curr.obj_name;
+  end if;
+
+  if drop_relation = true then
+    execute 'DROP ' ||
+    case
+      when v_curr.obj_type = 'v' then 'VIEW'
+      when v_curr.obj_type = 'm' then 'MATERIALIZED VIEW'
+    end
+    || ' ' || v_curr.obj_schema || '.' || v_curr.obj_name;
+  end if;
+
+end loop;
+end;
+$BODY$;
+
+
+-- FUNCTION: deps_restore_dependencies(character varying, character varying)
+
+-- DROP FUNCTION deps_restore_dependencies(character varying, character varying);
+
+CREATE OR REPLACE FUNCTION deps_restore_dependencies(
+    p_view_schema character varying,
+    p_view_name character varying
+  )
+  RETURNS void
+  LANGUAGE 'plpgsql'
+  COST 100
+  VOLATILE
+AS $BODY$
+/**
+From http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/
+
+Restores dependencies dropped by function `deps_save_and_drop_dependencies`.
+*/
+declare
+  v_curr record;
+begin
+for v_curr in
+(
+  select deps_ddl_to_run
+  from deps_saved_ddl
+  where deps_view_schema = p_view_schema and deps_view_name = p_view_name
+  order by deps_id desc
+) loop
+  execute v_curr.deps_ddl_to_run;
+end loop;
+delete from deps_saved_ddl
+where deps_view_schema = p_view_schema and deps_view_name = p_view_name;
+end;
+$BODY$;
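+
+-- Illustrative workflow sketch: snapshot and drop everything depending on a view, alter it, then restore
+-- the dependants recorded in deps_saved_ddl (hive_posts_view is used here only as an example):
+--   SELECT deps_save_and_drop_dependencies('public', 'hive_posts_view');
+--   -- ... DROP/CREATE or ALTER the view here ...
+--   SELECT deps_restore_dependencies('public', 'hive_posts_view');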
diff --git a/hive/db/sql_scripts/update_communities_rank.sql b/hive/db/sql_scripts/update_communities_rank.sql
new file mode 100644
index 0000000000000000000000000000000000000000..91d846bec1520ae5ab452ac5f5db3a7282a590a2
--- /dev/null
+++ b/hive/db/sql_scripts/update_communities_rank.sql
@@ -0,0 +1,35 @@
+DROP FUNCTION IF EXISTS update_communities_posts_data_and_rank;
+CREATE FUNCTION update_communities_posts_data_and_rank()
+RETURNS void
+AS
+$function$
+UPDATE hive_communities hc SET
+  num_pending = cr.posts,
+	sum_pending = cr.payouts,
+	num_authors = cr.authors,
+	rank = cr.rank
+FROM
+(
+		SELECT
+			c.id as id,
+			ROW_NUMBER() OVER ( ORDER BY COALESCE(p.payouts, 0) DESC, COALESCE(p.authors, 0) DESC, COALESCE(p.posts, 0) DESC, c.subscribers DESC, (CASE WHEN c.title = '' THEN 1 ELSE 0 END), c.id DESC ) as rank,
+			COALESCE(p.posts, 0) as posts,
+			COALESCE(p.payouts, 0) as payouts,
+			COALESCE(p.authors, 0) as authors
+		FROM hive_communities c
+		LEFT JOIN (
+              SELECT hp.community_id,
+                     COUNT(*) posts,
+                     ROUND(SUM(hp.pending_payout)) payouts,
+                     COUNT(DISTINCT hp.author_id) authors
+                FROM hive_posts hp
+               WHERE community_id IS NOT NULL
+                 AND NOT hp.is_paidout
+                 AND hp.counter_deleted = 0
+            GROUP BY hp.community_id
+         ) p
+         ON p.community_id = c.id
+) as cr
+WHERE hc.id = cr.id;
+$function$
+language sql;
diff --git a/hive/db/sql_scripts/update_feed_cache.sql b/hive/db/sql_scripts/update_feed_cache.sql
new file mode 100644
index 0000000000000000000000000000000000000000..62fd1a108694f612a21d684447c10d73d44a45f3
--- /dev/null
+++ b/hive/db/sql_scripts/update_feed_cache.sql
@@ -0,0 +1,27 @@
+DROP FUNCTION IF EXISTS update_feed_cache(in _from_block_num INTEGER, in _to_block_num INTEGER);
+CREATE OR REPLACE FUNCTION update_feed_cache(in _from_block_num INTEGER, in _to_block_num INTEGER)
+RETURNS void
+LANGUAGE 'plpgsql'
+VOLATILE
+AS $BODY$
+BEGIN
+    INSERT INTO
+      hive_feed_cache (account_id, post_id, created_at, block_num)
+    SELECT
+      hp.author_id, hp.id, hp.created_at, hp.block_num
+    FROM
+      hive_posts hp
+    WHERE hp.depth = 0 AND hp.counter_deleted = 0 AND ((_from_block_num IS NULL AND _to_block_num IS NULL) OR (hp.block_num BETWEEN _from_block_num AND _to_block_num))
+    ON CONFLICT DO NOTHING;
+
+    INSERT INTO
+      hive_feed_cache (account_id, post_id, created_at, block_num)
+    SELECT
+      hr.blogger_id, hr.post_id, hr.created_at, hr.block_num
+    FROM
+      hive_reblogs hr
+    WHERE (_from_block_num IS NULL AND _to_block_num IS NULL) OR (hr.block_num BETWEEN _from_block_num AND _to_block_num)
+    ON CONFLICT DO NOTHING;
+END
+$BODY$;
+
diff --git a/hive/db/sql_scripts/update_follow_count.sql b/hive/db/sql_scripts/update_follow_count.sql
new file mode 100644
index 0000000000000000000000000000000000000000..90376dda9d8e8dda680e110b7b6bcd96828e311f
--- /dev/null
+++ b/hive/db/sql_scripts/update_follow_count.sql
@@ -0,0 +1,33 @@
+DROP FUNCTION IF EXISTS update_follow_count(hive_blocks.num%TYPE, hive_blocks.num%TYPE);
+CREATE OR REPLACE FUNCTION update_follow_count(
+  in _first_block hive_blocks.num%TYPE,
+  in _last_block hive_blocks.num%TYPE
+)
+RETURNS VOID
+LANGUAGE 'plpgsql'
+AS
+$BODY$
+BEGIN
+UPDATE hive_accounts ha
+SET
+  followers = data_set.followers_count,
+  following = data_set.following_count
+FROM
+  (
+    WITH data_cfe(user_id) AS (
+      SELECT DISTINCT following FROM hive_follows WHERE block_num BETWEEN _first_block AND _last_block
+      UNION 
+      SELECT DISTINCT follower FROM hive_follows WHERE block_num BETWEEN _first_block AND _last_block
+    )
+    SELECT
+        data_cfe.user_id AS user_id,
+        (SELECT COUNT(1) FROM hive_follows hf1 WHERE hf1.following = data_cfe.user_id AND hf1.state = 1) AS followers_count,
+        (SELECT COUNT(1) FROM hive_follows hf2 WHERE hf2.follower = data_cfe.user_id AND hf2.state = 1) AS following_count
+    FROM
+        data_cfe
+  ) AS data_set(user_id, followers_count, following_count)
+WHERE
+  ha.id = data_set.user_id;
+END
+$BODY$
+;
\ No newline at end of file
diff --git a/hive/db/sql_scripts/update_hive_post_root_id.sql b/hive/db/sql_scripts/update_hive_post_root_id.sql
new file mode 100644
index 0000000000000000000000000000000000000000..756cb5688a32684f15e22b9376bf59d06d05b90b
--- /dev/null
+++ b/hive/db/sql_scripts/update_hive_post_root_id.sql
@@ -0,0 +1,16 @@
+DROP FUNCTION IF EXISTS public.update_hive_posts_root_id(INTEGER, INTEGER);
+
+CREATE OR REPLACE FUNCTION public.update_hive_posts_root_id(in _first_block_num INTEGER, _last_block_num INTEGER)
+    RETURNS void
+    LANGUAGE 'plpgsql'
+    VOLATILE
+AS $BODY$
+BEGIN
+
+--- _first_block_num can be NULL (together with _last_block_num) to process all posts
+UPDATE hive_posts uhp
+SET root_id = id
+WHERE uhp.root_id = 0 AND (_first_block_num IS NULL OR (uhp.block_num >= _first_block_num AND uhp.block_num <= _last_block_num))
+;
+END
+$BODY$;
diff --git a/hive/db/sql_scripts/update_hive_posts_api_helper.sql b/hive/db/sql_scripts/update_hive_posts_api_helper.sql
new file mode 100644
index 0000000000000000000000000000000000000000..21f6659c7c9df6902d464a866f76ab344a658c16
--- /dev/null
+++ b/hive/db/sql_scripts/update_hive_posts_api_helper.sql
@@ -0,0 +1,29 @@
+DROP FUNCTION IF EXISTS public.update_hive_posts_api_helper(INTEGER, INTEGER);
+
+CREATE OR REPLACE FUNCTION public.update_hive_posts_api_helper(in _first_block_num INTEGER, _last_block_num INTEGER)
+  RETURNS void
+  LANGUAGE 'plpgsql'
+  VOLATILE
+AS $BODY$
+BEGIN
+IF _first_block_num IS NULL OR _last_block_num IS NULL THEN
+  -- initial creation of table.
+  INSERT INTO hive_posts_api_helper
+  (id, author_s_permlink)
+  SELECT hp.id, hp.author || '/' || hp.permlink
+  FROM hive_posts_view hp
+  ;
+ELSE
+  -- Regular incremental update.
+  INSERT INTO hive_posts_api_helper
+  (id, author_s_permlink)
+  SELECT hp.id, hp.author || '/' || hp.permlink
+  FROM hive_posts_view hp
+  WHERE hp.block_num BETWEEN _first_block_num AND _last_block_num AND
+          NOT EXISTS (SELECT NULL FROM hive_posts_api_helper h WHERE h.id = hp.id)
+  ;
+END IF;
+
+END
+$BODY$
+;
diff --git a/hive/db/sql_scripts/update_hive_posts_children_count.sql b/hive/db/sql_scripts/update_hive_posts_children_count.sql
new file mode 100644
index 0000000000000000000000000000000000000000..1a91e44bf1481e47f6719ac63a767559b91d3ce8
--- /dev/null
+++ b/hive/db/sql_scripts/update_hive_posts_children_count.sql
@@ -0,0 +1,111 @@
+DROP FUNCTION IF EXISTS public.update_hive_posts_children_count;
+CREATE OR REPLACE FUNCTION public.update_hive_posts_children_count(in _first_block INTEGER, in _last_block INTEGER)
+  RETURNS void
+  LANGUAGE 'plpgsql'
+  VOLATILE
+AS $BODY$
+BEGIN
+UPDATE hive_posts uhp
+SET children = data_source.delta + uhp.children
+FROM
+(
+WITH recursive tblChild AS
+(
+  SELECT
+    s.queried_parent as queried_parent
+  , s.id as id
+  , s.depth as depth
+  , (s.delta_created + s.delta_deleted) as delta
+  FROM
+  (
+  SELECT
+      h1.parent_id AS queried_parent
+    , h1.id as id
+    , h1.depth as depth
+    , (
+      CASE
+        WHEN (h1.block_num_created BETWEEN _first_block AND _last_block ) THEN 1
+        ELSE 0
+      END
+      ) as delta_created
+    , (
+      CASE
+        -- assumption that deleted post cannot be edited
+        WHEN h1.counter_deleted != 0 THEN -1
+        ELSE 0
+      END
+      ) as delta_deleted
+  FROM hive_posts h1
+  WHERE h1.block_num BETWEEN _first_block AND _last_block OR h1.block_num_created BETWEEN _first_block AND _last_block
+  ORDER BY h1.depth DESC
+  ) s
+  UNION ALL
+  SELECT
+    p.parent_id as queried_parent
+  , p.id as id
+  , p.depth as depth
+  , tblChild.delta as delta
+  FROM hive_posts p
+  JOIN tblChild  ON p.id = tblChild.queried_parent
+  WHERE p.depth < tblChild.depth
+)
+SELECT
+    queried_parent
+  , SUM(delta) as delta
+FROM
+  tblChild
+GROUP BY queried_parent
+) data_source
+WHERE uhp.id = data_source.queried_parent
+;
+END
+$BODY$;
+
+DROP FUNCTION IF EXISTS public.update_all_hive_posts_children_count;
+CREATE OR REPLACE FUNCTION public.update_all_hive_posts_children_count()
+  RETURNS void
+  LANGUAGE 'plpgsql'
+  VOLATILE
+AS $BODY$
+declare __depth INT;
+BEGIN
+  SELECT MAX(hp.depth) into __depth FROM hive_posts hp ;
+
+  CREATE UNLOGGED TABLE IF NOT EXISTS __post_children
+  (
+    id INT NOT NULL,
+    child_count INT NOT NULL,
+    CONSTRAINT __post_children_pkey PRIMARY KEY (id)
+  );
+
+  TRUNCATE TABLE __post_children;
+  
+  WHILE __depth >= 0 LOOP
+    INSERT INTO __post_children
+    (id, child_count)
+      SELECT
+        h1.parent_id AS queried_parent,
+        SUM(COALESCE((SELECT pc.child_count FROM __post_children pc WHERE pc.id = h1.id),
+                      0
+                    ) + 1
+        ) AS count
+      FROM hive_posts h1
+      WHERE (h1.parent_id != 0 OR __depth = 0) AND h1.counter_deleted = 0 AND h1.id != 0 AND h1.depth = __depth
+      GROUP BY h1.parent_id
+
+    ON CONFLICT ON CONSTRAINT __post_children_pkey DO UPDATE
+      SET child_count = __post_children.child_count + excluded.child_count
+    ;
+
+    __depth := __depth -1;
+  END LOOP;
+
+  UPDATE hive_posts uhp
+  SET children = s.child_count
+  FROM
+  __post_children s 
+  WHERE s.id = uhp.id and s.child_count != uhp.children
+  ;
+
+END
+$BODY$;
diff --git a/hive/db/sql_scripts/update_hive_posts_mentions.sql b/hive/db/sql_scripts/update_hive_posts_mentions.sql
new file mode 100644
index 0000000000000000000000000000000000000000..0b2e8517501b7745ac2667ca95d45f39e88c9238
--- /dev/null
+++ b/hive/db/sql_scripts/update_hive_posts_mentions.sql
@@ -0,0 +1,38 @@
+DROP FUNCTION IF EXISTS update_hive_posts_mentions(INTEGER, INTEGER);
+
+CREATE OR REPLACE FUNCTION update_hive_posts_mentions(in _first_block INTEGER, in _last_block INTEGER)
+RETURNS VOID
+LANGUAGE 'plpgsql'
+AS
+$function$
+DECLARE
+  __block_limit INT := 1200*24*90; --- 1200 blocks per hour * 24 hours * 90 days
+BEGIN
+
+  IF (_last_block - __block_limit) > _first_block THEN
+    _first_block = _last_block - __block_limit;
+  END IF;
+
+  INSERT INTO hive_mentions( post_id, account_id, block_num )
+    SELECT DISTINCT T.id_post, ha.id, T.block_num
+    FROM
+      hive_accounts ha
+    INNER JOIN
+    (
+      SELECT T.id_post, LOWER( ( SELECT trim( T.mention::text, '{""}') ) ) AS mention, T.author_id, T.block_num
+      FROM
+      (
+        SELECT
+          hp.id, REGEXP_MATCHES( hpd.body, '(?:^|[^a-zA-Z0-9_!#$%&*@\\/])(?:@)([a-zA-Z0-9\\.-]{1,16}[a-zA-Z0-9])(?![a-z])', 'g') AS mention, hp.author_id, hp.block_num
+        FROM hive_posts hp
+        INNER JOIN hive_post_data hpd ON hp.id = hpd.id
+        WHERE hp.block_num >= _first_block
+      )T( id_post, mention, author_id, block_num )
+    )T( id_post, mention, author_id, block_num ) ON ha.name = T.mention
+    WHERE ha.id != T.author_id
+    ORDER BY T.block_num, T.id_post, ha.id
+  ON CONFLICT DO NOTHING;
+
+END
+$function$
+;
diff --git a/hive/db/sql_scripts/update_posts_rshares.sql b/hive/db/sql_scripts/update_posts_rshares.sql
new file mode 100644
index 0000000000000000000000000000000000000000..98210253c512ac9092af2fd48824564c10c1acad
--- /dev/null
+++ b/hive/db/sql_scripts/update_posts_rshares.sql
@@ -0,0 +1,54 @@
+DROP FUNCTION IF EXISTS update_posts_rshares;
+CREATE OR REPLACE FUNCTION update_posts_rshares(
+    _first_block hive_blocks.num%TYPE
+  , _last_block hive_blocks.num%TYPE
+)
+RETURNS VOID
+LANGUAGE 'plpgsql'
+VOLATILE
+AS
+$BODY$
+BEGIN
+SET LOCAL work_mem='2GB';
+SET LOCAL enable_seqscan=False;
+UPDATE hive_posts hp
+SET
+    abs_rshares = votes_rshares.abs_rshares
+  , vote_rshares = votes_rshares.rshares
+  , sc_hot = CASE hp.is_paidout WHEN True Then 0 ELSE calculate_hot( votes_rshares.rshares, hp.created_at) END
+  , sc_trend = CASE hp.is_paidout WHEN True Then 0 ELSE calculate_tranding( votes_rshares.rshares, hp.created_at) END
+  , total_votes = votes_rshares.total_votes
+  , net_votes = votes_rshares.net_votes
+FROM
+  (
+    SELECT
+        hv.post_id
+      , SUM( hv.rshares ) as rshares
+      , SUM( ABS( hv.rshares ) ) as abs_rshares
+      , SUM( CASE hv.is_effective WHEN True THEN 1 ELSE 0 END ) as total_votes
+      , SUM( CASE
+              WHEN hv.rshares > 0 THEN 1
+              WHEN hv.rshares = 0 THEN 0
+              ELSE -1
+            END ) as net_votes
+    FROM hive_votes hv
+    WHERE EXISTS
+      (
+        SELECT NULL
+        FROM hive_votes hv2
+        WHERE hv2.post_id = hv.post_id AND hv2.block_num BETWEEN _first_block AND _last_block
+      )
+    GROUP BY hv.post_id
+  ) as votes_rshares
+WHERE hp.id = votes_rshares.post_id
+AND (
+  hp.abs_rshares != votes_rshares.abs_rshares
+  OR hp.vote_rshares != votes_rshares.rshares
+  OR hp.total_votes != votes_rshares.total_votes
+  OR hp.net_votes != votes_rshares.net_votes
+);
+RESET work_mem;
+RESET enable_seqscan;
+END;
+$BODY$
+;
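+
+-- Illustrative call sketch with a hypothetical block range, as a sync batch would invoke it:
+--   SELECT update_posts_rshares(4999001, 5000017);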
diff --git a/hive/db/sql_scripts/upgrade/do_conditional_vacuum.sql b/hive/db/sql_scripts/upgrade/do_conditional_vacuum.sql
new file mode 100644
index 0000000000000000000000000000000000000000..2c18d0c181cc1d1d166b826cdc707429cf65e84c
--- /dev/null
+++ b/hive/db/sql_scripts/upgrade/do_conditional_vacuum.sql
@@ -0,0 +1,16 @@
+SELECT COALESCE((SELECT hd.vacuum_needed FROM hive_db_vacuum_needed hd WHERE hd.vacuum_needed LIMIT 1), False) AS needs_vacuum
+\gset
+\if :needs_vacuum
+\qecho Running VACUUM on the database
+VACUUM FULL VERBOSE ANALYZE;
+\qecho Waiting 1 second...
+SELECT pg_sleep(1);
+SELECT relname, n_dead_tup AS n_dead_tup_now
+,      n_live_tup AS n_live_tup_now
+FROM pg_stat_user_tables
+WHERE relname like 'hive_%';
+
+\else
+\qecho Skipping VACUUM on the database...
+\endif
+
diff --git a/hive/db/sql_scripts/upgrade/upgrade_runtime_migration.sql b/hive/db/sql_scripts/upgrade/upgrade_runtime_migration.sql
new file mode 100644
index 0000000000000000000000000000000000000000..8e31ee131709ef8afbebc272264f20c917bf8510
--- /dev/null
+++ b/hive/db/sql_scripts/upgrade/upgrade_runtime_migration.sql
@@ -0,0 +1,232 @@
+
+START TRANSACTION;
+
+DO
+$BODY$
+BEGIN
+SET work_mem='2GB';
+IF EXISTS(SELECT * FROM hive_db_data_migration WHERE migration = 'Reputation calculation') THEN
+  RAISE NOTICE 'Performing initial account reputation calculation...';
+  PERFORM update_account_reputations(NULL, NULL, True);
+ELSE
+  RAISE NOTICE 'Skipping initial account reputation calculation...';
+END IF;
+END
+$BODY$;
+
+COMMIT;
+
+START TRANSACTION;
+
+DO
+$BODY$
+BEGIN
+IF EXISTS(SELECT * FROM hive_db_data_migration WHERE migration = 'hive_posts_api_helper fill') THEN
+  RAISE NOTICE 'Performing initial hive_posts_api_helper collection...';
+    SET work_mem='2GB';
+    TRUNCATE TABLE hive_posts_api_helper;
+    DROP INDEX IF EXISTS hive_posts_api_helper_author_permlink_idx;
+    DROP INDEX IF EXISTS hive_posts_api_helper_author_s_permlink_idx;
+    PERFORM update_hive_posts_api_helper(NULL, NULL);
+    CREATE INDEX IF NOT EXISTS hive_posts_api_helper_author_s_permlink_idx ON hive_posts_api_helper (author_s_permlink);
+ELSE
+  RAISE NOTICE 'Skipping initial hive_posts_api_helper collection...';
+END IF;
+END
+$BODY$;
+
+COMMIT;
+
+START TRANSACTION;
+DO
+$BODY$
+BEGIN
+IF EXISTS(SELECT * FROM hive_db_data_migration WHERE migration = 'hive_mentions fill') THEN
+  RAISE NOTICE 'Performing initial post body mentions collection...';
+  SET work_mem='2GB';
+  DROP INDEX IF EXISTS hive_mentions_block_num_idx;
+  PERFORM update_hive_posts_mentions(0, (SELECT hb.num FROM hive_blocks hb ORDER BY hb.num DESC LIMIT 1) );
+  CREATE INDEX IF NOT EXISTS hive_mentions_block_num_idx ON hive_mentions (block_num);
+ELSE
+  RAISE NOTICE 'Skipping initial post body mentions collection...';
+END IF;
+END
+$BODY$;
+
+COMMIT;
+
+START TRANSACTION;
+
+DO
+$BODY$
+BEGIN
+IF EXISTS (SELECT * FROM hive_db_data_migration WHERE migration = 'update_posts_rshares( 0, head_block_number) execution') THEN
+  RAISE NOTICE 'Performing posts rshares, hot and trend recalculation on range ( 0, head_block_number)...';
+  SET work_mem='2GB';
+  PERFORM update_posts_rshares(0, (SELECT hb.num FROM hive_blocks hb ORDER BY hb.num DESC LIMIT 1) );
+  DELETE FROM hive_db_data_migration WHERE migration = 'update_posts_rshares( 0, head_block_number) execution';
+ELSE
+  RAISE NOTICE 'Skipping update_posts_rshares( 0, head_block_number) recalculation...';
+END IF;
+END
+$BODY$;
+
+COMMIT;
+
+START TRANSACTION;
+DO
+$BODY$
+BEGIN
+IF EXISTS (SELECT * FROM hive_db_data_migration WHERE migration = 'update_hive_posts_children_count execution') THEN
+  RAISE NOTICE 'Performing initial post children count execution ( 0, head_block_number)...';
+  SET work_mem='2GB';
+  update hive_posts set children = 0 where children != 0;
+  PERFORM update_all_hive_posts_children_count();
+  DELETE FROM hive_db_data_migration WHERE migration = 'update_hive_posts_children_count execution';
+ELSE
+  RAISE NOTICE 'Skipping initial post children count execution ( 0, head_block_number) recalculation...';
+END IF;
+END
+$BODY$;
+COMMIT;
+
+START TRANSACTION;
+DO
+$BODY$
+BEGIN
+IF EXISTS (SELECT * FROM hive_db_data_migration WHERE migration = 'update_hive_post_mentions refill execution') THEN
+  RAISE NOTICE 'Performing hive_mentions refill...';
+  SET work_mem='2GB';
+  TRUNCATE TABLE hive_mentions RESTART IDENTITY;
+  PERFORM update_hive_posts_mentions(0, (select max(num) from hive_blocks));
+  DELETE FROM hive_db_data_migration WHERE migration = 'update_hive_post_mentions refill execution';
+ELSE
+  RAISE NOTICE 'Skipping hive_mentions refill...';
+END IF;
+
+END
+$BODY$;
+COMMIT;
+
+START TRANSACTION;
+DO
+$BODY$
+BEGIN
+-- Also covers previous changes at a80c7642a1f3b08997af7e8a9915c13d34b7f0e0
+-- Also covers previous changes at b100db27f37dda3c869c2756d99ab2856f7da9f9
+IF NOT EXISTS (SELECT * FROM hive_db_patch_level WHERE patched_to_revision = 'bd83414409b7624e2413b97a62fa7d97d83edd86')
+THEN
+  RAISE NOTICE 'Performing notification cache initial fill...';
+  SET work_mem='2GB';
+  PERFORM update_notification_cache(NULL, NULL, False);
+  DELETE FROM hive_db_data_migration WHERE migration = 'Notification cache initial fill';
+ELSE
+  RAISE NOTICE 'Skipping notification cache initial fill...';
+END IF;
+
+END
+$BODY$;
+COMMIT;
+
+
+START TRANSACTION;
+
+DO
+$BODY$
+BEGIN
+SET work_mem='2GB';
+IF NOT EXISTS(SELECT * FROM hive_db_patch_level WHERE patched_to_revision = 'cce7fe54a2242b7a80354ee7e50e5b3275a2b039') THEN
+  RAISE NOTICE 'Performing reputation livesync recalculation...';
+  --- reputations have to be recalculated from scratch.
+  UPDATE hive_accounts SET reputation = 0, is_implicit = True;
+  PERFORM update_account_reputations(NULL, NULL, True);
+  INSERT INTO hive_db_vacuum_needed (vacuum_needed) VALUES (True);
+ELSE
+  RAISE NOTICE 'Skipping reputation livesync recalculation...';
+END IF;
+END
+$BODY$;
+
+COMMIT;
+
+START TRANSACTION;
+
+DO
+$BODY$
+BEGIN
+SET work_mem='2GB';
+IF NOT EXISTS(SELECT * FROM hive_db_patch_level WHERE patched_to_revision = '33dd5e52673335284c6aa28ee89a069f83bd2dc6') THEN
+  RAISE NOTICE 'Performing reputation data cleanup...';
+  PERFORM truncate_account_reputation_data('30 days'::interval);
+  INSERT INTO hive_db_vacuum_needed (vacuum_needed) VALUES (True);
+ELSE
+  RAISE NOTICE 'Skipping reputation data cleanup...';
+END IF;
+END
+$BODY$;
+
+COMMIT;
+
+START TRANSACTION;
+
+TRUNCATE TABLE hive_db_data_migration;
+
+INSERT INTO hive_db_patch_level
+  (patch_date, patched_to_revision)
+SELECT ds.patch_date, ds.patch_revision
+FROM
+(
+VALUES
+(now(), '7b8def051be224a5ebc360465f7a1522090c7125'),
+(now(), 'e17bfcb08303cbf07b3ce7d1c435d59a368b4a9e'),
+(now(), '0be8e6e8b2121a8f768113e35e47725856c5da7c'), -- update_hot_and_trending_for_blocks fix, https://gitlab.syncad.com/hive/hivemind/-/merge_requests/247
+(now(), '26c2f1862770178d4575ec09e9f9c225dcf3d206'), -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/252
+(now(), 'e8b65adf22654203f5a79937ff2a95c5c47e10c5'), -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/251
+(now(), '8d0b673e7c40c05d2b8ae74ccf32adcb6b11f906'), -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/265
+-- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/281
+-- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/282
+-- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/257
+-- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/251
+-- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/265
+--
+(now(), '45c2883131472cc14a03fe4e355ba1435020d720'),
+(now(), '7cfc2b90a01b32688075b22a6ab173f210fc770f'), -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/286
+(now(), 'f2e5f656a421eb1dd71328a94a421934eda27a87')  -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/275
+,(now(), '4cdf5d19f6cfcb73d3fa504cac9467c4df31c02e') -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/295
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/294
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/298
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/301
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/297
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/302
+,(now(), '166327bfa87beda588b20bfcfa574389f4100389')
+,(now(), '88e62bdf1fcc47809fec84424cf98c71ce87ca89') -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/310
+,(now(), 'f8ecf376da5e0efef64b79f91e9803eac8b163a4') -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/289
+,(now(), '0e3c8700659d98b45f1f7146dc46a195f905fc2d') -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/306 update posts children count fix
+,(now(), '9e126e9d762755f2b9a0fd68f076c9af6bb73b76') -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/314 mentions fix
+,(now(), '033619277eccea70118a5b8dc0c73b913da0025f') -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/326 https://gitlab.syncad.com/hive/hivemind/-/merge_requests/322 posts rshares recalc
+,(now(), '1847c75702384c7e34c624fc91f24d2ef20df91d') -- latest version of develop containing included changes.
+,(now(), '1f23e1326f3010bc84353aba82d4aa7ff2f999e4') -- hive_posts_author_id_created_at_idx index def. to speedup hive_accounts_info_view.
+,(now(), '2a274e586454968a4f298a855a7e60394ed90bde') -- get_number_of_unread_notifications speedup https://gitlab.syncad.com/hive/hivemind/-/merge_requests/348/diffs
+,(now(), '431fdaead7dcd69e4d2a45e7ce8a3186b8075515') -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/367
+,(now(), 'cc7bb174d40fe1a0e2221d5d7e1c332c344dca34') -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/372
+,(now(), 'cce7fe54a2242b7a80354ee7e50e5b3275a2b039') -- reputation calc at LIVE sync.
+,(now(), '3cb920ec2a3a83911d31d8dd2ec647e2258a19e0') -- Reputation data cleanup https://gitlab.syncad.com/hive/hivemind/-/merge_requests/425
+,(now(), '33dd5e52673335284c6aa28ee89a069f83bd2dc6') -- Post initial sync fixes https://gitlab.syncad.com/hive/hivemind/-/merge_requests/439
+,(now(), 'a80c7642a1f3b08997af7e8a9915c13d34b7f0e0') -- Notification IDs https://gitlab.syncad.com/hive/hivemind/-/merge_requests/445
+,(now(), 'b100db27f37dda3c869c2756d99ab2856f7da9f9') -- hive_notification_cache table supplement https://gitlab.syncad.com/hive/hivemind/-/merge_requests/447
+,(now(), 'bd83414409b7624e2413b97a62fa7d97d83edd86') -- follow notification time is taken from block affecting it  https://gitlab.syncad.com/hive/hivemind/-/merge_requests/449
+,(now(), 'd64c73792175b12684ec74691297782cf67a2093') -- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/452
+) ds (patch_date, patch_revision)
+WHERE NOT EXISTS (SELECT NULL FROM hive_db_patch_level hpl WHERE hpl.patched_to_revision = ds.patch_revision);
+
+COMMIT;
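+--- Illustrative check (not part of the upgrade itself): the most recently recorded patch
+--- level can be inspected afterwards with, for example:
+---   SELECT level, patch_date, patched_to_revision FROM hive_db_patch_level ORDER BY level DESC LIMIT 1;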
+
+;
diff --git a/hive/db/sql_scripts/upgrade/upgrade_table_schema.sql b/hive/db/sql_scripts/upgrade/upgrade_table_schema.sql
new file mode 100644
index 0000000000000000000000000000000000000000..2840fef0951ef831748f38c36355fe580bad9f1b
--- /dev/null
+++ b/hive/db/sql_scripts/upgrade/upgrade_table_schema.sql
@@ -0,0 +1,461 @@
+do $$
+BEGIN
+   ASSERT EXISTS (SELECT * FROM pg_extension WHERE extname='intarray'), 'The database requires the "intarray" extension to be installed';
+END$$;
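+--- Note: if the assertion above fails, the extension can usually be installed by a superuser
+--- before running this script, e.g.: CREATE EXTENSION IF NOT EXISTS intarray;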
+
+CREATE TABLE IF NOT EXISTS hive_db_patch_level
+(
+  level SERIAL NOT NULL PRIMARY KEY,
+  patch_date timestamp without time zone NOT NULL,
+  patched_to_revision TEXT
+);
+
+CREATE TABLE IF NOT EXISTS hive_db_data_migration
+(
+  migration varchar(128) not null
+);
+
+CREATE TABLE IF NOT EXISTS hive_db_vacuum_needed
+(
+  vacuum_needed BOOLEAN NOT NULL
+);
+
+TRUNCATE TABLE hive_db_vacuum_needed;
+
+DO $$
+BEGIN
+  EXECUTE 'ALTER DATABASE '||current_database()||' SET join_collapse_limit TO 16';
+  EXECUTE 'ALTER DATABASE '||current_database()||' SET from_collapse_limit TO 16';
+END
+$$;
+
+SHOW join_collapse_limit;
+SHOW from_collapse_limit;
+
+DO
+$BODY$
+BEGIN
+IF NOT EXISTS(SELECT data_type
+              FROM information_schema.columns
+              WHERE table_name = 'hive_accounts' AND column_name = 'is_implicit') THEN
+    RAISE NOTICE 'Performing hive_accounts upgrade - adding new column is_implicit';
+    PERFORM deps_save_and_drop_dependencies('public', 'hive_accounts', true);
+    alter table ONLY hive_accounts
+      add column is_implicit boolean,
+      alter column is_implicit set default True;
+
+    --- reputations have to be recalculated from scratch.
+    update hive_accounts set reputation = 0, is_implicit = True;
+
+    alter table ONLY hive_accounts
+      alter column is_implicit set not null;
+
+    perform deps_restore_dependencies('public', 'hive_accounts');
+
+    INSERT INTO hive_db_data_migration VALUES ('Reputation calculation');
+ELSE
+  RAISE NOTICE 'hive_accounts::is_implicit migration skipped';
+END IF;
+
+IF EXISTS(SELECT data_type
+          FROM information_schema.columns
+          WHERE table_name = 'hive_accounts' AND column_name = 'blacklist_description') THEN
+    RAISE NOTICE 'Performing hive_accounts upgrade - removing columns blacklist_description/muted_list_description';
+    -- drop hive_accounts_info_view since it uses removed column. It will be rebuilt after upgrade
+    DROP VIEW IF EXISTS hive_accounts_info_view;
+
+    PERFORM deps_save_and_drop_dependencies('public', 'hive_accounts', true);
+    ALTER TABLE ONLY hive_accounts
+      DROP COLUMN IF EXISTS blacklist_description,
+      DROP COLUMN IF EXISTS muted_list_description
+      ;
+ELSE
+  RAISE NOTICE 'hive_accounts::blacklist_description/muted_list_description migration skipped';
+END IF;
+
+END
+$BODY$;
+
+DROP TABLE IF EXISTS hive_account_reputation_status;
+
+drop index if exists hive_posts_sc_hot_idx;
+drop index if exists hive_posts_sc_trend_idx;
+drop index if exists hive_reblogs_blogger_id;
+drop index if exists hive_subscriptions_community_idx;
+drop index if exists hive_votes_post_id_idx;
+drop index if exists hive_votes_voter_id_idx;
+drop index if exists hive_votes_last_update_idx;
+drop index if exists hive_posts_community_id_idx;
+
+CREATE INDEX IF NOT EXISTS hive_posts_cashout_time_id_idx ON hive_posts (cashout_time, id);
+CREATE INDEX IF NOT EXISTS hive_posts_updated_at_idx ON hive_posts (updated_at DESC);
+CREATE INDEX IF NOT EXISTS hive_votes_block_num_idx ON hive_votes (block_num);
+CREATE INDEX IF NOT EXISTS hive_posts_community_id_id_idx ON hive_posts (community_id, id DESC);
+
+DO
+$BODY$
+BEGIN
+IF NOT EXISTS(SELECT data_type
+              FROM information_schema.columns
+              WHERE table_name = 'hive_posts_api_helper' AND column_name = 'author_s_permlink') THEN
+    RAISE NOTICE 'Performing hive_posts_api_helper upgrade - adding new column author_s_permlink';
+    PERFORM deps_save_and_drop_dependencies('public', 'hive_posts_api_helper', true);
+
+    DROP INDEX IF EXISTS hive_posts_api_helper_parent_permlink_or_category;
+    DROP TABLE IF EXISTS hive_posts_api_helper;
+
+    CREATE TABLE public.hive_posts_api_helper
+    (
+        id integer NOT NULL,
+        author_s_permlink character varying(275) COLLATE pg_catalog."C" NOT NULL,
+        CONSTRAINT hive_posts_api_helper_pkey PRIMARY KEY (id)
+    );
+
+    perform deps_restore_dependencies('public', 'hive_posts_api_helper');
+
+    CREATE INDEX IF NOT EXISTS hive_posts_api_helper_author_s_permlink_idx ON hive_posts_api_helper (author_s_permlink);
+
+    INSERT INTO hive_db_data_migration VALUES ('hive_posts_api_helper fill');
+ELSE
+  RAISE NOTICE 'hive_posts_api_helper migration skipped';
+END IF;
+END
+$BODY$
+;
+
+DO
+$BODY$
+BEGIN
+IF NOT EXISTS(SELECT data_type
+              FROM information_schema.columns
+              WHERE table_name = 'hive_mentions' AND column_name = 'block_num') THEN
+  RAISE NOTICE 'Performing hive_mentions upgrade - adding new column block_num';
+
+  TRUNCATE TABLE public.hive_mentions RESTART IDENTITY;
+  PERFORM deps_save_and_drop_dependencies('public', 'hive_mentions', true);
+
+  ALTER TABLE hive_mentions
+    DROP CONSTRAINT IF EXISTS hive_mentions_pk,
+    ADD COLUMN IF NOT EXISTS id SERIAL,
+    ADD COLUMN IF NOT EXISTS block_num INTEGER,
+    ALTER COLUMN id SET NOT NULL,
+    ALTER COLUMN block_num SET NOT NULL,
+    ADD CONSTRAINT hive_mentions_pk PRIMARY KEY (id);
+
+  perform deps_restore_dependencies('public', 'hive_mentions');
+
+  INSERT INTO hive_db_data_migration VALUES ('hive_mentions fill');
+END IF;
+END
+$BODY$
+;
+
+---------------------------------------------------------------------------------------------------
+--                                hive_posts table migration
+---------------------------------------------------------------------------------------------------
+
+DO
+$BODY$
+BEGIN
+IF EXISTS(SELECT data_type
+              FROM information_schema.columns
+              WHERE table_name = 'hive_posts' AND column_name = 'is_grayed') THEN
+  RAISE NOTICE 'Performing hive_posts upgrade - dropping is_grayed column';
+
+  --- Warning: hive_posts_view must be dropped first since it references the is_grayed column, which is being dropped.
+  --- Saving it among the dependencies would restore the wrong (old) definition of the view and cause an error.
+  DROP VIEW IF EXISTS hive_posts_view CASCADE;
+
+  PERFORM deps_save_and_drop_dependencies('public', 'hive_posts', true);
+
+  ALTER TABLE hive_posts
+    DROP COLUMN IF EXISTS is_grayed;
+
+  perform deps_restore_dependencies('public', 'hive_posts');
+ELSE
+  RAISE NOTICE 'hive_posts upgrade - SKIPPED dropping is_grayed column';
+END IF;
+
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/322
+IF EXISTS(SELECT data_type FROM information_schema.columns
+          WHERE table_name = 'hive_posts' AND column_name = 'abs_rshares' AND data_type = 'bigint') AND
+   EXISTS(SELECT data_type FROM information_schema.columns
+          WHERE table_name = 'hive_posts' AND column_name = 'vote_rshares' AND data_type = 'bigint') AND
+   NOT EXISTS (SELECT data_type FROM information_schema.columns
+               WHERE table_name = 'hive_posts' AND column_name = 'block_num_created') THEN
+  RAISE NOTICE 'Performing hive_posts upgrade - adding block_num_created column, type change for abs_rshares/vote_rshares columns';
+
+  PERFORM deps_save_and_drop_dependencies('public', 'hive_posts', true);
+
+  ALTER TABLE ONLY hive_posts
+    ALTER COLUMN abs_rshares SET DATA TYPE NUMERIC,
+    ALTER COLUMN vote_rshares SET DATA TYPE NUMERIC,
+    ADD COLUMN block_num_created INTEGER;
+
+  UPDATE hive_posts SET block_num_created = 1; -- Artificial value; this information is not available here and filling it would require a full sync.
+
+  ALTER TABLE ONLY hive_posts
+    ALTER COLUMN block_num_created SET NOT NULL;
+
+  perform deps_restore_dependencies('public', 'hive_posts');
+ELSE
+  RAISE NOTICE 'SKIPPING hive_posts upgrade - adding a block_num_created column, type change for abs_rshares/vote_rshares columns';
+END IF;
+
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/367
+IF NOT EXISTS (SELECT data_type FROM information_schema.columns
+               WHERE table_name = 'hive_posts' AND column_name = 'total_votes')
+   AND NOT EXISTS (SELECT data_type FROM information_schema.columns
+                 WHERE table_name = 'hive_posts' AND column_name = 'net_votes') THEN
+  RAISE NOTICE 'Performing hive_posts upgrade - adding total_votes and net_votes columns';
+
+  PERFORM deps_save_and_drop_dependencies('public', 'hive_posts', true);
+
+  ALTER TABLE ONLY hive_posts
+    ADD COLUMN total_votes BIGINT,
+    ADD COLUMN net_votes BIGINT;
+
+  UPDATE hive_posts SET total_votes = 0, net_votes = 0; -- Artificial values; update_posts_rshares must be run for all blocks to fill them.
+
+  ALTER TABLE ONLY hive_posts
+    ALTER COLUMN total_votes SET NOT NULL,
+    ALTER COLUMN total_votes SET DEFAULT 0,
+    ALTER COLUMN net_votes SET NOT NULL,
+    ALTER COLUMN net_votes SET DEFAULT 0;
+
+  PERFORM deps_restore_dependencies('public', 'hive_posts');
+ELSE
+  RAISE NOTICE 'SKIPPING hive_posts upgrade - adding total_votes and net_votes columns';
+END IF;
+
+IF NOT EXISTS(SELECT data_type FROM information_schema.columns
+          WHERE table_name = 'hive_posts' AND column_name = 'tags_ids') THEN
+    ALTER TABLE ONLY hive_posts
+            ADD COLUMN tags_ids INTEGER[];
+
+    UPDATE hive_posts hp
+    SET
+        tags_ids = tags.tags
+    FROM
+    (
+      SELECT
+          post_id as post_id,
+          array_agg( hpt.tag_id ) as tags
+      FROM
+        hive_post_tags hpt
+      GROUP BY post_id
+    ) as tags
+    WHERE hp.id = tags.post_id;
+ELSE
+    RAISE NOTICE 'SKIPPING hive_posts upgrade - adding a tags_ids column';
+END IF;
+
+END
+
+$BODY$
+;
+
+DROP INDEX IF EXISTS hive_posts_created_at_idx;
+-- skip it since it is dropped below.
+-- CREATE INDEX IF NOT EXISTS hive_posts_created_at_author_id_idx ON hive_posts (created_at, author_id);
+
+CREATE INDEX IF NOT EXISTS hive_posts_block_num_created_idx ON hive_posts (block_num_created);
+
+DROP INDEX IF EXISTS hive_mentions_post_id_idx;
+
+-- updated up to 7b8def051be224a5ebc360465f7a1522090c7125
+-- updated up to 033619277eccea70118a5b8dc0c73b913da0025f
+INSERT INTO hive_db_data_migration
+select 'update_posts_rshares( 0, head_block_number) execution'
+where not exists (select null from hive_db_patch_level where patched_to_revision = '431fdaead7dcd69e4d2a45e7ce8a3186b8075515')
+;
+
+-- updated to e8b65adf22654203f5a79937ff2a95c5c47e10c5 - See merge request hive/hivemind!251
+
+-- COMMENTED OUT due to the MRs processed below:
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/298
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/302
+--CREATE INDEX IF NOT EXISTS hive_posts_is_paidout_idx ON hive_posts (is_paidout);
+--CREATE INDEX IF NOT EXISTS hive_posts_payout_plus_pending_payout_id ON hive_posts ((payout+pending_payout), id);
+
+INSERT INTO hive_tag_data (id, tag) VALUES (0, '')
+ON CONFLICT DO NOTHING;
+
+--- updated to f2e5f656a421eb1dd71328a94a421934eda27a87 - See MR https://gitlab.syncad.com/hive/hivemind/-/merge_requests/275
+DO
+$BODY$
+BEGIN
+IF NOT EXISTS(SELECT data_type
+              FROM information_schema.columns
+              WHERE table_name = 'hive_follows' AND column_name = 'follow_muted') THEN
+    RAISE NOTICE 'Performing hive_follows upgrade - adding new column follow_muted';
+    PERFORM deps_save_and_drop_dependencies('public', 'hive_follows', true);
+    alter table ONLY hive_follows
+      add column follow_muted boolean,
+      alter column follow_muted set default False;
+
+    --- Fill the default value for all existing records.
+    update hive_follows set follow_muted = False;
+
+    alter table ONLY hive_follows
+      alter column follow_muted set not null;
+
+    perform deps_restore_dependencies('public', 'hive_follows');
+ELSE
+  RAISE NOTICE 'hive_follows::follow_muted migration skipped';
+END IF;
+
+END
+$BODY$;
+
+--- 4cdf5d19f6cfcb73d3fa504cac9467c4df31c02e - https://gitlab.syncad.com/hive/hivemind/-/merge_requests/295
+--- 9e126e9d762755f2b9a0fd68f076c9af6bb73b76 - https://gitlab.syncad.com/hive/hivemind/-/merge_requests/314 mentions fix
+INSERT INTO hive_db_data_migration
+select 'update_hive_post_mentions refill execution'
+where not exists (select null from hive_db_patch_level where patched_to_revision = '9e126e9d762755f2b9a0fd68f076c9af6bb73b76' )
+;
+
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/298
+
+DROP INDEX IF EXISTS hive_posts_is_paidout_idx;
+DROP INDEX IF EXISTS hive_posts_sc_trend_id_idx;
+DROP INDEX IF EXISTS hive_posts_sc_hot_id_idx;
+
+--- Commented out as it is dropped below.
+--- CREATE INDEX IF NOT EXISTS hive_posts_sc_trend_id_is_paidout_idx ON hive_posts(sc_trend, id, is_paidout );
+
+--- Commented out as it is dropped below.
+--- CREATE INDEX IF NOT EXISTS hive_posts_sc_hot_id_is_paidout_idx ON hive_posts(sc_hot, id, is_paidout );
+
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/302
+
+DROP INDEX IF EXISTS hive_posts_payout_plus_pending_payout_id;
+--- Commented out as dropped below.
+--- CREATE INDEX IF NOT EXISTS hive_posts_payout_plus_pending_payout_id_is_paidout_idx ON hive_posts ((payout+pending_payout), id, is_paidout);
+
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/310
+
+CREATE INDEX IF NOT EXISTS hive_votes_voter_id_last_update_idx ON hive_votes (voter_id, last_update);
+
+--- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/306 update posts children count fix
+--- 0e3c8700659d98b45f1f7146dc46a195f905fc2d
+INSERT INTO hive_db_data_migration
+select 'update_hive_posts_children_count execution'
+where not exists (select null from hive_db_patch_level where patched_to_revision = '0e3c8700659d98b45f1f7146dc46a195f905fc2d' )
+;
+
+-- https://gitlab.syncad.com/hive/hivemind/-/merge_requests/372
+INSERT INTO hive_db_data_migration
+select 'Notification cache initial fill'
+where not exists (select null from hive_db_patch_level where patched_to_revision = 'cc7bb174d40fe1a0e2221d5d7e1c332c344dca34' )
+;
+
+--- 1847c75702384c7e34c624fc91f24d2ef20df91d latest version of develop included in this migration script.
+
+--- Rename hive_votes_ux1 unique constraint to the hive_votes_voter_id_author_id_permlink_id_uk
+DO $$
+BEGIN
+IF EXISTS (SELECT * FROM pg_constraint WHERE conname='hive_votes_ux1') THEN
+  RAISE NOTICE 'Attempting to rename hive_votes_ux1 to hive_votes_voter_id_author_id_permlink_id_uk...';
+  ALTER TABLE hive_votes RENAME CONSTRAINT hive_votes_ux1 to hive_votes_voter_id_author_id_permlink_id_uk;
+END IF;
+END
+$$
+;
+
+--- Change definition of index hive_posts_created_at_author_id_idx to hive_posts_author_id_created_at_idx to improve hive_accounts_info_view performance.
+DROP INDEX IF EXISTS public.hive_posts_created_at_author_id_idx;
+
+CREATE INDEX IF NOT EXISTS hive_posts_author_id_created_at_idx ON public.hive_posts ( author_id DESC, created_at DESC);
+
+CREATE INDEX IF NOT EXISTS hive_blocks_created_at_idx ON hive_blocks (created_at);
+
+--- Notification cache to significantly speed up notification APIs.
+CREATE TABLE IF NOT EXISTS hive_notification_cache
+(
+  id BIGINT NOT NULL DEFAULT nextval('hive_notification_cache_id_seq'::regclass),
+  block_num INT NOT NULL,
+  type_id INT NOT NULL,
+  dst INT NULL,
+  src INT NULL,
+  dst_post_id INT NULL,
+  post_id INT NULL,
+  score INT NOT NULL,
+  created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL,
+  community_title VARCHAR(32) NULL,
+  community VARCHAR(16) NULL,
+  payload VARCHAR NULL,
+
+  CONSTRAINT hive_notification_cache_pk PRIMARY KEY (id)
+);
+
+CREATE INDEX IF NOT EXISTS hive_notification_cache_block_num_idx ON hive_notification_cache (block_num);
+CREATE INDEX IF NOT EXISTS hive_notification_cache_dst_score_idx ON hive_notification_cache (dst, score) WHERE dst IS NOT NULL;
+
+CREATE INDEX IF NOT EXISTS hive_feed_cache_block_num_idx on hive_feed_cache (block_num);
+CREATE INDEX IF NOT EXISTS hive_feed_cache_created_at_idx on hive_feed_cache (created_at);
+
+--- condenser_get_trending_tags optimizations and slight index improvements.
+
+DROP INDEX IF EXISTS hive_posts_category_id_idx;
+
+CREATE INDEX IF NOT EXISTS hive_posts_category_id_payout_plus_pending_payout_depth_idx ON hive_posts (category_id, (payout + pending_payout), depth)
+  WHERE NOT is_paidout AND counter_deleted = 0;
+
+DROP INDEX IF EXISTS hive_posts_sc_trend_id_is_paidout_idx;
+
+CREATE INDEX IF NOT EXISTS hive_posts_sc_trend_id_idx ON hive_posts USING btree (sc_trend, id)
+  WHERE NOT is_paidout AND counter_deleted = 0 AND depth = 0
+;
+
+DROP INDEX IF EXISTS hive_posts_sc_hot_id_is_paidout_idx;
+
+CREATE INDEX IF NOT EXISTS hive_posts_sc_hot_id_idx ON hive_posts (sc_hot, id)
+  WHERE NOT is_paidout AND counter_deleted = 0 AND depth = 0
+  ;
+
+DROP INDEX IF EXISTS hive_posts_payout_plus_pending_payout_id_is_paidout_idx;
+
+CREATE INDEX IF NOT EXISTS hive_posts_payout_plus_pending_payout_id_idx ON hive_posts ((payout + pending_payout), id)
+  WHERE counter_deleted = 0 AND NOT is_paidout
+;
+
+DROP INDEX IF EXISTS hive_posts_promoted_idx;
+
+CREATE INDEX IF NOT EXISTS hive_posts_promoted_id_idx ON hive_posts (promoted, id)
+  WHERE NOT is_paidout AND counter_deleted = 0
+;
+
+CREATE INDEX IF NOT EXISTS hive_posts_tags_ids_idx ON hive_posts USING gin(tags_ids gin__int_ops);
+
+--DROP TABLE IF EXISTS hive_post_tags;
+
+
+CREATE SEQUENCE IF NOT EXISTS hive_notification_cache_id_seq
+    INCREMENT 1
+    START 1
+    MINVALUE 1
+    MAXVALUE 9223372036854775807
+    CACHE 1
+    ;
+
+ALTER TABLE hive_notification_cache
+  ALTER COLUMN id SET DEFAULT nextval('hive_notification_cache_id_seq'::regclass);
+
+-- Changes done in https://gitlab.syncad.com/hive/hivemind/-/merge_requests/452
+DROP INDEX IF EXISTS hive_posts_parent_id_idx;
+
+CREATE INDEX IF NOT EXISTS hive_posts_parent_id_counter_deleted_id_idx ON hive_posts (parent_id, counter_deleted, id);
+
+DROP INDEX IF EXISTS hive_posts_author_id_created_at_idx;
+
+CREATE INDEX IF NOT EXISTS hive_posts_author_id_created_at_id_idx ON hive_posts (author_id DESC, created_at DESC, id);
+
+DROP INDEX IF EXISTS hive_posts_author_posts_idx;
+
+CREATE INDEX IF NOT EXISTS hive_posts_author_id_id_idx ON hive_posts (author_id, id)
+  WHERE depth = 0;
+
+CREATE INDEX IF NOT EXISTS hive_feed_cache_post_id_idx ON hive_feed_cache (post_id);
+
diff --git a/hive/db/sql_scripts/utility_functions.sql b/hive/db/sql_scripts/utility_functions.sql
new file mode 100644
index 0000000000000000000000000000000000000000..ae326d9a0b436e81d6683c11d9e5dc144890c537
--- /dev/null
+++ b/hive/db/sql_scripts/utility_functions.sql
@@ -0,0 +1,150 @@
+DROP FUNCTION IF EXISTS public.max_time_stamp() CASCADE;
+CREATE OR REPLACE FUNCTION public.max_time_stamp( _first TIMESTAMP, _second TIMESTAMP )
+RETURNS TIMESTAMP
+LANGUAGE 'plpgsql'
+IMMUTABLE
+AS $BODY$
+BEGIN
+  IF _first > _second THEN
+    RETURN _first;
+  ELSE
+    RETURN _second;
+  END IF;
+END
+$BODY$;
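+--- Example (illustrative): SELECT max_time_stamp('2020-01-01'::timestamp, '2020-06-01'::timestamp);
+--- returns the later of the two values, here 2020-06-01 00:00:00.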
+
+DROP FUNCTION IF EXISTS find_comment_id(character varying, character varying, boolean)
+;
+CREATE OR REPLACE FUNCTION find_comment_id(
+  in _author hive_accounts.name%TYPE,
+  in _permlink hive_permlink_data.permlink%TYPE,
+  in _check boolean)
+RETURNS INT
+LANGUAGE 'plpgsql'
+AS
+$function$
+DECLARE
+  __post_id INT = 0;
+BEGIN
+  IF (_author <> '' OR _permlink <> '') THEN
+    SELECT INTO __post_id COALESCE( (
+      SELECT hp.id
+      FROM hive_posts hp
+      JOIN hive_accounts ha ON ha.id = hp.author_id
+      JOIN hive_permlink_data hpd ON hpd.id = hp.permlink_id
+      WHERE ha.name = _author AND hpd.permlink = _permlink AND hp.counter_deleted = 0
+    ), 0 );
+    IF _check AND __post_id = 0 THEN
+      SELECT INTO __post_id (
+        SELECT COUNT(hp.id)
+        FROM hive_posts hp
+        JOIN hive_accounts ha ON ha.id = hp.author_id
+        JOIN hive_permlink_data hpd ON hpd.id = hp.permlink_id
+        WHERE ha.name = _author AND hpd.permlink = _permlink
+      );
+      IF __post_id = 0 THEN
+        RAISE EXCEPTION 'Post %/% does not exist', _author, _permlink;
+      ELSE
+        RAISE EXCEPTION 'Post %/% was deleted % time(s)', _author, _permlink, __post_id;
+      END IF;
+    END IF;
+  END IF;
+  RETURN __post_id;
+END
+$function$
+;
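+--- Example (hypothetical data): SELECT find_comment_id('alice', 'first-post', True);
+--- returns the post id, or raises an exception when _check is true and no matching, undeleted post exists.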
+
+DROP FUNCTION IF EXISTS find_account_id(character varying, boolean)
+;
+CREATE OR REPLACE FUNCTION find_account_id(
+  in _account hive_accounts.name%TYPE,
+  in _check boolean)
+RETURNS INT
+LANGUAGE 'plpgsql'
+AS
+$function$
+DECLARE
+  __account_id INT = 0;
+BEGIN
+  IF (_account <> '') THEN
+    SELECT INTO __account_id COALESCE( ( SELECT id FROM hive_accounts WHERE name=_account ), 0 );
+    IF _check AND __account_id = 0 THEN
+      RAISE EXCEPTION 'Account % does not exist', _account;
+    END IF;
+  END IF;
+  RETURN __account_id;
+END
+$function$
+;
+
+DROP FUNCTION IF EXISTS public.find_tag_id CASCADE
+;
+CREATE OR REPLACE FUNCTION public.find_tag_id(
+    in _tag_name hive_tag_data.tag%TYPE,
+    in _check BOOLEAN
+)
+RETURNS INTEGER
+LANGUAGE 'plpgsql' STABLE
+AS
+$function$
+DECLARE
+  __tag_id INT = 0;
+BEGIN
+  IF (_tag_name <> '') THEN
+    SELECT INTO __tag_id COALESCE( ( SELECT id FROM hive_tag_data WHERE tag=_tag_name ), 0 );
+    IF _check AND __tag_id = 0 THEN
+      RAISE EXCEPTION 'Tag % does not exist', _tag_name;
+    END IF;
+  END IF;
+  RETURN __tag_id;
+END
+$function$
+;
+
+DROP FUNCTION IF EXISTS public.find_category_id CASCADE
+;
+CREATE OR REPLACE FUNCTION public.find_category_id(
+    in _category_name hive_category_data.category%TYPE,
+    in _check BOOLEAN
+)
+RETURNS INTEGER
+LANGUAGE 'plpgsql' STABLE
+AS
+$function$
+DECLARE
+  __category_id INT = 0;
+BEGIN
+  IF (_category_name <> '') THEN
+    SELECT INTO __category_id COALESCE( ( SELECT id FROM hive_category_data WHERE category=_category_name ), 0 );
+    IF _check AND __category_id = 0 THEN
+      RAISE EXCEPTION 'Category % does not exist', _category_name;
+    END IF;
+  END IF;
+  RETURN __category_id;
+END
+$function$
+;
+
+DROP FUNCTION IF EXISTS public.find_community_id CASCADE
+;
+CREATE OR REPLACE FUNCTION public.find_community_id(
+    in _community_name hive_communities.name%TYPE,
+    in _check BOOLEAN
+)
+RETURNS INTEGER
+LANGUAGE 'plpgsql' STABLE
+AS
+$function$
+DECLARE
+  __community_id INT = 0;
+BEGIN
+  IF (_community_name <> '') THEN
+    SELECT INTO __community_id COALESCE( ( SELECT id FROM hive_communities WHERE name=_community_name ), 0 );
+    IF _check AND __community_id = 0 THEN
+      RAISE EXCEPTION 'Community % does not exist', _community_name;
+    END IF;
+  END IF;
+  RETURN __community_id;
+END
+$function$
+;
diff --git a/hive/indexer/accounts.py b/hive/indexer/accounts.py
index 76fa2b1f1f83c948a9965adceca088eb8fa7130f..0aefcbe314c4f65b391e675ef6a5fa9710c152cb 100644
--- a/hive/indexer/accounts.py
+++ b/hive/indexer/accounts.py
@@ -2,36 +2,52 @@
 
 import logging
 
-from datetime import datetime
-from toolz import partition_all
-
-import ujson as json
-
 from hive.db.adapter import Db
-from hive.utils.normalize import rep_log10, vests_amount
-from hive.utils.timer import Timer
-from hive.utils.account import safe_profile_metadata
-from hive.utils.unique_fifo import UniqueFIFO
+from hive.utils.account import get_profile_str
+
+from hive.indexer.db_adapter_holder import DbAdapterHolder
+from hive.utils.normalize import escape_characters
 
 log = logging.getLogger(__name__)
 
 DB = Db.instance()
 
-class Accounts:
+class Accounts(DbAdapterHolder):
     """Manages account id map, dirty queue, and `hive_accounts` table."""
 
+    _updates_data = {}
+
+    inside_flush = False
+
     # name->id map
     _ids = {}
 
-    # fifo queue
-    _dirty = UniqueFIFO()
-
     # in-mem id->rank map
     _ranks = {}
 
     # account core methods
     # --------------------
 
+    @classmethod
+    def update_op(cls, update_operation, allow_change_posting):
+        """Save json_metadata."""
+
+        if cls.inside_flush:
+            log.exception("Adding new update-account-info into '_updates_data' dict")
+            raise RuntimeError("Fatal error")
+
+        key = update_operation['account']
+        ( _posting_json_metadata, _json_metadata ) = get_profile_str( update_operation )
+
+        if key in cls._updates_data:
+            if allow_change_posting:
+                cls._updates_data[key]['allow_change_posting'] = True
+                cls._updates_data[key]['posting_json_metadata'] = _posting_json_metadata
+
+            cls._updates_data[key]['json_metadata'] = _json_metadata
+        else:
+            cls._updates_data[key] = { 'allow_change_posting' : allow_change_posting, 'posting_json_metadata' : _posting_json_metadata, 'json_metadata' : _json_metadata }
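+        # Illustrative shape of the pending-updates cache after the call above (hypothetical
+        # account name and metadata):
+        #   cls._updates_data['alice'] == { 'allow_change_posting': True,
+        #                                   'posting_json_metadata': '{"profile": {...}}',
+        #                                   'json_metadata': '{"profile": {...}}' }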
+
     @classmethod
     def load_ids(cls):
         """Load a full (name: id) dict into memory."""
@@ -59,18 +75,29 @@ class Accounts:
     def get_id(cls, name):
         """Get account id by name. Throw if not found."""
         assert isinstance(name, str), "account name should be string"
-        assert name in cls._ids, "account does not exist or was not registered"
+        assert name in cls._ids, 'Account \'%s\' does not exist' % name
         return cls._ids[name]
 
     @classmethod
-    def exists(cls, name):
+    def exists(cls, names):
         """Check if an account name exists."""
-        if isinstance(name, str):
-            return name in cls._ids
+        if isinstance(names, str):
+            return names in cls._ids
         return False
 
     @classmethod
-    def register(cls, names, block_date):
+    def check_names(cls, names):
+        """ Check which names from name list does not exists in the database """
+        assert isinstance(names, list), "Expecting list as argument"
+        return [name for name in names if name not in cls._ids]
+
+    @classmethod
+    def get_json_data(cls, source ):
+        """json-data preprocessing."""
+        return escape_characters( source )
+
+    @classmethod
+    def register(cls, name, op_details, block_date, block_num):
         """Block processing: register "candidate" names.
 
         There are four ops which can result in account creation:
@@ -79,148 +106,92 @@ class Accounts:
         the account they name does not already exist!
         """
 
-        # filter out names which already registered
-        new_names = list(filter(lambda n: not cls.exists(n), set(names)))
-        if not new_names:
+        if name is None:
             return
 
-        for name in new_names:
-            DB.query("INSERT INTO hive_accounts (name, created_at) "
-                     "VALUES (:name, :date)", name=name, date=block_date)
-
-        # pull newly-inserted ids and merge into our map
-        sql = "SELECT name, id FROM hive_accounts WHERE name IN :names"
-        for name, _id in DB.query_all(sql, names=tuple(new_names)):
-            cls._ids[name] = _id
-
-        # post-insert: pass to communities to check for new registrations
-        from hive.indexer.community import Community, START_DATE
-        if block_date > START_DATE:
-            Community.register(new_names, block_date)
-
-    # account cache methods
-    # ---------------------
-
-    @classmethod
-    def dirty(cls, account):
-        """Marks given account as needing an update."""
-        return cls._dirty.add(account)
-
-    @classmethod
-    def dirty_set(cls, accounts):
-        """Marks given accounts as needing an update."""
-        return cls._dirty.extend(accounts)
-
-    @classmethod
-    def dirty_all(cls):
-        """Marks all accounts as dirty. Use to rebuild entire table."""
-        cls.dirty(set(DB.query_col("SELECT name FROM hive_accounts")))
-
-    @classmethod
-    def dirty_oldest(cls, limit=50000):
-        """Flag `limit` least-recently updated accounts for update."""
-        sql = "SELECT name FROM hive_accounts ORDER BY cached_at LIMIT :limit"
-        return cls.dirty_set(set(DB.query_col(sql, limit=limit)))
-
-    @classmethod
-    def flush(cls, steem, trx=False, spread=1):
-        """Process all accounts flagged for update.
-
-         - trx: bool - wrap the update in a transaction
-         - spread: int - spread writes over a period of `n` calls
-        """
-        accounts = cls._dirty.shift_portion(spread)
-
-        count = len(accounts)
-        if not count:
-            return 0
-
-        if trx:
-            log.info("[SYNC] update %d accounts", count)
-
-        cls._cache_accounts(accounts, steem, trx=trx)
-        return count
-
-    @classmethod
-    def fetch_ranks(cls):
-        """Rebuild account ranks and store in memory for next update."""
-        sql = "SELECT id FROM hive_accounts ORDER BY vote_weight DESC"
-        for rank, _id in enumerate(DB.query_col(sql)):
-            cls._ranks[_id] = rank + 1
+        # filter out names which already registered
+        if cls.exists(name):
+            return
 
-    @classmethod
-    def _cache_accounts(cls, accounts, steem, trx=True):
-        """Fetch all `accounts` and write to db."""
-        timer = Timer(len(accounts), 'account', ['rps', 'wps'])
-        for name_batch in partition_all(1000, accounts):
-            cached_at = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
+        ( _posting_json_metadata, _json_metadata ) = get_profile_str( op_details )
 
-            timer.batch_start()
-            batch = steem.get_accounts(name_batch)
+        sql = """
+                  INSERT INTO hive_accounts (name, created_at, posting_json_metadata, json_metadata )
+                  VALUES ( '{}', '{}', {}, {} )
+                  RETURNING id
+              """.format( name, block_date, cls.get_json_data( _posting_json_metadata ), cls.get_json_data( _json_metadata ) )
 
-            timer.batch_lap()
-            sqls = [cls._sql(acct, cached_at) for acct in batch]
-            DB.batch_queries(sqls, trx)
+        cls._ids[name] = DB.query_one( sql )
 
-            timer.batch_finish(len(batch))
-            if trx or len(accounts) > 1000:
-                log.info(timer.batch_status())
+        # post-insert: pass to communities to check for new registrations
+        from hive.indexer.community import Community
+        if block_num > Community.start_block:
+            Community.register(name, block_date, block_num)
 
     @classmethod
-    def _sql(cls, account, cached_at):
-        """Prepare a SQL query from a steemd account."""
-        vests = vests_amount(account['vesting_shares'])
-
-        vote_weight = (vests
-                       + vests_amount(account['received_vesting_shares'])
-                       - vests_amount(account['delegated_vesting_shares']))
-
-        proxy_weight = 0 if account['proxy'] else float(vests)
-        for satoshis in account['proxied_vsf_votes']:
-            proxy_weight += float(satoshis) / 1e6
-
-        # remove empty keys
-        useless = ['transfer_history', 'market_history', 'post_history',
-                   'vote_history', 'other_history', 'tags_usage',
-                   'guest_bloggers']
-        for key in useless:
-            del account[key]
-
-        # pull out valid profile md and delete the key
-        profile = safe_profile_metadata(account)
-        del account['json_metadata']
-        del account['posting_json_metadata']
-
-        active_at = max(account['created'],
-                        account['last_account_update'],
-                        account['last_post'],
-                        account['last_root_post'],
-                        account['last_vote_time'])
-
-        values = {
-            'name':         account['name'],
-            'created_at':   account['created'],
-            'proxy':        account['proxy'],
-            'post_count':   account['post_count'],
-            'reputation':   rep_log10(account['reputation']),
-            'proxy_weight': proxy_weight,
-            'vote_weight':  vote_weight,
-            'active_at':    active_at,
-            'cached_at':    cached_at,
-
-            'display_name':  profile['name'],
-            'about':         profile['about'],
-            'location':      profile['location'],
-            'website':       profile['website'],
-            'profile_image': profile['profile_image'],
-            'cover_image':   profile['cover_image'],
-
-            'raw_json': json.dumps(account)}
-
-        # update rank field, if present
-        _id = cls.get_id(account['name'])
-        if _id in cls._ranks:
-            values['rank'] = cls._ranks[_id]
-
-        bind = ', '.join([k+" = :"+k for k in list(values.keys())][1:])
-        return ("UPDATE hive_accounts SET %s WHERE name = :name" % bind, values)
+    def flush(cls):
+        """ Flush json_metadatafrom cache to database """
+
+        cls.inside_flush = True
+        n = 0
+
+        if cls._updates_data:
+            cls.beginTx()
+
+            sql = """
+                    UPDATE hive_accounts ha
+                    SET
+                    posting_json_metadata = 
+                            (
+                                CASE T2.allow_change_posting
+                                    WHEN True THEN T2.posting_json_metadata
+                                    ELSE ha.posting_json_metadata
+                                END
+                            ),
+                    json_metadata = T2.json_metadata
+                    FROM
+                    (
+                      SELECT
+                        allow_change_posting,
+                        posting_json_metadata,
+                        json_metadata,
+                        name
+                      FROM
+                      (
+                      VALUES
+                        -- allow_change_posting, posting_json_metadata, json_metadata, name
+                        {}
+                      )T( allow_change_posting, posting_json_metadata, json_metadata, name )
+                    )T2
+                    WHERE ha.name = T2.name
+                """
+
+            values = []
+            values_limit = 1000
+
+            for name, data in cls._updates_data.items():
+                values.append("({}, {}, {}, '{}')".format(
+                  data['allow_change_posting'],
+                  cls.get_json_data( data['posting_json_metadata'] ),
+                  cls.get_json_data( data['json_metadata'] ),
+                  name))
+
+                if len(values) >= values_limit:
+                    values_str = ','.join(values)
+                    actual_query = sql.format(values_str)
+                    cls.db.query(actual_query)
+                    values.clear()
+
+            if len(values) > 0:
+                values_str = ','.join(values)
+                actual_query = sql.format(values_str)
+                cls.db.query(actual_query)
+                values.clear()
+
+            n = len(cls._updates_data)
+            cls._updates_data.clear()
+            cls.commitTx()
+
+        cls.inside_flush = False
+
+        return n
diff --git a/hive/indexer/blocks.py b/hive/indexer/blocks.py
index 58af5b48ffa431036cbd6b57f887e73ec428f971..44b3c6f0d83f6cfcbcf3def15ea6668ae279fcb5 100644
--- a/hive/indexer/blocks.py
+++ b/hive/indexer/blocks.py
@@ -1,22 +1,79 @@
 """Blocks processor."""
 
 import logging
-
+import concurrent
+from time import perf_counter
+from concurrent.futures import ThreadPoolExecutor
 from hive.db.adapter import Db
 
 from hive.indexer.accounts import Accounts
 from hive.indexer.posts import Posts
-from hive.indexer.cached_post import CachedPost
 from hive.indexer.custom_op import CustomOp
 from hive.indexer.payments import Payments
 from hive.indexer.follow import Follow
+from hive.indexer.votes import Votes
+from hive.indexer.post_data_cache import PostDataCache
+from hive.indexer.reputations import Reputations
+from hive.indexer.reblog import Reblog
+from hive.indexer.notify import Notify
+
+from hive.utils.stats import OPStatusManager as OPSM
+from hive.utils.stats import FlushStatusManager as FSM
+from hive.utils.post_active import update_active_starting_from_posts_on_block
+
+from hive.server.common.payout_stats import PayoutStats
+from hive.server.common.mentions import Mentions
+from hive.utils.timer import time_it
 
 log = logging.getLogger(__name__)
 
 DB = Db.instance()
 
+def time_collector(f):
+    startTime = FSM.start()
+    result = f()
+    elapsedTime = FSM.stop(startTime)
+
+    return (result, elapsedTime)
+
 class Blocks:
     """Processes blocks, dispatches work, manages `hive_blocks` table."""
+    blocks_to_flush = []
+    _head_block_date = None
+    _current_block_date = None
+
+    _concurrent_flush = [
+      ('Posts', Posts.flush, Posts),
+      ('PostDataCache', PostDataCache.flush, PostDataCache),
+      ('Reputations', Reputations.flush, Reputations),
+      ('Votes', Votes.flush, Votes),
+      ('Follow', Follow.flush, Follow),
+      ('Reblog', Reblog.flush, Reblog),
+      ('Notify', Notify.flush, Notify),
+      ('Accounts', Accounts.flush, Accounts)
+    ]
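+    # Each entry is (name, flush callable, holder class); after the main block transaction
+    # commits, these flushes are executed concurrently in a thread pool (see process_multi).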
+
+    def __init__(cls):
+        head_date = cls.head_date()
+        if head_date == '':
+            cls._head_block_date = None
+            cls._current_block_date = None
+        else:
+            cls._head_block_date = head_date
+            cls._current_block_date = head_date
+
+    @classmethod
+    def setup_own_db_access(cls, sharedDbAdapter):
+        PostDataCache.setup_own_db_access(sharedDbAdapter)
+        Reputations.setup_own_db_access(sharedDbAdapter)
+        Votes.setup_own_db_access(sharedDbAdapter)
+        Follow.setup_own_db_access(sharedDbAdapter)
+        Posts.setup_own_db_access(sharedDbAdapter)
+        Reblog.setup_own_db_access(sharedDbAdapter)
+        Notify.setup_own_db_access(sharedDbAdapter)
+        Accounts.setup_own_db_access(sharedDbAdapter)
+        PayoutStats.setup_own_db_access(sharedDbAdapter)
+        Mentions.setup_own_db_access(sharedDbAdapter)
 
     @classmethod
     def head_num(cls):
@@ -27,24 +84,23 @@ class Blocks:
     @classmethod
     def head_date(cls):
         """Get hive's head block date."""
-        sql = "SELECT created_at FROM hive_blocks ORDER BY num DESC LIMIT 1"
+        sql = "SELECT head_block_time()"
         return str(DB.query_one(sql) or '')
 
     @classmethod
-    def process(cls, block):
-        """Process a single block. Always wrap in a transaction!"""
-        #assert is_trx_active(), "Block.process must be in a trx"
-        return cls._process(block, is_initial_sync=False)
-
-    @classmethod
-    def process_multi(cls, blocks, is_initial_sync=False):
+    def process_multi(cls, blocks, vops, is_initial_sync):
         """Batch-process blocks; wrapped in a transaction."""
+        time_start = OPSM.start()
+
         DB.query("START TRANSACTION")
 
         last_num = 0
+        first_block = -1
         try:
             for block in blocks:
-                last_num = cls._process(block, is_initial_sync)
+                if first_block == -1:
+                    first_block = int(block['block_id'][:8], base=16)
+                last_num = cls._process(block, vops)
         except Exception as e:
             log.error("exception encountered block %d", last_num + 1)
             raise e
@@ -52,66 +108,183 @@ class Blocks:
         # Follows flushing needs to be atomic because recounts are
         # expensive. So is tracking follows at all; hence we track
         # deltas in memory and update follow/er counts in bulk.
-        Follow.flush(trx=False)
+
+        flush_time = FSM.start()
+        def register_time(f_time, name, pushed):
+            assert pushed is not None
+            FSM.flush_stat(name, FSM.stop(f_time), pushed)
+            return FSM.start()
+
+        log.info("#############################################################################")
+        flush_time = register_time(flush_time, "Blocks", cls._flush_blocks())
 
         DB.query("COMMIT")
 
+        completedThreads = 0
+
+        pool = ThreadPoolExecutor(max_workers = len(cls._concurrent_flush))
+        flush_futures = {pool.submit(time_collector, f): (description, c) for (description, f, c) in cls._concurrent_flush}
+        for future in concurrent.futures.as_completed(flush_futures):
+            (description, c) = flush_futures[future]
+            completedThreads = completedThreads + 1
+            try:
+                (n, elapsedTime) = future.result()
+                assert n is not None
+                assert not c.tx_active()
+
+                FSM.flush_stat(description, elapsedTime, n)
+
+#                if n > 0:
+#                    log.info('%r flush generated %d records' % (description, n))
+            except Exception as exc:
+                log.error('%r generated an exception: %s' % (description, exc))
+                raise exc
+        pool.shutdown()
+
+        assert completedThreads == len(cls._concurrent_flush)
+
+        if (not is_initial_sync) and (first_block > -1):
+            DB.query("START TRANSACTION")
+            cls.on_live_blocks_processed( first_block, last_num )
+            DB.query("COMMIT")
+
+        log.info(f"[PROCESS MULTI] {len(blocks)} blocks in {OPSM.stop(time_start) :.4f}s")
+
+    @staticmethod
+    def prepare_vops(comment_payout_ops, vopsList, date, block_num):
+        ineffective_deleted_ops = {}
+        registered_ops_stats = [ 'author_reward_operation', 'comment_reward_operation', 'effective_comment_vote_operation', 'comment_payout_update_operation', 'ineffective_delete_comment_operation']
+
+        for vop in vopsList:
+            start = OPSM.start()
+            key = None
+
+            op_type = vop['type']
+            if op_type not in registered_ops_stats:
+                continue
+            op_value = vop['value']
+            op_value['block_num'] = block_num
+            key = "{}/{}".format(op_value['author'], op_value['permlink'])
+
+            if op_type == 'author_reward_operation':
+                if key not in comment_payout_ops:
+                    comment_payout_ops[key] = { 'author_reward_operation':None, 'comment_reward_operation':None, 'effective_comment_vote_operation':None, 'comment_payout_update_operation':None }
+
+                comment_payout_ops[key][op_type] = ( op_value, date )
+
+            elif op_type == 'comment_reward_operation':
+                if key not in comment_payout_ops:
+                    comment_payout_ops[key] = { 'author_reward_operation':None, 'comment_reward_operation':None, 'effective_comment_vote_operation':None, 'comment_payout_update_operation':None }
+
+                comment_payout_ops[key]['effective_comment_vote_operation'] = None
+
+                comment_payout_ops[key][op_type] = ( op_value, date )
+
+            elif op_type == 'effective_comment_vote_operation':
+                Votes.effective_comment_vote_op( op_value )
+                Reputations.process_vote(block_num, op_value)
+
+                if key not in comment_payout_ops:
+                    comment_payout_ops[key] = { 'author_reward_operation':None, 'comment_reward_operation':None, 'effective_comment_vote_operation':None, 'comment_payout_update_operation':None }
+
+                comment_payout_ops[key][op_type] = ( op_value, date )
+
+            elif op_type == 'comment_payout_update_operation':
+                if key not in comment_payout_ops:
+                    comment_payout_ops[key] = { 'author_reward_operation':None, 'comment_reward_operation':None, 'effective_comment_vote_operation':None, 'comment_payout_update_operation':None }
+
+                comment_payout_ops[key][op_type] = ( op_value, date )
+
+            elif op_type == 'ineffective_delete_comment_operation':
+                ineffective_deleted_ops[key] = {}
+
+            if op_type in registered_ops_stats:
+                OPSM.op_stats(op_type, OPSM.stop(start))
+
+        return ineffective_deleted_ops
+
+
     @classmethod
-    def _process(cls, block, is_initial_sync=False):
+    def _process(cls, block, virtual_operations):
         """Process a single block. Assumes a trx is open."""
         #pylint: disable=too-many-branches
         num = cls._push(block)
-        date = block['timestamp']
+        cls._current_block_date = block['timestamp']
+
+        # The head block date must point to the last imported block (not the current one) to conform to hived behavior:
+        # operations processed by the node are included in the block currently being produced, so their processing time
+        # equals that of the last produced block. Unfortunately this is not true for all operations, most likely for dates
+        # that used to come from FatNode, which supplemented them with its own current head block; since that happened
+        # after block processing, it saw a later block (equal to _current_block_date here).
+        if cls._head_block_date is None:
+            cls._head_block_date = cls._current_block_date
+
+        ineffective_deleted_ops = None
+
+        if num in virtual_operations:
+            ineffective_deleted_ops = Blocks.prepare_vops(Posts.comment_payout_ops, virtual_operations[num], cls._current_block_date, num)
 
-        account_names = set()
         json_ops = []
         for tx_idx, tx in enumerate(block['transactions']):
             for operation in tx['operations']:
+                start = OPSM.start()
                 op_type = operation['type']
                 op = operation['value']
 
+                assert 'block_num' not in op
+                op['block_num'] = num
+
+                account_name = None
+                op_details = None
                 # account ops
                 if op_type == 'pow_operation':
-                    account_names.add(op['worker_account'])
+                    account_name = op['worker_account']
                 elif op_type == 'pow2_operation':
-                    account_names.add(op['work']['value']['input']['worker_account'])
+                    account_name = op['work']['value']['input']['worker_account']
                 elif op_type == 'account_create_operation':
-                    account_names.add(op['new_account_name'])
+                    account_name = op['new_account_name']
+                    op_details = op
                 elif op_type == 'account_create_with_delegation_operation':
-                    account_names.add(op['new_account_name'])
+                    account_name = op['new_account_name']
+                    op_details = op
                 elif op_type == 'create_claimed_account_operation':
-                    account_names.add(op['new_account_name'])
+                    account_name = op['new_account_name']
+                    op_details = op
+
+                Accounts.register(account_name, op_details, cls._head_block_date, num)
 
                 # account metadata updates
-                elif op_type == 'account_update_operation':
-                    if not is_initial_sync:
-                        Accounts.dirty(op['account']) # full
+                if op_type == 'account_update_operation':
+                    Accounts.update_op( op, False )
                 elif op_type == 'account_update2_operation':
-                    if not is_initial_sync:
-                        Accounts.dirty(op['account']) # full
+                    Accounts.update_op( op, True )
 
                 # post ops
                 elif op_type == 'comment_operation':
-                    Posts.comment_op(op, date)
-                    if not is_initial_sync:
-                        Accounts.dirty(op['author']) # lite - stats
+                    Posts.comment_op(op, cls._head_block_date)
                 elif op_type == 'delete_comment_operation':
-                    Posts.delete_op(op)
+                    key = "{}/{}".format(op['author'], op['permlink'])
+                    if ( ineffective_deleted_ops is None ) or ( key not in ineffective_deleted_ops ):
+                        Posts.delete_op(op, cls._head_block_date)
+                elif op_type == 'comment_options_operation':
+                    Posts.comment_options_op(op)
                 elif op_type == 'vote_operation':
-                    if not is_initial_sync:
-                        Accounts.dirty(op['author']) # lite - rep
-                        Accounts.dirty(op['voter']) # lite - stats
-                        CachedPost.vote(op['author'], op['permlink'],
-                                        None, op['voter'])
+                    Votes.vote_op(op, cls._head_block_date)
 
                 # misc ops
                 elif op_type == 'transfer_operation':
-                    Payments.op_transfer(op, tx_idx, num, date)
+                    Payments.op_transfer(op, tx_idx, num, cls._head_block_date)
                 elif op_type == 'custom_json_operation':
                     json_ops.append(op)
 
-        Accounts.register(account_names, date)     # register any new names
-        CustomOp.process_ops(json_ops, num, date)  # follow/reblog/community ops
+                if op_type != 'custom_json_operation':
+                    OPSM.op_stats(op_type, OPSM.stop(start))
+
+        # follow/reblog/community ops
+        if json_ops:
+            CustomOp.process_ops(json_ops, num, cls._head_block_date)
+
+        cls._head_block_date = cls._current_block_date
 
         return num
 
@@ -162,16 +335,33 @@ class Blocks:
         """Insert a row in `hive_blocks`."""
         num = int(block['block_id'][:8], base=16)
         txs = block['transactions']
-        DB.query("INSERT INTO hive_blocks (num, hash, prev, txs, ops, created_at) "
-                 "VALUES (:num, :hash, :prev, :txs, :ops, :date)", **{
-                     'num': num,
-                     'hash': block['block_id'],
-                     'prev': block['previous'],
-                     'txs': len(txs),
-                     'ops': sum([len(tx['operations']) for tx in txs]),
-                     'date': block['timestamp']})
+        cls.blocks_to_flush.append({
+            'num': num,
+            'hash': block['block_id'],
+            'prev': block['previous'],
+            'txs': len(txs),
+            'ops': sum([len(tx['operations']) for tx in txs]),
+            'date': block['timestamp']})
         return num
 
+    @classmethod
+    def _flush_blocks(cls):
+        query = """
+            INSERT INTO
+                hive_blocks (num, hash, prev, txs, ops, created_at)
+            VALUES
+        """
+        values = []
+        for block in cls.blocks_to_flush:
+            values.append("({}, '{}', '{}', {}, {}, '{}')".format(block['num'], block['hash'],
+                                                                  block['prev'], block['txs'],
+                                                                  block['ops'], block['date']))
+        query = query + ",".join(values)
+        DB.query(query)
+        n = len(cls.blocks_to_flush)
+        cls.blocks_to_flush.clear()
+        return n
+
     @classmethod
     def _pop(cls, blocks):
         """Pop head blocks to navigate head to a point prior to fork.
@@ -215,13 +405,12 @@ class Blocks:
             # remove all recent records -- core
             DB.query("DELETE FROM hive_feed_cache  WHERE created_at >= :date", date=date)
             DB.query("DELETE FROM hive_reblogs     WHERE created_at >= :date", date=date)
-            DB.query("DELETE FROM hive_follows     WHERE created_at >= :date", date=date) #*
+            DB.query("DELETE FROM hive_follows     WHERE created_at >= :date", date=date)
 
             # remove posts: core, tags, cache entries
             if post_ids:
-                DB.query("DELETE FROM hive_posts_cache WHERE post_id IN :ids", ids=post_ids)
-                DB.query("DELETE FROM hive_post_tags   WHERE post_id IN :ids", ids=post_ids)
                 DB.query("DELETE FROM hive_posts       WHERE id      IN :ids", ids=post_ids)
+                DB.query("DELETE FROM hive_post_data   WHERE id      IN :ids", ids=post_ids)
 
             DB.query("DELETE FROM hive_payments    WHERE block_num = :num", num=num)
             DB.query("DELETE FROM hive_blocks      WHERE num = :num", num=num)
@@ -229,3 +418,30 @@ class Blocks:
         DB.query("COMMIT")
         log.warning("[FORK] recovery complete")
         # TODO: manually re-process here the blocks which were just popped.
+
+    @classmethod
+    @time_it
+    def on_live_blocks_processed( cls, first_block, last_block ):
+        """Is invoked when processing of block range is done and received
+           informations from hived are already stored in db
+        """
+        update_active_starting_from_posts_on_block( first_block, last_block )
+
+        is_hour_action = last_block % 1200 == 0
+
+        queries = [
+            "SELECT update_posts_rshares({}, {})".format(first_block, last_block),
+            "SELECT update_hive_posts_children_count({}, {})".format(first_block, last_block),
+            "SELECT update_hive_posts_root_id({},{})".format(first_block, last_block),
+            "SELECT update_hive_posts_api_helper({},{})".format(first_block, last_block),
+            "SELECT update_feed_cache({}, {})".format(first_block, last_block),
+            "SELECT update_hive_posts_mentions({}, {})".format(first_block, last_block),
+            "SELECT update_notification_cache({}, {}, {})".format(first_block, last_block, is_hour_action),
+            "SELECT update_follow_count({}, {})".format(first_block, last_block),
+            "SELECT update_account_reputations({}, {}, False)".format(first_block, last_block)
+        ]
+
+        for query in queries:
+            time_start = perf_counter()
+            DB.query_no_return(query)
+            log.info("%s executed in: %.4f s", query, perf_counter() - time_start)
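Note: the hunk above replaces per-block INSERTs with an in-memory buffer (blocks_to_flush) that _flush_blocks writes out in a single multi-row statement. Below is a minimal sketch of that buffer-then-flush pattern; the names (BlockBuffer, db) are illustrative only, not the actual Blocks class, and a db.query(sql) adapter is assumed.

    # Minimal sketch of the buffer-then-flush pattern (illustrative names only).
    class BlockBuffer:
        def __init__(self, db):
            self.db = db
            self.rows = []

        def store(self, block):
            """Buffer one block row instead of issuing an INSERT immediately."""
            self.rows.append((block['num'], block['hash'], block['prev'],
                              block['txs'], block['ops'], block['date']))

        def flush(self):
            """Write all buffered rows with a single multi-row INSERT."""
            if not self.rows:
                return 0
            values = ",".join("({}, '{}', '{}', {}, {}, '{}')".format(*row)
                              for row in self.rows)
            self.db.query("INSERT INTO hive_blocks (num, hash, prev, txs, ops, created_at) VALUES " + values)
            n = len(self.rows)
            self.rows.clear()
            return n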
diff --git a/hive/indexer/cached_post.py b/hive/indexer/cached_post.py
deleted file mode 100644
index 25f731827daa6cd9eed3dbd72f1757791f30da04..0000000000000000000000000000000000000000
--- a/hive/indexer/cached_post.py
+++ /dev/null
@@ -1,678 +0,0 @@
-"""Manages cached post data."""
-
-import math
-import collections
-import logging
-import ujson as json
-
-from toolz import partition_all
-from hive.db.adapter import Db
-
-from hive.utils.post import post_basic, post_legacy, post_payout, post_stats, mentions
-from hive.utils.timer import Timer
-from hive.indexer.accounts import Accounts
-from hive.indexer.notify import Notify
-from hive.server.common.mutes import Mutes
-
-# pylint: disable=too-many-lines
-
-log = logging.getLogger(__name__)
-
-DB = Db.instance()
-
-# levels of post dirtiness, in order of decreasing priority
-LEVELS = ['insert', 'payout', 'update', 'upvote', 'recount']
-
-def _keyify(items):
-    return dict(map(lambda x: ("val_%d" % x[0], x[1]), enumerate(items)))
-
-class CachedPost:
-    """Maintain update queue and writing to `hive_posts_cache`."""
-
-    # cursor signifying upper bound of cached post span
-    _last_id = -1
-
-    # cached id map
-    _ids = {}
-
-    # urls which are missing from id map
-    _noids = set()
-
-    # dirty posts; {key: dirty_level}
-    _queue = collections.OrderedDict()
-
-    # new promoted values, pending write
-    _pending_promoted = {}
-
-    # pending vote notifs {pid: [voters]}
-    _votes = {}
-
-    @classmethod
-    def update_promoted_amount(cls, post_id, amount):
-        """Set a new pending amount for a post for its next update."""
-        cls._pending_promoted[post_id] = amount
-
-    @classmethod
-    def _dirty(cls, level, author, permlink, pid=None):
-        """Mark a post as dirty."""
-        assert level in LEVELS, "invalid level {}".format(level)
-        mode = LEVELS.index(level)
-        url = author + '/' + permlink
-
-        # add to appropriate queue.
-        if url not in cls._queue:
-            cls._queue[url] = mode
-        # upgrade priority if needed
-        elif cls._queue[url] > mode:
-            cls._queue[url] = mode
-
-        # add to id map, or register missing
-        if pid and url in cls._ids:
-            assert pid == cls._ids[url], "pid map conflict #78"
-        elif pid:
-            cls._ids[url] = pid
-        else:
-            cls._noids.add(url)
-
-    @classmethod
-    def _get_id(cls, url):
-        """Given a post url, get its id."""
-        if url in cls._ids:
-            return cls._ids[url]
-        raise Exception("requested id for %s not in map" % url)
-
-    @classmethod
-    def recount(cls, author, permlink, pid=None):
-        """Force a child re-count."""
-        cls._dirty('recount', author, permlink, pid)
-
-    @classmethod
-    def vote(cls, author, permlink, pid=None, voter=None):
-        """Handle a post dirtied by a `vote` op."""
-        cls._dirty('upvote', author, permlink, pid)
-        if voter:
-            url = author + '/' + permlink
-            if url not in cls._votes:
-                cls._votes[url] = []
-            cls._votes[url].append(voter)
-
-    @classmethod
-    def insert(cls, author, permlink, pid):
-        """Handle a post created by a `comment` op."""
-        cls._dirty('insert', author, permlink, pid)
-
-    @classmethod
-    def update(cls, author, permlink, pid):
-        """Handle a post updated by a `comment` op."""
-        cls._dirty('update', author, permlink, pid)
-
-    @classmethod
-    def delete(cls, post_id, author, permlink):
-        """Handle a post deleted by a `delete_comment` op.
-
-        With steemd, posts can be 'deleted' or unallocated in certain
-        conditions. It requires foregoing convenient assumptions, e.g.:
-
-         - author/permlink is unique and always references the same post
-         - you can always get_content on any author/permlink you see in an op
-        """
-        DB.query("DELETE FROM hive_posts_cache WHERE post_id = :id", id=post_id)
-        DB.query("DELETE FROM hive_post_tags   WHERE post_id = :id", id=post_id)
-
-        # if it was queued for a write, remove it
-        url = author+'/'+permlink
-        log.warning("deleting %s", url) #173
-        if url in cls._queue:
-            del cls._queue[url]
-            log.warning("deleted %s", url) #173
-            if url in cls._ids:
-                del cls._ids[url]
-
-    @classmethod
-    def undelete(cls, post_id, author, permlink, category):
-        """Handle a post 'undeleted' by a `comment` op.
-
-        'Undeletion' occurs when hive detects that a previously deleted
-        author/permlink combination has been reused on a new post. Hive
-        does not delete hive_posts entries because they are currently
-        irreplaceable in case of a fork. Instead, we reuse the slot.
-        It's important to immediately insert a placeholder in the cache
-        table since hive only scans forward. This row's properties push
-        it to the front of update-immediately queue.
-
-        Alternate ways of handling undeletes:
-
-         - delete row from hive_posts so that it can be re-indexed (re-id'd)
-            - comes at a risk of losing expensive entry on fork (and no undo)
-         - create undo table for hive_posts, hive_follows, etc, & link to block
-         - rely on steemd's post.id instead of database autoincrement
-           - requires way to query steemd post objects by id to be useful
-             - batch get_content_by_ids in steemd would be /huge/ speedup
-         - create a consistent cache queue table or dirty flag col
-        """
-        # do not force-write unless cache spans this id.
-        if post_id > cls.last_id():
-            cls.insert(author, permlink, post_id)
-            return
-
-        # force-create dummy row to ensure cache is aware. only needed when
-        # cache already spans this id, in case in-mem buffer is lost. default
-        # value for payout_at ensures that it will get picked up for update.
-        DB.query(cls._insert({
-            'post_id': post_id,
-            'author': author,
-            'permlink': permlink,
-            'category': category}))
-        cls.update(author, permlink, post_id)
-        log.warning("undeleted %s/%s", author, permlink) #173
-
-    @classmethod
-    def flush(cls, steem, trx=False, spread=1, full_total=None):
-        """Process all posts which have been marked as dirty."""
-        cls._load_noids() # load missing ids
-        assert spread == 1, "not fully tested, use with caution"
-
-        counts = {}
-        tuples = []
-        for level in LEVELS:
-            tups = cls._get_tuples_for_level(level, spread)
-            counts[level] = len(tups)
-            tuples.extend(tups)
-
-        if trx or len(tuples) > 250:
-            changed = filter(lambda t: t[1], counts.items())
-            summary = list(map(lambda group: "%d %ss" % group[::-1], changed))
-            summary = ', '.join(summary) if summary else 'none'
-            log.info("[PREP] posts cache process: %s", summary)
-
-        for url, _, _ in tuples:
-            del cls._queue[url]
-
-        cls._update_batch(steem, tuples, trx, full_total=full_total)
-
-        for url, _, _ in tuples:
-            if url not in cls._queue and url in cls._ids:
-                del cls._ids[url]
-
-        return counts
-
-    @classmethod
-    def _get_tuples_for_level(cls, level, fraction=1):
-        """Query tuples to be updated.
-
-        Given a specific flush level (insert, payout, update, upvote),
-        returns a list of tuples to be passed to _update_batch, in the
-        form of: `[(url, id, level)*]`
-        """
-        mode = LEVELS.index(level)
-        urls = [url for url, i in cls._queue.items() if i == mode]
-        if fraction > 1 and level != 'insert': # inserts must be full flush
-            urls = urls[0:math.ceil(len(urls) / fraction)]
-        return [(url, cls._get_id(url), level) for url in urls]
-
-    @classmethod
-    def _load_noids(cls):
-        """Load ids for posts we don't know the ids of.
-
-        When posts are marked dirty, specifying the id is optional
-        because a successive call might be able to provide it "for
-        free". Before flushing changes this method should be called
-        to fill in any gaps.
-        """
-        from hive.indexer.posts import Posts
-        noids = cls._noids - set(cls._ids.keys())
-        tuples = [(Posts.get_id(*url.split('/')), url) for url in noids]
-        for pid, url in tuples:
-            assert pid, "WARNING: missing id for %s" % url
-            cls._ids[url] = pid
-        cls._noids = set()
-        return len(tuples)
-
-    @classmethod
-    def _select_paidout_tuples(cls, date):
-        """Query hive_posts_cache for payout sweep.
-
-        Select all posts which should have been paid out before `date`
-        yet do not have the `is_paidout` flag set. We perform this
-        sweep to ensure that we always have accurate final payout
-        state. Since payout values vary even between votes, we'd have
-        stale data if we didn't sweep, and only waited for incoming
-        votes before an update.
-        """
-        from hive.indexer.posts import Posts
-
-        sql = """SELECT post_id FROM hive_posts_cache
-                  WHERE is_paidout = '0' AND payout_at <= :date"""
-        ids = DB.query_col(sql, date=date)
-        if not ids:
-            return []
-
-        sql = """SELECT id, author, permlink
-                 FROM hive_posts WHERE id IN :ids"""
-        results = DB.query_all(sql, ids=tuple(ids))
-        return Posts.save_ids_from_tuples(results)
-
-    @classmethod
-    def dirty_paidouts(cls, date):
-        """Mark dirty all paidout posts not yet updated in db."""
-        paidout = cls._select_paidout_tuples(date)
-        authors = set()
-        for (pid, author, permlink) in paidout:
-            authors.add(author)
-            cls._dirty('payout', author, permlink, pid)
-        Accounts.dirty_set(authors) # force-update accounts on payout
-
-        if len(paidout) > 200:
-            log.info("[PREP] Found %d payouts for %d authors since %s",
-                     len(paidout), len(authors), date)
-        return len(paidout)
-
-    @classmethod
-    def _select_missing_tuples(cls, last_cached_id, limit=1000000):
-        """Fetch posts inserted into main posts table but not cache."""
-        from hive.indexer.posts import Posts
-        sql = """SELECT id, author, permlink, promoted FROM hive_posts
-                  WHERE is_deleted = '0' AND id > :id
-               ORDER BY id LIMIT :limit"""
-        results = DB.query_all(sql, id=last_cached_id, limit=limit)
-        return Posts.save_ids_from_tuples(results)
-
-    @classmethod
-    def dirty_missing(cls, limit=250000):
-        """Mark dirty all hive_posts records not yet written to cache."""
-        from hive.indexer.posts import Posts
-
-        # cached posts inserted sequentially, so compare MAX(id)'s
-        last_cached_id = cls.last_id()
-        last_post_id = Posts.last_id()
-        gap = last_post_id - last_cached_id
-
-        if gap:
-            missing = cls._select_missing_tuples(last_cached_id, limit)
-            for pid, author, permlink, promoted in missing:
-                if promoted > 0: # ensure we don't miss promote amount
-                    cls.update_promoted_amount(pid, promoted)
-                cls._dirty('insert', author, permlink, pid)
-
-        return gap
-
-    @classmethod
-    def recover_missing_posts(cls, steem):
-        """Startup routine that cycles through missing posts.
-
-        This is used for (1) initial sync, and (2) recovering missing
-        cache records upon launch if hive fast-sync was interrupted.
-        """
-        gap = cls.dirty_missing()
-        log.info("[INIT] %d missing post cache entries", gap)
-        while cls.flush(steem, trx=True, full_total=gap)['insert']:
-            last_gap = gap
-            gap = cls.dirty_missing()
-            if gap == last_gap:
-                # edge case -- if last post entry was deleted, this
-                # process would never reach condition where the last
-                # cached id == last post id. abort if progress stalls.
-                log.warning('ignoring %d inserts -- may be deleted')
-                break
-
-    @classmethod
-    def _update_batch(cls, steem, tuples, trx=True, full_total=None):
-        """Fetch, process, and write a batch of posts.
-
-        Given a set of posts, fetch from steemd and write them to the
-        db. The `tuples` arg is the form of `[(url, id, level)*]`
-        representing posts which are to be fetched from steemd and
-        updated in cache.
-
-        Regarding _bump_last_id: there's a rare edge case when the last
-        hive_post entry has been deleted "in the future" (ie, we haven't
-        seen the delete op yet). So even when the post is not found
-        (i.e. `not post['author']`), it's important to advance _last_id,
-        because this cursor is used to deduce any missing cache entries.
-        """
-        # pylint: disable=too-many-locals
-
-        timer = Timer(total=len(tuples), entity='post',
-                      laps=['rps', 'wps'], full_total=full_total)
-        tuples = sorted(tuples, key=lambda x: x[1]) # enforce ASC id's
-
-        for tups in partition_all(1000, tuples):
-            timer.batch_start()
-            buffer = []
-
-            post_args = [tup[0].split('/') for tup in tups]
-            posts = steem.get_content_batch(post_args)
-            post_ids = [tup[1] for tup in tups]
-            post_levels = [tup[2] for tup in tups]
-
-            coremap = cls._get_core_fields(tups)
-            for pid, post, level in zip(post_ids, posts, post_levels):
-                if post['author']:
-                    assert pid in coremap, 'pid not in coremap'
-                    if pid in coremap:
-                        core = coremap[pid]
-                        post['category'] = core['category']
-                        post['community_id'] = core['community_id']
-                        post['gray'] = core['is_muted']
-                        post['hide'] = not core['is_valid']
-                    buffer.extend(cls._sql(pid, post, level=level))
-                else:
-                    # When a post has been deleted (or otherwise DNE),
-                    # steemd simply returns a blank post  object w/ all
-                    # fields blank. While it's best to not try to cache
-                    # already-deleted posts, it can happen during missed
-                    # post sweep and while using `trail_blocks` > 0.
-
-                    # monitor: post not found which should def. exist; see #173
-                    sql = """SELECT id, author, permlink, is_deleted
-                               FROM hive_posts WHERE id = :id"""
-                    row = DB.query_row(sql, id=pid)
-                    if row['is_deleted']:
-                        # rare or impossible -- report if detected
-                        log.error("found deleted post for %s: %s", level, row)
-                    else:
-                        # most likely cause of this condition is that the post
-                        # has been deleted (e.g. sync trails by 2 blocks, post
-                        # was inserted at head-2, deleted at head). another
-                        # possible cause is that a node behind a load balancer
-                        # is behind; we detected a new post but querying a node
-                        # that hasn't seen it yet.
-                        log.warning("post not found -- DEFER %s %s", level, row)
-                        cls._dirty(level, row['author'], row['permlink'], pid)
-
-                cls._bump_last_id(pid)
-
-            timer.batch_lap()
-            DB.batch_queries(buffer, trx)
-
-            timer.batch_finish(len(posts))
-            if len(tuples) >= 1000:
-                log.info(timer.batch_status())
-
-    @classmethod
-    def last_id(cls):
-        """Retrieve the latest post_id that was cached."""
-        if cls._last_id == -1:
-            # after initial query, we maintain last_id w/ _bump_last_id()
-            sql = "SELECT COALESCE(MAX(post_id), 0) FROM hive_posts_cache"
-            cls._last_id = DB.query_one(sql)
-        return cls._last_id
-
-    @classmethod
-    def _get_core_fields(cls, tups):
-        """Cached posts must inherit some properties from hive_posts.
-
-        Purpose
-         - immutable `category` (returned from steemd is subject to change)
-         - authoritative community_id can be determined and written
-         - community muted/valid cols override legacy gray/hide logic
-        """
-        # get list of ids of posts which are to be inserted
-        # TODO: try conditional. currently competes w/ legacy flags on vote
-        #ids = [tup[1] for tup in tups if tup[2] in ('insert', 'update')]
-        ids = [tup[1] for tup in tups]
-        if not ids:
-            return {}
-
-        # build a map of id->fields for each of those posts
-        sql = """SELECT id, category, community_id, is_muted, is_valid
-                   FROM hive_posts WHERE id IN :ids"""
-        core = {r[0]: {'category': r[1],
-                       'community_id': r[2],
-                       'is_muted': r[3],
-                       'is_valid': r[4]}
-                for r in DB.query_all(sql, ids=tuple(ids))}
-        return core
-
-    @classmethod
-    def _bump_last_id(cls, next_id):
-        """Update our last_id based on a recent insert."""
-        last_id = cls.last_id()
-        if next_id <= last_id:
-            return
-
-        gap = next_id - last_id - 1
-        if gap:
-            log.info("skipped %d ids %d -> %d", gap, last_id, next_id)
-            cls._ensure_safe_gap(last_id, next_id)
-
-        cls._last_id = next_id
-
-    @classmethod
-    def _ensure_safe_gap(cls, last_id, next_id):
-        """Paranoid check of important operating assumption."""
-        sql = """SELECT COUNT(*) FROM hive_posts
-                  WHERE id BETWEEN :x1 AND :x2 AND is_deleted = '0'"""
-        missing_posts = DB.query_one(sql, x1=(last_id + 1), x2=(next_id - 1))
-        if missing_posts:
-            raise Exception("found cache gap: %d --> %d (%d)"
-                            % (last_id, next_id, missing_posts))
-
-    @classmethod
-    def _sql(cls, pid, post, level=None):
-        """Given a post and "update level", generate SQL edit statement.
-
-        Valid levels are:
-         - `insert`: post does not yet exist in cache
-         - `payout`: post was paidout
-         - `update`: post was modified
-         - `upvote`: post payout/votes changed
-         - `recount`: post child count changed
-        """
-
-        #pylint: disable=bad-whitespace
-        assert post['author'], "post {} is blank".format(pid)
-
-        # last-minute sanity check to ensure `pid` is correct #78
-        pid2 = cls._get_id(post['author']+'/'+post['permlink'])
-        assert pid == pid2, "hpc id %d maps to %d" % (pid, pid2)
-
-        # inserts always sequential. if pid > last_id, this operation
-        # *must* be an insert; so `level` must not be any form of update.
-        if pid > cls.last_id() and level != 'insert':
-            raise Exception("WARNING: new pid, but level=%s. #%d vs %d, %s"
-                            % (level, pid, cls.last_id(), repr(post)))
-
-        # start building the queries
-        values = [('post_id', pid)]
-
-        # immutable; write only once (*edge case: undeleted posts)
-        if level == 'insert':
-            values.extend([
-                ('author',   post['author']),
-                ('permlink', post['permlink']),
-                ('category', post['category']),
-                ('depth',    post['depth'])])
-
-        # always write, unless simple vote update
-        if level in ['insert', 'payout', 'update']:
-            basic = post_basic(post)
-            values.extend([
-                ('community_id',  post['community_id']), # immutable*
-                ('created_at',    post['created']),    # immutable*
-                ('updated_at',    post['last_update']),
-                ('title',         post['title']),
-                ('payout_at',     basic['payout_at']), # immutable*
-                ('preview',       basic['preview']),
-                ('body',          basic['body']),
-                ('img_url',       basic['image']),
-                ('is_nsfw',       basic['is_nsfw']),
-                ('is_declined',   basic['is_payout_declined']),
-                ('is_full_power', basic['is_full_power']),
-                ('is_paidout',    basic['is_paidout']),
-                ('json',          json.dumps(basic['json_metadata'])),
-                ('raw_json',      json.dumps(post_legacy(post))),
-            ])
-
-        # if there's a pending promoted value to write, pull it out
-        if pid in cls._pending_promoted:
-            bal = cls._pending_promoted.pop(pid)
-            values.append(('promoted', bal))
-
-        # update unconditionally
-        payout = post_payout(post)
-        stats = post_stats(post)
-
-        # //--
-        # if community - override fields.
-        # TODO: make conditional (date-based?)
-        assert 'community_id' in post, 'comm_id not loaded'
-        if post['community_id']:
-            stats['hide'] = post['hide']
-            stats['gray'] = post['gray']
-        # //--
-
-        values.extend([
-            ('payout',      payout['payout']),
-            ('rshares',     payout['rshares']),
-            ('votes',       payout['csvotes']),
-            ('sc_trend',    payout['sc_trend']),
-            ('sc_hot',      payout['sc_hot']),
-            ('flag_weight', stats['flag_weight']),
-            ('total_votes', stats['total_votes']),
-            ('up_votes',    stats['up_votes']),
-            ('is_hidden',   stats['hide']),
-            ('is_grayed',   stats['gray']),
-            ('author_rep',  stats['author_rep']),
-            ('children',    min(post['children'], 32767)),
-        ])
-
-        # update tags if action is insert/update and is root post
-        tag_sqls = []
-        if level in ['insert', 'update'] and not post['depth']:
-            diff = level != 'insert' # do not attempt tag diff on insert
-            tag_sqls.extend(cls._tag_sqls(pid, basic['tags'], diff=diff))
-
-        # if recounting, update the parent next pass.
-        if level == 'recount' and post['depth']:
-            cls.recount(post['parent_author'], post['parent_permlink'])
-
-        # trigger any notifications
-        cls._notifs(post, pid, level, payout['payout'])
-
-        # build the post insert/update SQL, add tag SQLs
-        if level == 'insert':
-            sql = cls._insert(values)
-        else:
-            sql = cls._update(values)
-        return [sql] + tag_sqls
-
-    @classmethod
-    def _notifs(cls, post, pid, level, payout):
-        # pylint: disable=too-many-locals,too-many-branches
-        author = post['author']
-        author_id = Accounts.get_id(author)
-        parent_author = post['parent_author']
-        date = post['last_update']
-
-        # reply notif
-        if level == 'insert' and parent_author and parent_author != author:
-            irredeemable = parent_author in Mutes.all()
-            parent_author_id = Accounts.get_id(parent_author)
-            if not irredeemable and not cls._muted(parent_author_id, author_id):
-                ntype = 'reply' if post['depth'] == 1 else 'reply_comment'
-                Notify(ntype, src_id=author_id, dst_id=parent_author_id,
-                       score=Accounts.default_score(author), post_id=pid,
-                       when=date).write()
-
-        # mentions notif
-        if level in ('insert', 'update'):
-            accounts = set(filter(Accounts.exists, mentions(post['body'])))
-            accounts -= {author, parent_author}
-            score = Accounts.default_score(author)
-            if score < 30: max_mentions = 5
-            elif score < 60: max_mentions = 10
-            else: max_mentions = 25
-            if len(accounts) <= max_mentions:
-                penalty = min([score, 2 * (len(accounts) - 1)])
-                for mention in accounts:
-                    mention_id = Accounts.get_id(mention)
-                    if (not cls._mentioned(pid, mention_id)
-                            and not cls._muted(mention_id, author_id)):
-                        Notify('mention', src_id=author_id,
-                               dst_id=mention_id, post_id=pid, when=date,
-                               score=(score - penalty)).write()
-            else:
-                url = '@%s/%s' % (author, post['permlink'])
-                log.info("skip %d mentions in %s", len(accounts), url)
-
-        # votes notif
-        url = post['author'] + '/' + post['permlink']
-        if url in cls._votes:
-            voters = cls._votes[url]
-            del cls._votes[url]
-            net = float(post['net_rshares'])
-            ratio = float(payout) / net if net else 0
-            for vote in post['active_votes']:
-                rshares = int(vote['rshares'])
-                if vote['voter'] not in voters or rshares < 10e9: continue
-                contrib = int(1000 * ratio * rshares)
-                if contrib < 20: continue # < $0.020
-
-                voter_id = Accounts.get_id(vote['voter'])
-                if not cls._voted(pid, author_id, voter_id):
-                    score = min(100, (len(str(contrib)) - 1) * 25) # $1 = 75
-                    payload = "$%.3f" % (contrib / 1000)
-                    Notify('vote', src_id=voter_id, dst_id=author_id,
-                           when=vote['time'], post_id=pid, score=score,
-                           payload=payload).write()
-
-
-    @classmethod
-    def _muted(cls, account, target):
-        # TODO: optimize (mem cache?)
-        sql = """SELECT 1 FROM hive_follows
-                  WHERE follower = :account
-                    AND following = :target
-                    AND state = 2"""
-        return DB.query_col(sql, account=account, target=target)
-
-    @classmethod
-    def _voted(cls, post_id, account_id, voter_id):
-        sql = """SELECT 1
-                   FROM hive_notifs
-                  WHERE dst_id = :dst_id
-                    AND src_id = :src_id
-                    AND post_id = :post_id
-                    AND type_id = 17"""
-        return bool(DB.query_one(sql, dst_id=account_id,
-                                 post_id=post_id, src_id=voter_id))
-
-    @classmethod
-    def _mentioned(cls, post_id, account_id):
-        sql = """SELECT 1
-                   FROM hive_notifs
-                  WHERE dst_id = :dst_id
-                    AND post_id = :post_id
-                    AND type_id = 16"""
-        return bool(DB.query_one(sql, dst_id=account_id, post_id=post_id))
-
-    @classmethod
-    def _tag_sqls(cls, pid, tags, diff=True):
-        """Generate SQL "deltas" for a post_id's associated tags."""
-        next_tags = set(tags)
-        curr_tags = set()
-        if diff:
-            sql = "SELECT tag FROM hive_post_tags WHERE post_id = :id"
-            curr_tags = set(DB.query_col(sql, id=pid))
-
-        to_rem = (curr_tags - next_tags)
-        if to_rem:
-            sql = "DELETE FROM hive_post_tags WHERE post_id = :id AND tag IN :tags"
-            yield (sql, dict(id=pid, tags=tuple(to_rem)))
-
-        to_add = (next_tags - curr_tags)
-        if to_add:
-            params = _keyify(to_add)
-            vals = ["(:id, :%s)" % key for key in params.keys()]
-            sql = "INSERT INTO hive_post_tags (post_id, tag) VALUES %s"
-            sql += " ON CONFLICT DO NOTHING" # (conflicts due to collation)
-            yield (sql % ','.join(vals), {'id': pid, **params})
-
-    @classmethod
-    def _insert(cls, values):
-        return DB.build_insert('hive_posts_cache', values, pk='post_id')
-
-    @classmethod
-    def _update(cls, values):
-        return DB.build_update('hive_posts_cache', values, pk='post_id')
diff --git a/hive/indexer/community.py b/hive/indexer/community.py
index 1840731a8cc4dfc0900d1941f035f3d474107c7a..4bf36430003673ef8795dc363dcb567916d1e382 100644
--- a/hive/indexer/community.py
+++ b/hive/indexer/community.py
@@ -10,7 +10,6 @@ import ujson as json
 from hive.db.adapter import Db
 from hive.indexer.accounts import Accounts
 from hive.indexer.notify import Notify
-from hive.db.db_state import DbState
 
 log = logging.getLogger(__name__)
 
@@ -29,9 +28,6 @@ TYPE_TOPIC = 1
 TYPE_JOURNAL = 2
 TYPE_COUNCIL = 3
 
-START_BLOCK = 37500000
-START_DATE = '2019-10-22T07:12:36' # effectively 2019-10-22 12:00:00
-
 # https://en.wikipedia.org/wiki/ISO_639-1
 LANGS = ("ab,aa,af,ak,sq,am,ar,an,hy,as,av,ae,ay,az,bm,ba,eu,be,bn,bh,bi,"
          "bs,br,bg,my,ca,ch,ce,ny,zh,cv,kw,co,cr,hr,cs,da,dv,nl,dz,en,eo,"
@@ -57,9 +53,9 @@ def assert_keys_match(keys, expected, allow_missing=True):
     extra = keys - expected
     assert not extra, 'extraneous keys: %s' % extra
 
-def process_json_community_op(actor, op_json, date):
+def process_json_community_op(actor, op_json, date, block_num):
     """Validates community op and apply state changes to db."""
-    CommunityOp.process_if_valid(actor, op_json, date)
+    CommunityOp.process_if_valid(actor, op_json, date, block_num)
 
 def read_key_bool(op, key):
     """Reads a key from dict, ensuring valid bool if present."""
@@ -103,34 +99,32 @@ class Community:
     # id -> name map
     _names = {}
 
+    start_block = 37500000
+
     @classmethod
-    def register(cls, names, block_date):
+    def register(cls, name, block_date, block_num):
         """Block processing: hooks into new account registration.
 
         `Accounts` calls this method with any newly registered names.
         This method checks for any valid community names and inserts them.
         """
 
-        for name in names:
-            #if not re.match(r'^hive-[123]\d{4,6}$', name):
-            if not re.match(r'^hive-[1]\d{4,6}$', name):
-                continue
-            type_id = int(name[5])
-            _id = Accounts.get_id(name)
-
-            # insert community
-            sql = """INSERT INTO hive_communities (id, name, type_id, created_at)
-                          VALUES (:id, :name, :type_id, :date)"""
-            DB.query(sql, id=_id, name=name, type_id=type_id, date=block_date)
+        #if not re.match(r'^hive-[123]\d{4,6}$', name):
+        if not re.match(r'^hive-[1]\d{4,6}$', name):
+            return
+        type_id = int(name[5])
+        _id = Accounts.get_id(name)
 
-            # insert owner
-            sql = """INSERT INTO hive_roles (community_id, account_id, role_id, created_at)
-                         VALUES (:community_id, :account_id, :role_id, :date)"""
-            DB.query(sql, community_id=_id, account_id=_id,
-                     role_id=Role.owner.value, date=block_date)
+        # insert community
+        sql = """INSERT INTO hive_communities (id, name, type_id, created_at, block_num)
+                        VALUES (:id, :name, :type_id, :date, :block_num)"""
+        DB.query(sql, id=_id, name=name, type_id=type_id, date=block_date, block_num=block_num)
 
-            Notify('new_community', src_id=None, dst_id=_id,
-                   when=block_date, community_id=_id).write()
+        # insert owner
+        sql = """INSERT INTO hive_roles (community_id, account_id, role_id, created_at)
+                        VALUES (:community_id, :account_id, :role_id, :date)"""
+        DB.query(sql, community_id=_id, account_id=_id,
+                    role_id=Role.owner.value, date=block_date)
 
     @classmethod
     def validated_id(cls, name):
@@ -223,41 +217,6 @@ class Community:
             return role >= Role.member
         return role >= Role.guest # or at least not muted
 
-    @classmethod
-    def recalc_pending_payouts(cls):
-        """Update all pending payout and rank fields."""
-        sql = """SELECT id,
-                        COALESCE(posts, 0),
-                        COALESCE(payouts, 0),
-                        COALESCE(authors, 0)
-                   FROM hive_communities c
-              LEFT JOIN (
-                             SELECT community_id,
-                                    COUNT(*) posts,
-                                    ROUND(SUM(payout)) payouts,
-                                    COUNT(DISTINCT author) authors
-                               FROM hive_posts_cache
-                              WHERE community_id IS NOT NULL
-                                AND is_paidout = '0'
-                           GROUP BY community_id
-                        ) p
-                     ON community_id = id
-               ORDER BY COALESCE(payouts, 0) DESC,
-                        COALESCE(authors, 0) DESC,
-                        COALESCE(posts, 0) DESC,
-                        subscribers DESC,
-                        (CASE WHEN c.title = '' THEN 1 ELSE 0 END)
-        """
-
-        for rank, row in enumerate(DB.query_all(sql)):
-            cid, posts, payouts, authors = row
-            sql = """UPDATE hive_communities
-                        SET sum_pending = :payouts, num_pending = :posts,
-                            num_authors = :authors, rank = :rank
-                      WHERE id = :id"""
-            DB.query(sql, id=cid, payouts=payouts, posts=posts,
-                     authors=authors, rank=rank+1)
-
 class CommunityOp:
     """Handles validating and processing of community custom_json ops."""
     #pylint: disable=too-many-instance-attributes
@@ -275,9 +234,10 @@ class CommunityOp:
         'unsubscribe':    ['community'],
     }
 
-    def __init__(self, actor, date):
+    def __init__(self, actor, date, block_num):
         """Inits a community op for validation and processing."""
         self.date = date
+        self.block_num = block_num
         self.valid = False
         self.action = None
         self.op = None
@@ -302,9 +262,9 @@ class CommunityOp:
         self.props = None
 
     @classmethod
-    def process_if_valid(cls, actor, op_json, date):
+    def process_if_valid(cls, actor, op_json, date, block_num):
         """Helper to instantiate, validate, process an op."""
-        op = CommunityOp(actor, date)
+        op = CommunityOp(actor, date, block_num)
         if op.validate(op_json):
             op.process()
             return True
@@ -331,15 +291,15 @@ class CommunityOp:
 
         except AssertionError as e:
             payload = str(e)
-            Notify('error', dst_id=self.actor_id,
-                   when=self.date, payload=payload).write()
+            log.info("validation failed with message: '%s'", payload)
+            Notify(block_num=self.block_num, type_id='error', dst_id=self.actor_id,
+                   when=self.date, payload=payload)
 
         return self.valid
 
     def process(self):
         """Applies a validated operation."""
         assert self.valid, 'cannot apply invalid op'
-        from hive.indexer.cached_post import CachedPost
 
         action = self.action
         params = dict(
@@ -354,6 +314,7 @@ class CommunityOp:
             role_id=self.role_id,
             notes=self.notes,
             title=self.title,
+            block_num=self.block_num
         )
 
         # Community-level commands
@@ -365,12 +326,11 @@ class CommunityOp:
 
         elif action == 'subscribe':
             DB.query("""INSERT INTO hive_subscriptions
-                               (account_id, community_id, created_at)
-                        VALUES (:actor_id, :community_id, :date)""", **params)
+                               (account_id, community_id, created_at, block_num)
+                        VALUES (:actor_id, :community_id, :date, :block_num)""", **params)
             DB.query("""UPDATE hive_communities
                            SET subscribers = subscribers + 1
                          WHERE id = :community_id""", **params)
-            self._notify('subscribe')
         elif action == 'unsubscribe':
             DB.query("""DELETE FROM hive_subscriptions
                          WHERE account_id = :actor_id
@@ -385,7 +345,7 @@ class CommunityOp:
                                (account_id, community_id, role_id, created_at)
                         VALUES (:account_id, :community_id, :role_id, :date)
                             ON CONFLICT (account_id, community_id)
-                            DO UPDATE SET role_id = :role_id""", **params)
+                            DO UPDATE SET role_id = :role_id """, **params)
             self._notify('set_role', payload=Role(self.role_id).name)
         elif action == 'setUserTitle':
             DB.query("""INSERT INTO hive_roles
@@ -400,15 +360,11 @@ class CommunityOp:
             DB.query("""UPDATE hive_posts SET is_muted = '1'
                          WHERE id = :post_id""", **params)
             self._notify('mute_post', payload=self.notes)
-            if not DbState.is_initial_sync():
-                CachedPost.update(self.account, self.permlink, self.post_id)
 
         elif action == 'unmutePost':
             DB.query("""UPDATE hive_posts SET is_muted = '0'
                          WHERE id = :post_id""", **params)
             self._notify('unmute_post', payload=self.notes)
-            if not DbState.is_initial_sync():
-                CachedPost.update(self.account, self.permlink, self.post_id)
 
         elif action == 'pinPost':
             DB.query("""UPDATE hive_posts SET is_pinned = '1'
@@ -424,11 +380,6 @@ class CommunityOp:
         return True
 
     def _notify(self, op, **kwargs):
-        if DbState.is_initial_sync():
-            # TODO: set start date for notifs?
-            # TODO: address other callers
-            return
-
         dst_id = None
         score = 35
 
@@ -437,10 +388,10 @@ class CommunityOp:
             if not self._subscribed(self.account_id):
                 score = 15
 
-        Notify(op, src_id=self.actor_id, dst_id=dst_id,
+        Notify(block_num=self.block_num, type_id=op, src_id=self.actor_id, dst_id=dst_id,
                post_id=self.post_id, when=self.date,
                community_id=self.community_id,
-               score=score, **kwargs).write()
+               score=score, **kwargs)
 
     def _validate_raw_op(self, raw_op):
         assert isinstance(raw_op, list), 'op json must be list'
@@ -466,7 +417,7 @@ class CommunityOp:
         _name = read_key_str(self.op, 'community', 16)
         assert _name, 'must name a community'
         _id = Community.validated_id(_name)
-        assert _id, 'community `%s` does not exist' % _name
+        assert _id, 'Community \'%s\' does not exist' % _name
 
         self.community = _name
         self.community_id = _id
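Note: Community.register above only accepts names matching ^hive-[1]\d{4,6}$ and derives the community type from the first digit after the "hive-" prefix. A small standalone sketch of that check (Accounts/DB access omitted; the helper names are illustrative):

    import re

    def is_community_name(name):
        """True for names like 'hive-123456'; only type 1 is currently accepted."""
        return bool(re.match(r'^hive-[1]\d{4,6}$', name))

    def community_type_id(name):
        """The community type is encoded as the first digit after 'hive-'."""
        return int(name[5])

    assert is_community_name('hive-123456')
    assert not is_community_name('hive-223456')   # types 2/3 are commented out above
    assert community_type_id('hive-123456') == 1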
diff --git a/hive/indexer/custom_op.py b/hive/indexer/custom_op.py
index 6c7eb84f46805f791bc750e689f36fec96ee5d52..feba173999aca841a34bd64d0d0e7293751c15cb 100644
--- a/hive/indexer/custom_op.py
+++ b/hive/indexer/custom_op.py
@@ -1,20 +1,20 @@
 """Main custom_json op handler."""
+
 import logging
 
 from funcy.seqs import first, second
 from hive.db.adapter import Db
-from hive.db.db_state import DbState
 
-from hive.indexer.accounts import Accounts
-from hive.indexer.posts import Posts
-from hive.indexer.feed_cache import FeedCache
 from hive.indexer.follow import Follow
+from hive.indexer.reblog import Reblog
 from hive.indexer.notify import Notify
 
-from hive.indexer.community import process_json_community_op, START_BLOCK
+from hive.indexer.community import Community, process_json_community_op
 from hive.utils.normalize import load_json_key
 from hive.utils.json import valid_op_json, valid_date, valid_command, valid_keys
 
+from hive.utils.stats import OPStatusManager as OPSM
+
 DB = Db.instance()
 
 log = logging.getLogger(__name__)
@@ -27,7 +27,6 @@ def _get_auth(op):
     `required_active_auths` in the future. For now, these are ignored.
     """
     if op['required_auths']:
-        log.warning("unexpected active auths: %s", op)
         return None
     if len(op['required_posting_auths']) != 1:
         log.warning("unexpected auths: %s", op)
@@ -41,8 +40,8 @@ class CustomOp:
     def process_ops(cls, ops, block_num, block_date):
         """Given a list of operation in block, filter and process them."""
         for op in ops:
-            if op['id'] not in ['follow', 'community', 'notify']:
-                continue
+            start = OPSM.start()
+            opName = str(op['id']) + ( '-ignored' if op['id'] not in ['follow', 'community', 'notify', 'reblog'] else '' )
 
             account = _get_auth(op)
             if not account:
@@ -52,12 +51,17 @@ class CustomOp:
             if op['id'] == 'follow':
                 if block_num < 6000000 and not isinstance(op_json, list):
                     op_json = ['follow', op_json]  # legacy compat
-                cls._process_legacy(account, op_json, block_date)
+                cls._process_legacy(account, op_json, block_date, block_num)
+            elif op['id'] == 'reblog':
+                if block_num < 6000000 and not isinstance(op_json, list):
+                    op_json = ['reblog', op_json]  # legacy compat
+                cls._process_legacy(account, op_json, block_date, block_num)
             elif op['id'] == 'community':
-                if block_num > START_BLOCK:
-                    process_json_community_op(account, op_json, block_date)
+                if block_num > Community.start_block:
+                    process_json_community_op(account, op_json, block_date, block_num)
             elif op['id'] == 'notify':
                 cls._process_notify(account, op_json, block_date)
+            OPSM.op_stats(opName, OPSM.stop(start))
 
     @classmethod
     def _process_notify(cls, account, op_json, block_date):
@@ -77,7 +81,7 @@ class CustomOp:
             log.warning("notify op fail: %s in %s", e, op_json)
 
     @classmethod
-    def _process_legacy(cls, account, op_json, block_date):
+    def _process_legacy(cls, account, op_json, block_date, block_num):
         """Handle legacy 'follow' plugin ops (follow/mute/clear, reblog)
 
         follow {follower: {type: 'account'},
@@ -99,50 +103,6 @@ class CustomOp:
 
         cmd, op_json = op_json  # ['follow', {data...}]
         if cmd == 'follow':
-            Follow.follow_op(account, op_json, block_date)
+            Follow.follow_op(account, op_json, block_date, block_num)
         elif cmd == 'reblog':
-            cls.reblog(account, op_json, block_date)
-
-    @classmethod
-    def reblog(cls, account, op_json, block_date):
-        """Handle legacy 'reblog' op"""
-        if ('account' not in op_json
-                or 'author' not in op_json
-                or 'permlink' not in op_json):
-            return
-        blogger = op_json['account']
-        author = op_json['author']
-        permlink = op_json['permlink']
-
-        if blogger != account:
-            return  # impersonation
-        if not all(map(Accounts.exists, [author, blogger])):
-            return
-
-        post_id, depth = Posts.get_id_and_depth(author, permlink)
-
-        if depth > 0:
-            return  # prevent comment reblogs
-
-        if not post_id:
-            log.debug("reblog: post not found: %s/%s", author, permlink)
-            return
-
-        author_id = Accounts.get_id(author)
-        blogger_id = Accounts.get_id(blogger)
-
-        if 'delete' in op_json and op_json['delete'] == 'delete':
-            DB.query("DELETE FROM hive_reblogs WHERE account = :a AND "
-                     "post_id = :pid LIMIT 1", a=blogger, pid=post_id)
-            if not DbState.is_initial_sync():
-                FeedCache.delete(post_id, blogger_id)
-
-        else:
-            sql = ("INSERT INTO hive_reblogs (account, post_id, created_at) "
-                   "VALUES (:a, :pid, :date) ON CONFLICT (account, post_id) DO NOTHING")
-            DB.query(sql, a=blogger, pid=post_id, date=block_date)
-            if not DbState.is_initial_sync():
-                FeedCache.insert(post_id, blogger_id, block_date)
-                Notify('reblog', src_id=blogger_id, dst_id=author_id,
-                       post_id=post_id, when=block_date,
-                       score=Accounts.default_score(blogger)).write()
+            Reblog.reblog_op(account, op_json, block_date, block_num)
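Note: CustomOp.process_ops now times every op and reports it to OPStatusManager under its id (with a "-ignored" suffix for ids it does not handle). OPSM's internals are not shown in this patch; the collector below is only an assumed approximation of that start/stop/op_stats pattern, not the real class.

    from time import perf_counter
    from collections import defaultdict

    class OpTimer:
        # aggregate elapsed time and call count per op name (assumed behaviour)
        totals = defaultdict(float)
        counts = defaultdict(int)

        @classmethod
        def start(cls):
            return perf_counter()

        @classmethod
        def stop(cls, start):
            return perf_counter() - start

        @classmethod
        def op_stats(cls, name, elapsed):
            cls.totals[name] += elapsed
            cls.counts[name] += 1

    # usage inside an op loop, mirroring the diff:
    #   start = OpTimer.start()
    #   ... handle op ...
    #   OpTimer.op_stats(op_name, OpTimer.stop(start))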
diff --git a/hive/indexer/db_adapter_holder.py b/hive/indexer/db_adapter_holder.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7d53f9fbf92ca5a96ac66f4ebabd0bb0010bf0b
--- /dev/null
+++ b/hive/indexer/db_adapter_holder.py
@@ -0,0 +1,25 @@
+import logging
+log = logging.getLogger(__name__)
+
+class DbAdapterHolder(object):
+    db = None
+
+    _inside_tx = False
+
+    @classmethod
+    def setup_own_db_access(cls, sharedDb):
+        cls.db = sharedDb.clone()
+
+    @classmethod
+    def tx_active(cls):
+        return cls._inside_tx
+
+    @classmethod
+    def beginTx(cls):
+        cls.db.query("START TRANSACTION")
+        cls._inside_tx = True
+
+    @classmethod
+    def commitTx(cls):
+        cls.db.query("COMMIT")
+        cls._inside_tx = False
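Note: DbAdapterHolder gives each indexer class its own Db clone plus explicit transaction helpers. A hypothetical usage sketch follows (FollowLikeFlusher is illustrative and not part of this patch):

    from hive.indexer.db_adapter_holder import DbAdapterHolder

    class FollowLikeFlusher(DbAdapterHolder):
        # buffered (sql, params) pairs collected during block processing
        items = []

        @classmethod
        def flush(cls):
            """Write all buffered statements inside one explicit transaction."""
            if not cls.items:
                return 0
            cls.beginTx()
            for sql, params in cls.items:
                cls.db.query(sql, **params)
            cls.commitTx()
            n = len(cls.items)
            cls.items.clear()
            return n

    # during startup (shared_db being the process-wide Db instance):
    #   FollowLikeFlusher.setup_own_db_access(shared_db)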
diff --git a/hive/indexer/feed_cache.py b/hive/indexer/feed_cache.py
deleted file mode 100644
index e826569c90861d6508dfe25b1c0304f924ed44dd..0000000000000000000000000000000000000000
--- a/hive/indexer/feed_cache.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Maintains feed cache (blogs + reblogs)"""
-
-import logging
-import time
-from hive.db.adapter import Db
-from hive.db.db_state import DbState
-
-log = logging.getLogger(__name__)
-
-DB = Db.instance()
-
-class FeedCache:
-    """Maintains `hive_feed_cache`, which merges posts and reports.
-
-    The feed cache allows for efficient querying of posts + reblogs,
-    savings us from expensive queries. Effectively a materialized view.
-    """
-
-    @classmethod
-    def insert(cls, post_id, account_id, created_at):
-        """Inserts a [re-]post by an account into feed."""
-        assert not DbState.is_initial_sync(), 'writing to feed cache in sync'
-        sql = """INSERT INTO hive_feed_cache (account_id, post_id, created_at)
-                      VALUES (:account_id, :id, :created_at)
-                 ON CONFLICT (account_id, post_id) DO NOTHING"""
-        DB.query(sql, account_id=account_id, id=post_id, created_at=created_at)
-
-    @classmethod
-    def delete(cls, post_id, account_id=None):
-        """Remove a post from feed cache.
-
-        If `account_id` is specified, we remove a single entry (e.g. a
-        singular un-reblog). Otherwise, we remove all instances of the
-        post (e.g. a post was deleted; its entry and all reblogs need
-        to be removed.
-        """
-        assert not DbState.is_initial_sync(), 'writing to feed cache in sync'
-        sql = "DELETE FROM hive_feed_cache WHERE post_id = :id"
-        if account_id:
-            sql = sql + " AND account_id = :account_id"
-        DB.query(sql, account_id=account_id, id=post_id)
-
-    @classmethod
-    def rebuild(cls, truncate=True):
-        """Rebuilds the feed cache upon completion of initial sync."""
-
-        log.info("[HIVE] Rebuilding feed cache, this will take a few minutes.")
-        DB.query("START TRANSACTION")
-        if truncate:
-            DB.query("TRUNCATE TABLE hive_feed_cache")
-
-        lap_0 = time.perf_counter()
-        DB.query("""
-            INSERT INTO hive_feed_cache (account_id, post_id, created_at)
-                 SELECT hive_accounts.id, hive_posts.id, hive_posts.created_at
-                   FROM hive_posts
-                   JOIN hive_accounts ON hive_posts.author = hive_accounts.name
-                  WHERE depth = 0 AND is_deleted = '0'
-            ON CONFLICT DO NOTHING
-        """)
-        lap_1 = time.perf_counter()
-        DB.query("""
-            INSERT INTO hive_feed_cache (account_id, post_id, created_at)
-                 SELECT hive_accounts.id, post_id, hive_reblogs.created_at
-                   FROM hive_reblogs
-                   JOIN hive_accounts ON hive_reblogs.account = hive_accounts.name
-            ON CONFLICT DO NOTHING
-        """)
-        lap_2 = time.perf_counter()
-        DB.query("COMMIT")
-
-        log.info("[HIVE] Rebuilt hive feed cache in %ds (%d+%d)",
-                 (lap_2 - lap_0), (lap_1 - lap_0), (lap_2 - lap_1))
diff --git a/hive/indexer/follow.py b/hive/indexer/follow.py
index 632d75ba8636f7b094fd9ab940e92fbcd8126ea6..dfbe742584a7961aae2750b4037a4d9dd5a84461 100644
--- a/hive/indexer/follow.py
+++ b/hive/indexer/follow.py
@@ -1,212 +1,434 @@
 """Handles follow operations."""
 
 import logging
-from time import perf_counter as perf
 
 from funcy.seqs import first
 from hive.db.adapter import Db
-from hive.db.db_state import DbState
+from hive.utils.misc import chunks
 from hive.indexer.accounts import Accounts
-from hive.indexer.notify import Notify
+
+from hive.indexer.db_adapter_holder import DbAdapterHolder
+from hive.utils.normalize import escape_characters
+
 
 log = logging.getLogger(__name__)
 
+
 DB = Db.instance()
 
-FOLLOWERS = 'followers'
-FOLLOWING = 'following'
+class Follow(DbAdapterHolder):
+    """Handles processing of incoming follow ups and flushing to db."""
 
-def _flip_dict(dict_to_flip):
-    """Swap keys/values. Returned dict values are array of keys."""
-    flipped = {}
-    for key, value in dict_to_flip.items():
-        if value in flipped:
-            flipped[value].append(key)
-        else:
-            flipped[value] = [key]
-    return flipped
+    follow_items_to_flush = dict()
 
-class Follow:
-    """Handles processing of incoming follow ups and flushing to db."""
+    # [DK] this dictionary holds data for table update operations;
+    # since the update query differs for each status, we group
+    # follower ids per status:
+    # {
+    #   state_number_1 : [follower_id_1, follower_id_2, ...]
+    #   state_number_2 : [follower_id_3, follower_id_4, ...]
+    # }
+    # this dict is used later to perform batch updates
+    follow_update_items_to_flush = dict()
+
+    idx = 0
 
     @classmethod
-    def follow_op(cls, account, op_json, date):
+    def follow_op(cls, account, op_json, date, block_num):
         """Process an incoming follow op."""
         op = cls._validated_op(account, op_json, date)
         if not op:
             return
+        op['block_num'] = block_num
 
-        # perform delta check
-        new_state = op['state']
-        old_state = cls._get_follow_db_state(op['flr'], op['flg'])
-        if new_state == (old_state or 0):
-            return
-        sql = ''
-
-        # insert or update state
-        if old_state is None:
-            sql = """INSERT INTO hive_follows (follower, following,
-                     created_at, state, blacklisted, follow_blacklists) VALUES (:flr, :flg, :at, :state, %s)"""
-            if new_state == 3:
-                sql = sql % """ true, false """
-            elif new_state == 4:
-                sql = sql % """ false, true """
+        state = op['state']
+
+        for following in op['flg']:
+            k = '{}/{}'.format(op['flr'], following)
+            if k in cls.follow_items_to_flush:
+                cls.follow_items_to_flush[k]['state'] = state
+                cls.follow_items_to_flush[k]['idx'] = cls.idx
+                cls.follow_items_to_flush[k]['block_num'] = block_num
             else:
-                sql = sql % """false, false"""
-        else:
-            if new_state < 3:
-                sql = """UPDATE hive_follows SET state = :state
-                         WHERE follower = :flr AND following = :flg"""
-            elif new_state == 3:
-                sql = """UPDATE hive_follows SET blacklisted = true
-                          WHERE follower = :flr AND following = :flg"""
-            elif new_state == 4:
-                sql = """UPDATE hive_follows SET follow_blacklists = true
-                         WHERE follower = :flr AND following = :flg"""
-            elif new_state == 5:
-                sql = """UPDATE hive_follows SET blacklisted = false
-                         WHERE follower = :flr AND following = :flg"""
-            elif new_state == 6:
-                sql = """UPDATE hive_follows SET follow_blacklists = false
-                         WHERE follower = :flr AND following = :flg"""
-        DB.query(sql, **op)
-
-        # track count deltas
-        if not DbState.is_initial_sync():
-            if new_state == 1:
-                Follow.follow(op['flr'], op['flg'])
-                if old_state is None:
-                    score = Accounts.default_score(op_json['follower'])
-                    Notify('follow', src_id=op['flr'], dst_id=op['flg'],
-                           when=op['at'], score=score).write()
-            if old_state == 1:
-                Follow.unfollow(op['flr'], op['flg'])
+                cls.follow_items_to_flush[k] = dict(
+                    idx=cls.idx,
+                    flr=op['flr'],
+                    flg=following,
+                    state=state,
+                    at=op['at'],
+                    block_num=block_num)
+            cls.idx += 1
+
+        if state > 8:
+            # if the given state already exists in the dict, append the
+            # follower to its list; otherwise create a new list and set it
+            # for that state
+            if state in cls.follow_update_items_to_flush:
+                cls.follow_update_items_to_flush[state].append((op['flr'], block_num))
+            else:
+                cls.follow_update_items_to_flush[state] = [(op['flr'], block_num)]
 
     @classmethod
     def _validated_op(cls, account, op, date):
         """Validate and normalize the operation."""
-        if(not 'what' in op
+        if (not 'what' in op
            or not isinstance(op['what'], list)
            or not 'follower' in op
            or not 'following' in op):
             return None
 
-        what = first(op['what']) or ''
-        if not isinstance(what, str):
-            return None
-        defs = {'': 0, 'blog': 1, 'ignore': 2, 'blacklist': 3, 'follow_blacklist': 4, 'unblacklist': 5, 'unfollow_blacklist': 6}
-        if what not in defs:
-            return None
-
-        if(op['follower'] == op['following']        # can't follow self
-           or op['follower'] != account             # impersonation
-           or not Accounts.exists(op['following'])  # invalid account
-           or not Accounts.exists(op['follower'])): # invalid account
+        # follower/following is empty
+        if not op['follower'] or not op['following']:
             return None
 
-        return dict(flr=Accounts.get_id(op['follower']),
-                    flg=Accounts.get_id(op['following']),
-                    state=defs[what],
-                    at=date)
-
-    @classmethod
-    def _get_follow_db_state(cls, follower, following):
-        """Retrieve current follow state of an account pair."""
-        sql = """SELECT state FROM hive_follows
-                  WHERE follower = :follower
-                    AND following = :following"""
-        return DB.query_one(sql, follower=follower, following=following)
-
+        op['following'] = op['following'] if isinstance(op['following'], list) else [op['following']]
 
-    # -- stat tracking --
+        # mimic original behaviour: if a following name does not exist,
+        # skip it -- equivalent to dropping the op for that single entry
 
-    _delta = {FOLLOWERS: {}, FOLLOWING: {}}
+        op['following'] = [following for following in op['following'] if Accounts.exists(following)]
 
-    @classmethod
-    def follow(cls, follower, following):
-        """Applies follow count change the next flush."""
-        cls._apply_delta(follower, FOLLOWING, 1)
-        cls._apply_delta(following, FOLLOWERS, 1)
-
-    @classmethod
-    def unfollow(cls, follower, following):
-        """Applies follow count change the next flush."""
-        cls._apply_delta(follower, FOLLOWING, -1)
-        cls._apply_delta(following, FOLLOWERS, -1)
-
-    @classmethod
-    def _apply_delta(cls, account, role, direction):
-        """Modify an account's follow delta in specified direction."""
-        if not account in cls._delta[role]:
-            cls._delta[role][account] = 0
-        cls._delta[role][account] += direction
-
-    @classmethod
-    def flush(cls, trx=True):
-        """Flushes pending follow count deltas."""
-
-        updated = 0
-        sqls = []
-        for col, deltas in cls._delta.items():
-            for delta, names in _flip_dict(deltas).items():
-                updated += len(names)
-                sql = "UPDATE hive_accounts SET %s = %s + :mag WHERE id IN :ids"
-                sqls.append((sql % (col, col), dict(mag=delta, ids=tuple(names))))
-
-        if not updated:
-            return 0
+        # if follower name does not exist drop op
+        if not Accounts.exists(op['follower']):
+            return None
 
-        start = perf()
-        DB.batch_queries(sqls, trx=trx)
-        if trx:
-            log.info("[SYNC] flushed %d follow deltas in %ds",
-                     updated, perf() - start)
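+        # can't follow self; drop ops where the follower does not match the signing account (impersonation)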
+        if op['follower'] in op['following'] or op['follower'] != account:
+            return None
 
-        cls._delta = {FOLLOWERS: {}, FOLLOWING: {}}
-        return updated
+        what = first(op['what']) or ''
+        if not isinstance(what, str):
+            return None
+        defs = {'': 0, 'blog': 1, 'ignore': 2, 'blacklist': 3, 'follow_blacklist': 4, 'unblacklist': 5, 'unfollow_blacklist': 6,
+                'follow_muted': 7, 'unfollow_muted': 8, 'reset_blacklist' : 9, 'reset_following_list': 10, 'reset_muted_list': 11,
+                'reset_follow_blacklist': 12, 'reset_follow_muted_list': 13, 'reset_all_lists': 14}
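+        # states 7/8 toggle follow_muted; states above 8 are list-reset operations handled separately in flush()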
+        if what not in defs:
+            return None
 
-    @classmethod
-    def flush_recount(cls):
-        """Recounts follows/following counts for all queued accounts.
-
-        This is currently not used; this approach was shown to be too
-        expensive, but it's useful in case follow counts manage to get
-        out of sync.
-        """
-        ids = set([*cls._delta[FOLLOWERS].keys(),
-                   *cls._delta[FOLLOWING].keys()])
-        sql = """
-            UPDATE hive_accounts
-               SET followers = (SELECT COUNT(*) FROM hive_follows WHERE state = 1 AND following = hive_accounts.id),
-                   following = (SELECT COUNT(*) FROM hive_follows WHERE state = 1 AND follower  = hive_accounts.id)
-             WHERE id IN :ids
-        """
-        DB.query(sql, ids=tuple(ids))
+        return dict(flr=escape_characters(op['follower']),
+                    flg=[escape_characters(following) for following in op['following']],
+                    state=defs[what],
+                    at=date)
 
     @classmethod
-    def force_recount(cls):
-        """Recounts all follows after init sync."""
-        log.info("[SYNC] query follower counts")
-        sql = """
-            CREATE TEMPORARY TABLE following_counts AS (
-                  SELECT id account_id, COUNT(state) num
-                    FROM hive_accounts
-               LEFT JOIN hive_follows hf ON id = hf.follower AND state = 1
-                GROUP BY id);
-            CREATE TEMPORARY TABLE follower_counts AS (
-                  SELECT id account_id, COUNT(state) num
-                    FROM hive_accounts
-               LEFT JOIN hive_follows hf ON id = hf.following AND state = 1
-                GROUP BY id);
-        """
-        DB.query(sql)
-
-        log.info("[SYNC] update follower counts")
-        sql = """
-            UPDATE hive_accounts SET followers = num FROM follower_counts
-             WHERE id = account_id AND followers != num;
-
-            UPDATE hive_accounts SET following = num FROM following_counts
-             WHERE id = account_id AND following != num;
-        """
-        DB.query(sql)
+    def flush(cls):
+        n = 0
+        if cls.follow_items_to_flush:
+            sql_prefix = """
+                INSERT INTO hive_follows as hf (follower, following, created_at, state, blacklisted, follow_blacklists, follow_muted, block_num)
+                SELECT ds.follower_id, ds.following_id, ds.created_at, ds.state, ds.blacklisted, ds.follow_blacklists, ds.follow_muted, ds.block_num
+                FROM
+                (
+                    SELECT
+                        t.id,
+                        ha_flr.id as follower_id,
+                        ha_flg.id as following_id,
+                        t.created_at,
+                        t.state,
+                        t.blacklisted,
+                        t.follow_blacklists,
+                        t.follow_muted,
+                        t.block_num
+                    FROM
+                        (
+                            VALUES
+                            {}
+                        ) as T (id, follower, following, created_at, state, blacklisted, follow_blacklists, follow_muted, block_num)
+                    INNER JOIN hive_accounts ha_flr ON ha_flr.name = T.follower
+                    INNER JOIN hive_accounts ha_flg ON ha_flg.name = T.following
+                    ORDER BY T.block_num ASC, T.id ASC
+                ) AS ds(id, follower_id, following_id, created_at, state, blacklisted, follow_blacklists, follow_muted, block_num)
+                ORDER BY ds.block_num ASC, ds.id ASC
+            """
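+            # the VALUES rows carry account names; the INNER JOINs above resolve them to ids (rows with unknown accounts are skipped)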
+            sql_postfix = """
+                ON CONFLICT ON CONSTRAINT hive_follows_ux1 DO UPDATE
+                    SET
+                        state = (CASE EXCLUDED.state
+                                    WHEN 0 THEN 0 -- 0 blocks possibility to update state
+                                    ELSE EXCLUDED.state
+                                END),
+                        blacklisted = (CASE EXCLUDED.state
+                                        WHEN 3 THEN TRUE
+                                        WHEN 5 THEN FALSE
+                                        ELSE EXCLUDED.blacklisted
+                                    END),
+                        follow_blacklists = (CASE EXCLUDED.state
+                                                WHEN 4 THEN TRUE
+                                                WHEN 6 THEN FALSE
+                                                ELSE EXCLUDED.follow_blacklists
+                                            END),
+                        follow_muted = (CASE EXCLUDED.state
+                                           WHEN 7 THEN TRUE
+                                           WHEN 8 THEN FALSE
+                                           ELSE EXCLUDED.follow_muted
+                                        END),
+                        block_num = EXCLUDED.block_num
+                WHERE hf.following = EXCLUDED.following AND hf.follower = EXCLUDED.follower
+                """
+            values = []
+            limit = 1000
+            count = 0
+
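+            # accumulate one VALUES tuple per follow item and flush in batches of at most 'limit' rows; duplicates are handled by the ON CONFLICT clause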
+            cls.beginTx()
+            for _, follow_item in cls.follow_items_to_flush.items():
+                if count >= limit:
+                    query = str(sql_prefix).format(",".join(values))
+                    query += sql_postfix
+                    cls.db.query(query)
+                    values.clear()
+                    count = 0
+                values.append("({}, {}, {}, '{}'::timestamp, {}, {}, {}, {}, {})".format(follow_item['idx'],
+                                                                                         follow_item['flr'],
+                                                                                         follow_item['flg'],
+                                                                                         follow_item['at'],
+                                                                                         follow_item['state'],
+                                                                                         follow_item['state'] == 3,
+                                                                                         follow_item['state'] == 4,
+                                                                                         follow_item['state'] == 7,
+                                                                                         follow_item['block_num']))
+                count += 1
+                n += 1
+
+            if len(values) > 0:
+                query = str(sql_prefix).format(",".join(values))
+                query += sql_postfix
+                cls.db.query(query)
+            cls.commitTx()
+            cls.follow_items_to_flush.clear()
+
+            # process the follow_update_items_to_flush dictionary:
+            # .items() yields (state_number, [(follower_name, block_num), ...]) pairs;
+            # for each state build an UPDATE query for its follower list,
+            # splitting lists larger than 1000 entries into chunks of 1000
+            for state, update_flush_items in cls.follow_update_items_to_flush.items():
+                for chunk in chunks(update_flush_items, 1000):
+                    sql = None
+                    query_values = ','.join(["({}, {})".format(account[0], account[1]) for account in chunk])
+                    # [DK] probably not a bad idea to move that logic to a SQL function
+                    if state == 9:
+                        #reset blacklists for follower
+                        sql = """
+                            UPDATE
+                                hive_follows hf
+                            SET
+                                blacklisted = false,
+                                block_num = ds.block_num
+                            FROM
+                            (
+                                SELECT
+                                    ha.id as follower_id,
+                                    block_num
+                                FROM
+                                    (
+                                        VALUES
+                                        {}
+                                    ) AS T(name, block_num)
+                                INNER JOIN hive_accounts ha ON ha.name = T.name
+                            ) AS ds (follower_id, block_num)
+                            WHERE
+                                hf.follower = ds.follower_id
+                        """.format(query_values)
+                    elif state == 10:
+                        #reset following list for follower
+                        sql = """
+                            UPDATE
+                                hive_follows hf
+                            SET
+                                state = 0,
+                                block_num = ds.block_num
+                            FROM
+                            (
+                                SELECT
+                                    ha.id as follower_id,
+                                    block_num
+                                FROM
+                                    (
+                                        VALUES
+                                        {}
+                                    ) AS T(name, block_num)
+                                INNER JOIN hive_accounts ha ON ha.name = T.name
+                            ) AS ds (follower_id, block_num)
+                            WHERE
+                                hf.follower = ds.follower_id
+                                AND hf.state = 1
+                        """.format(query_values)
+                    elif state == 11:
+                        #reset all muted list for follower
+                        sql = """
+                            UPDATE
+                                hive_follows hf
+                            SET
+                                state = 0,
+                                block_num = ds.block_num
+                            FROM
+                            (
+                                SELECT
+                                    ha.id as follower_id,
+                                    block_num
+                                FROM
+                                    (
+                                        VALUES
+                                        {}
+                                    ) AS T(name, block_num)
+                                INNER JOIN hive_accounts ha ON ha.name = T.name
+                            ) AS ds (follower_id, block_num)
+                            WHERE
+                                hf.follower = ds.follower_id
+                                AND hf.state = 2
+                        """.format(query_values)
+                    elif state == 12:
+                        #reset followed blacklists
+                        sql = """
+                            UPDATE
+                                hive_follows hf
+                            SET
+                                follow_blacklists = false,
+                                block_num = ds.block_num
+                            FROM
+                            (
+                                SELECT
+                                    ha.id as follower_id,
+                                    block_num
+                                FROM
+                                    (
+                                        VALUES
+                                        {0}
+                                    ) AS T(name, block_num)
+                                INNER JOIN hive_accounts ha ON ha.name = T.name
+                            ) AS ds (follower_id, block_num)
+                            WHERE
+                                hf.follower = ds.follower_id;
+
+                            UPDATE
+                                hive_follows hf
+                            SET
+                                follow_blacklists = true,
+                                block_num = ds.block_num
+                            FROM
+                            (
+                                SELECT
+                                    ha.id as follower_id,
+                                    block_num
+                                FROM
+                                    (
+                                        VALUES
+                                        {0}
+                                    ) AS T(name, block_num)
+                                INNER JOIN hive_accounts ha ON ha.name = T.name
+                            ) AS ds (follower_id, block_num)
+                            WHERE
+                                hf.follower = ds.follower_id
+                                AND following = (SELECT id FROM hive_accounts WHERE name = 'null')
+                        """.format(query_values)
+
+                    elif state == 13:
+                        #reset followed mute lists
+                        sql = """
+                            UPDATE
+                                hive_follows hf
+                            SET
+                                follow_muted = false,
+                                block_num = ds.block_num
+                            FROM
+                            (
+                                SELECT
+                                    ha.id as follower_id,
+                                    block_num
+                                FROM
+                                    (
+                                        VALUES
+                                        {0}
+                                    ) AS T(name, block_num)
+                                INNER JOIN hive_accounts ha ON ha.name = T.name
+                            ) AS ds (follower_id, block_num)
+                            WHERE
+                                hf.follower = ds.follower_id;
+
+                            UPDATE
+                                hive_follows hf
+                            SET
+                                follow_muted = true,
+                                block_num = ds.block_num
+                            FROM
+                            (
+                                SELECT
+                                    ha.id as follower_id,
+                                    block_num
+                                FROM
+                                    (
+                                        VALUES
+                                        {0}
+                                    ) AS T(name, block_num)
+                                INNER JOIN hive_accounts ha ON ha.name = T.name
+                            ) AS ds (follower_id, block_num)
+                            WHERE
+                                hf.follower = ds.follower_id
+                                AND following = (SELECT id FROM hive_accounts WHERE name = 'null')
+                        """.format(query_values)
+                    elif state == 14:
+                        #reset all lists
+                        sql = """
+                            UPDATE
+                                hive_follows hf
+                            SET
+                                blacklisted = false,
+                                follow_blacklists = false,
+                                follow_muted = false,
+                                state = 0,
+                                block_num = ds.block_num
+                            FROM
+                            (
+                                SELECT
+                                    ha.id as follower_id,
+                                    block_num
+                                FROM
+                                    (
+                                        VALUES
+                                        {0}
+                                    ) AS T(name, block_num)
+                                INNER JOIN hive_accounts ha ON ha.name = T.name
+                            ) AS ds (follower_id, block_num)
+                            WHERE
+                                hf.follower = ds.follower_id;
+
+                            UPDATE
+                                hive_follows hf
+                            SET
+                                follow_blacklists = true,
+                                follow_muted = true,
+                                block_num = ds.block_num
+                            FROM
+                            (
+                                SELECT
+                                    ha.id as follower_id,
+                                    block_num
+                                FROM
+                                    (
+                                        VALUES
+                                        {0}
+                                    ) AS T(name, block_num)
+                                INNER JOIN hive_accounts ha ON ha.name = T.name
+                            ) AS ds (follower_id, block_num)
+                            WHERE
+                                hf.follower = ds.follower_id
+                                AND following = (SELECT id FROM hive_accounts WHERE name = 'null')
+                        """.format(query_values)
+                    if sql is not None:
+                        cls.beginTx()
+                        DB.query(sql)
+                        cls.commitTx()
+                    n += len(chunk)
+            cls.follow_update_items_to_flush.clear()
+            cls.idx = 0
+        return n
diff --git a/hive/indexer/jobs.py b/hive/indexer/jobs.py
deleted file mode 100644
index c64ff37687834b238500972fee6844782f6519ab..0000000000000000000000000000000000000000
--- a/hive/indexer/jobs.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""Hive indexer: various utility tasks"""
-import logging
-from hive.indexer.cached_post import CachedPost
-from hive.indexer.posts import Posts
-
-log = logging.getLogger(__name__)
-
-def _last_post_id(db):
-    sql = "SELECT id FROM hive_posts ORDER BY id DESC LIMIT 1"
-    return db.query_one(sql) or 0
-
-def _last_cached_post_id(db):
-    sql = "SELECT post_id FROM hive_posts_cache ORDER BY post_id DESC LIMIT 1"
-    return db.query_one(sql) or 0
-
-def audit_cache_missing(db, steem):
-    """Scan all posts to check for missing cache entries."""
-    last_id = _last_cached_post_id(db)
-    step = 1000000
-    steps = int(last_id / step) + 1
-    log.info("last post id: %d, batches: %d", last_id, steps)
-
-    sql = """
-        SELECT hp.id, hp.author, hp.permlink
-          FROM hive_posts hp
-     LEFT JOIN hive_posts_cache hpc
-            ON hp.id = hpc.post_id
-         WHERE hp.is_deleted = False
-           AND hp.id BETWEEN :lbound AND :ubound
-           AND hpc.post_id IS NULL"""
-
-    for idx in range(steps):
-        lbound = (idx * step) + 1
-        ubound = (idx + 1) * step
-
-        missing = db.query_all(sql, lbound=lbound, ubound=ubound)
-        log.info("%d <= id <= %d: %d missing", lbound, ubound, len(missing))
-        for row in missing:
-            CachedPost.insert(row['author'], row['permlink'], row['id'])
-
-        CachedPost.flush(steem, trx=True)
-
-def audit_cache_deleted(db):
-    """Scan all posts to check for extraneous cache entries."""
-    last_id = _last_cached_post_id(db)
-    step = 1000000
-    steps = int(last_id / step) + 1
-    log.info("audit_cache_deleted -- last id: %d, batches: %d", last_id, steps)
-
-    sql = """
-        SELECT hp.id, hp.author, hp.permlink
-          FROM hive_posts hp
-          JOIN hive_posts_cache hpc
-            ON hp.id = hpc.post_id
-         WHERE hp.id BETWEEN :lbound AND :ubound
-           AND hp.is_deleted = True"""
-
-    for idx in range(steps):
-        lbound = (idx * step) + 1
-        ubound = (idx + 1) * step
-
-        extra = db.query_all(sql, lbound=lbound, ubound=ubound)
-        log.info("%d <= id <= %d: %d to delete", lbound, ubound, len(extra))
-        for row in extra:
-            CachedPost.delete(row['id'], row['author'], row['permlink'])
-
-def audit_cache_undelete(db, steem):
-    """Scan all posts to check for posts erroneously deleted."""
-    last_id = _last_post_id(db)
-    step = 1000000
-    steps = int(last_id / step) + 1
-    log.info("last post id: %d, batches: %d", last_id, steps)
-
-    sql = """
-        SELECT id, author, permlink
-          FROM hive_posts
-         WHERE is_deleted = True
-           AND id BETWEEN :lbound AND :ubound
-    """
-
-    for idx in range(steps):
-        lbound = (idx * step) + 1
-        ubound = (idx + 1) * step
-
-        rows = db.query_all(sql, lbound=lbound, ubound=ubound)
-        log.info("%d <= id <= %d: %d to check", lbound, ubound, len(rows))
-
-        if not rows:
-            continue
-
-        post_args = [(row['author'], row['permlink']) for row in rows]
-        posts = steem.get_content_batch(post_args)
-
-        recovered = 0
-        for row, post in zip(rows, posts):
-            if post['author']:
-                recovered += 1
-                Posts.undelete(post, post['created'], row['id'])
-
-        log.info("%d <= id <= %d: %d recovered", lbound, ubound, recovered)
-        if recovered:
-            CachedPost.flush(steem, trx=True)
diff --git a/hive/indexer/mock_block_provider.py b/hive/indexer/mock_block_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..429d7a29653e8df979fab611c7c81f6b05cea686
--- /dev/null
+++ b/hive/indexer/mock_block_provider.py
@@ -0,0 +1,99 @@
+""" Data provider for test operations """
+import datetime
+import dateutil.parser
+import logging
+
+from hive.indexer.mock_data_provider import MockDataProvider, MockDataProviderException
+
+log = logging.getLogger(__name__)
+
+class MockBlockProvider(MockDataProvider):
+    """ Data provider for test ops """
+
+    min_block = 0
+    max_block = 0
+
+    last_real_block_num = 1
+    last_real_block_time = dateutil.parser.isoparse("2016-03-24T16:05:00")
+
+    @classmethod
+    def set_last_real_block_num_date(cls, block_num, block_date):
+        cls.last_real_block_num = int(block_num)
+        cls.last_real_block_time = dateutil.parser.isoparse(block_date)
+
+    @classmethod
+    def add_block_data_from_file(cls, file_name):
+        from json import load
+        data = {}
+        with open(file_name, "r") as src:
+            data = load(src)
+        for block_num, block_content in data.items():
+            cls.add_block_data(block_num, block_content)
+
+    @classmethod
+    def add_block_data(cls, _block_num, block_content):
+        block_num = int(_block_num)
+
+        if block_num > cls.max_block:
+            cls.max_block = block_num
+        if block_num < cls.min_block:
+            cls.min_block = block_num
+
+        #log.info("Loading mock data for block {} with timestamp: {}".format(block_num, block_content['timestamp']))
+
+        if block_num in cls.block_data:
+            assert 'transactions' in cls.block_data[block_num]
+            assert 'transactions' in block_content
+            cls.block_data[block_num]['transactions'] = cls.block_data[block_num]['transactions'] + block_content['transactions']
+        else:
+            cls.block_data[block_num] = dict(block_content)
+
+    @classmethod
+    def get_block_data(cls, block_num, make_on_empty=False):
+        if len(cls.block_data) == 0:
+            return None
+
+        data = cls.block_data.get(block_num, None)
+
+        #if data is not None:
+            #log.info("Block {} has timestamp: {}".format(block_num, data['timestamp']))
+
+        if make_on_empty and data is None:
+            data = cls.make_empty_block(block_num)
+
+        return data
+
+    @classmethod
+    def get_max_block_number(cls):
+        return cls.max_block
+
+    @classmethod
+    def make_block_id(cls, block_num):
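+        # fake 40-hex-digit block id: the block number in the first 8 digits, zeros for the rest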
+        return "{:08x}00000000000000000000000000000000".format(block_num)
+
+    @classmethod
+    def make_block_timestamp(cls, block_num):
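+        # blocks are produced every 3 seconds; extrapolate the mock timestamp from the last real block seen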
+        block_delta = block_num - cls.last_real_block_num
+        time_delta = datetime.timedelta(days=0, seconds=block_delta*3, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0)
+        ret_time = cls.last_real_block_time + time_delta
+        return ret_time.replace(microsecond=0).isoformat()
+
+    @classmethod
+    def make_empty_block(cls, block_num, witness="initminer"):
+        fake_block = dict({
+            "previous": cls.make_block_id(block_num - 1),
+            "timestamp": cls.make_block_timestamp(block_num),
+            "witness": witness,
+            "transaction_merkle_root": "0000000000000000000000000000000000000000",
+            "extensions": [],
+            "witness_signature": "",
+            "transactions": [],
+            "block_id": cls.make_block_id(block_num),
+            "signing_key": "",
+            "transaction_ids": []
+            })
+        # supply enough blocks to fill the block queue with empty blocks only;
+        # return None when there is no more data to serve
+        if cls.min_block < block_num < cls.max_block + 3:
+            return fake_block
+        return None
diff --git a/hive/indexer/mock_data_provider.py b/hive/indexer/mock_data_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddf768f946867d2dde70110b37453c31e61f6b7d
--- /dev/null
+++ b/hive/indexer/mock_data_provider.py
@@ -0,0 +1,40 @@
+""" Data provider for test operations """
+import os
+import logging
+
+from json import dumps
+
+log = logging.getLogger(__name__)
+
+class MockDataProviderException(Exception):
+    pass
+
+class MockDataProvider():
+    """ Data provider for test operations """
+    block_data = {}
+
+    @classmethod
+    def print_data(cls):
+        print(dumps(cls.block_data, indent=4, sort_keys=True))
+
+    @classmethod
+    def add_block_data_from_directory(cls, dir_name):
+        from fnmatch import fnmatch
+        pattern = "*.json"
+        for path, _, files in os.walk(dir_name):
+            for name in files:
+                if fnmatch(name, pattern):
+                    cls.add_block_data_from_file(os.path.join(path, name))
+
+    @classmethod
+    def add_block_data_from_file(cls, file_name):
+        raise NotImplementedError("add_block_data_from_file is not implemented")
+
+    @classmethod
+    def load_block_data(cls, data_path):
+        if os.path.isdir(data_path):
+            log.warning("Loading mock ops data from directory: {}".format(data_path))
+            cls.add_block_data_from_directory(data_path)
+        else:
+            log.warning("Loading mock ops data from file: {}".format(data_path))
+            cls.add_block_data_from_file(data_path)
diff --git a/hive/indexer/mock_vops_provider.py b/hive/indexer/mock_vops_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..133f62985752ebbe30f9436226dadf5fb2eb3cfb
--- /dev/null
+++ b/hive/indexer/mock_vops_provider.py
@@ -0,0 +1,75 @@
+""" Data provider for test vops """
+from hive.indexer.mock_data_provider import MockDataProvider
+
+class MockVopsProvider(MockDataProvider):
+    """ Data provider for test vops """
+    block_data = {
+        'ops' : {},
+        'ops_by_block' : {}
+    }
+
+    @classmethod
+    def add_block_data_from_file(cls, file_name):
+        from json import load
+        data = {}
+        with open(file_name, "r") as src:
+            data = load(src)
+        cls.add_block_data(data)
+
+    @classmethod
+    def add_block_data(cls, data):
+        if 'ops' in data:
+            for op in data['ops']:
+                if 'ops' in cls.block_data and op['block'] in cls.block_data['ops']:
+                    cls.block_data['ops'][op['block']].append(op)
+                else:
+                    cls.block_data['ops'][op['block']] = [op]
+
+        if 'ops_by_block' in data:
+            for ops in data['ops_by_block']:
+                if 'ops_by_block' in cls.block_data and ops['block'] in cls.block_data['ops_by_block']:
+                    cls.block_data['ops_by_block'][ops['block']]['ops'].extend(ops['ops'])
+                else:
+                    cls.block_data['ops_by_block'][ops['block']] = ops
+
+    @classmethod
+    def get_block_data(cls, block_num):
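+        # merge mock vops for this block from both the 'ops' and 'ops_by_block' collections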
+        ret = {}
+        if 'ops' in cls.block_data and block_num in cls.block_data['ops']:
+            data = cls.block_data['ops'][block_num]
+            if data:
+                ret['timestamp'] = data[0]['timestamp']
+                if 'ops' in ret:
+                    ret['ops'].extend([op['op'] for op in data])
+                else:
+                    ret['ops'] = [op['op'] for op in data]
+
+        if 'ops_by_block' in cls.block_data and block_num in cls.block_data['ops_by_block']:
+            data = cls.block_data['ops_by_block'][block_num]
+            if data:
+                ret['timestamp'] = data['timestamp']
+                if 'ops_by_block' in ret:
+                    ret['ops_by_block'].extend([ops['op'] for ops in data['ops']])
+                else:
+                    ret['ops_by_block'] = [ops['op'] for ops in data['ops']]
+        return ret
+
+    @classmethod
+    def add_mock_vops(cls, ret, from_block, end_block):
+        # don't do anything when there is no block data
+        if not cls.block_data['ops_by_block'] and not cls.block_data['ops']:
+            return
+        for block_num in range(from_block, end_block):
+            mock_vops = cls.get_block_data(block_num)
+            if mock_vops:
+                if block_num in ret:
+                    if 'ops_by_block' in mock_vops:
+                        ret[block_num]['ops'].extend(mock_vops['ops_by_block'])
+                    if 'ops' in mock_vops:
+                        ret[block_num]['ops'].extend(mock_vops['ops'])
+                else:
+                    if 'ops' in mock_vops:
+                        ret[block_num] = {'timestamp':mock_vops['timestamp'], "ops" : mock_vops['ops']}
+                    if 'ops_by_block' in mock_vops:
+                        ret[block_num] = {'timestamp':mock_vops['timestamp'], "ops" : mock_vops['ops_by_block']}
+
diff --git a/hive/indexer/notify.py b/hive/indexer/notify.py
index add2e3ec225de9905561af94d962c9e7f727fb88..7e2cfb494bdbf0a506253f954b03383790f39a59 100644
--- a/hive/indexer/notify.py
+++ b/hive/indexer/notify.py
@@ -3,6 +3,8 @@
 from enum import IntEnum
 import logging
 from hive.db.adapter import Db
+from hive.indexer.db_adapter_holder import DbAdapterHolder
+from hive.utils.normalize import escape_characters
 #pylint: disable=too-many-lines,line-too-long
 
 log = logging.getLogger(__name__)
@@ -42,12 +44,13 @@ class NotifyType(IntEnum):
     #power_down = 24
     #message = 25
 
-class Notify:
+class Notify(DbAdapterHolder):
     """Handles writing notifications/messages."""
     # pylint: disable=too-many-instance-attributes,too-many-arguments
     DEFAULT_SCORE = 35
+    _notifies = []
 
-    def __init__(self, type_id, when=None, src_id=None, dst_id=None, community_id=None,
+    def __init__(self, block_num, type_id, when=None, src_id=None, dst_id=None, community_id=None,
                  post_id=None, payload=None, score=None, **kwargs):
         """Create a notification."""
 
@@ -59,6 +62,7 @@ class Notify:
         else:
             raise Exception("unknown type %s" % repr(type_id))
 
+        self.block_num = block_num
         self.enum = enum
         self.score = score or self.DEFAULT_SCORE
         self.when = when
@@ -69,10 +73,10 @@ class Notify:
         self.payload = payload
         self._id = kwargs.get('id')
 
-    @classmethod
-    def from_dict(cls, row):
-        """Instantiate from db row."""
-        return Notify(**dict(row))
+        # for HF24 we started saving notifications from block 44300000,
+        # roughly 90 days before the release day
+        if block_num > 44300000:
+            Notify._notifies.append(self)
 
     @classmethod
     def set_lastread(cls, account, date):
@@ -80,31 +84,53 @@ class Notify:
         sql = "UPDATE hive_accounts SET lastread_at = :date WHERE name = :name"
         DB.query(sql, date=date, name=account)
 
-    def to_dict(self):
+    def to_db_values(self):
         """Generate a db row."""
-        return dict(
-            type_id=self.enum.value,
-            score=self.score,
-            created_at=self.when,
-            src_id=self.src_id,
-            dst_id=self.dst_id,
-            post_id=self.post_id,
-            community_id=self.community_id,
-            payload=self.payload,
-            id=self._id)
-
-    def write(self):
-        """Store this notification."""
-        assert not self._id, 'notify has id %d' % self._id
-        ignore = ('reply', 'reply_comment', 'reblog', 'follow', 'mention', 'vote')
-        if self.enum.name not in ignore:
-            log.warning("[NOTIFY] %s - src %s dst %s pid %s%s cid %s (%d/100)",
-                        self.enum.name, self.src_id, self.dst_id, self.post_id,
-                        ' (%s)' % self.payload if self.payload else '',
-                        self.community_id, self.score)
-        sql = """INSERT INTO hive_notifs (type_id, score, created_at, src_id,
-                                          dst_id, post_id, community_id,
-                                          payload)
-                      VALUES (:type_id, :score, :created_at, :src_id, :dst_id,
-                              :post_id, :community_id, :payload)"""
-        DB.query(sql, **self.to_dict())
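+        # render a single "(...)" VALUES tuple; rows are batch-inserted by flush()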
+        return "( {}, {}, {}, '{}'::timestamp, {}, {}, {}, {}, {} )".format(
+                  self.block_num
+                , self.enum.value
+                , self.score
+                , self.when if self.when else "NULL"
+                , self.src_id if self.src_id else "NULL"
+                , self.dst_id if self.dst_id else "NULL"
+                , self.post_id if self.post_id else "NULL"
+                , self.community_id if self.community_id else "NULL"
+                , escape_characters(str(self.payload)) if self.payload else "NULL")
+
+    @classmethod
+    def flush(cls):
+        """Store buffered notifs"""
+        def execute_query( sql, values ):
+            values_str = ','.join(values)
+            actual_query = sql.format(values_str)
+            cls.db.query(actual_query)
+            values.clear()
+
+        n = 0
+        if Notify._notifies:
+            cls.beginTx()
+
+            sql = """INSERT INTO hive_notifs (block_num, type_id, score, created_at, src_id,
+                                              dst_id, post_id, community_id,
+                                              payload)
+                          VALUES
+                          -- block_num, type_id, score, created_at, src_id, dst_id, post_id, community_id, payload
+                          {}"""
+
+            values = []
+            values_limit = 1000
+
+            for notify in Notify._notifies:
+                values.append( "{}".format( notify.to_db_values() ) )
+
+                if len(values) >= values_limit:
+                    execute_query(sql, values)
+
+            if len(values) > 0:
+                execute_query(sql, values)
+
+            n = len(Notify._notifies)
+            Notify._notifies.clear()
+            cls.commitTx()
+
+        return n
diff --git a/hive/indexer/payments.py b/hive/indexer/payments.py
index 08892070b8da7d939cf21f78df406d3330f75aa1..d68f743e3a16a8912178ae1ced7b82639ccde0e2 100644
--- a/hive/indexer/payments.py
+++ b/hive/indexer/payments.py
@@ -3,12 +3,10 @@
 import logging
 
 from hive.db.adapter import Db
-from hive.db.db_state import DbState
 from hive.utils.normalize import parse_amount
 
 from hive.indexer.posts import Posts
 from hive.indexer.accounts import Accounts
-from hive.indexer.cached_post import CachedPost
 
 log = logging.getLogger(__name__)
 
@@ -38,12 +36,6 @@ class Payments:
         sql = "UPDATE hive_posts SET promoted = :val WHERE id = :id"
         DB.query(sql, val=new_amount, id=record['post_id'])
 
-        # notify cached_post of new promoted balance, and trigger update
-        if not DbState.is_initial_sync():
-            CachedPost.update_promoted_amount(record['post_id'], new_amount)
-            author, permlink = cls._split_url(op['memo'])
-            CachedPost.vote(author, permlink, record['post_id'])
-
     @classmethod
     def _validated(cls, op, tx_idx, num, date):
         """Validate and normalize the transfer op."""
diff --git a/hive/indexer/post_data_cache.py b/hive/indexer/post_data_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..908ef753efc863dfcd15bccd0de938ecdd8291d0
--- /dev/null
+++ b/hive/indexer/post_data_cache.py
@@ -0,0 +1,105 @@
+import logging
+from hive.utils.normalize import escape_characters
+
+from hive.indexer.db_adapter_holder import DbAdapterHolder
+
+log = logging.getLogger(__name__)
+
+class PostDataCache(DbAdapterHolder):
+    """ Procides cache for DB operations on post data table in order to speed up initial sync """
+    _data = {}
+
+
+    @classmethod
+    def is_cached(cls, pid):
+        """ Check if data is cached """
+        return pid in cls._data
+
+    @classmethod
+    def add_data(cls, pid, post_data, is_new_post):
+        """ Add data to cache """
+        if not cls.is_cached(pid):
+            cls._data[pid] = post_data
+            cls._data[pid]['is_new_post'] = is_new_post
+        else:
+            assert not is_new_post
+            for k, data in post_data.items():
+                if data is not None:
+                    cls._data[pid][k] = data
+
+    @classmethod
+    def get_post_body(cls, pid):
+        """ Returns body of given post from collected cache or from underlying DB storage. """
+        try:
+            post_data = cls._data[pid]
+        except KeyError:
+            sql = """
+                  SELECT hpd.body FROM hive_post_data hpd WHERE hpd.id = :post_id;
+                  """
+            row = cls.db.query_row(sql, post_id = pid)
+            post_data = dict(row)
+        return post_data['body']
+
+    @classmethod
+    def flush(cls, print_query = False):
+        """ Flush data from cache to db """
+        if cls._data:
+            values_insert = []
+            values_update = []
+            cls.beginTx()
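+            # render one VALUES tuple per post; new posts go into a bulk INSERT, edited posts into a bulk UPDATE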
+            sql = """
+                INSERT INTO 
+                    hive_post_data (id, title, preview, img_url, body, json) 
+                VALUES 
+            """
+            values = []
+            for k, data in cls._data.items():
+                title = 'NULL' if data['title'] is None else "{}".format(escape_characters(data['title']))
+                body = 'NULL' if data['body'] is None else "{}".format(escape_characters(data['body']))
+                preview = 'NULL' if data['body'] is None else "{}".format(escape_characters(data['body'][0:1024]))
+                json = 'NULL' if data['json'] is None else "{}".format(escape_characters(data['json']))
+                img_url = 'NULL' if data['img_url'] is None else "{}".format(escape_characters(data['img_url']))
+                value = "({},{},{},{},{},{})".format(k, title, preview, img_url, body, json)
+                if data['is_new_post']:
+                    values_insert.append(value)
+                else:
+                    values_update.append(value)
+
+            if values_insert:
+                sql = """
+                    INSERT INTO 
+                        hive_post_data (id, title, preview, img_url, body, json) 
+                    VALUES 
+                """
+                sql += ','.join(values_insert)
+                if print_query:
+                    log.info("Executing query:\n{}".format(sql))
+                cls.db.query(sql)
+
+            if values_update:
+                sql = """
+                    UPDATE hive_post_data AS hpd SET 
+                        title = COALESCE( data_source.title, hpd.title ),
+                        preview = COALESCE( data_source.preview, hpd.preview ),
+                        img_url = COALESCE( data_source.img_url, hpd.img_url ),
+                        body = COALESCE( data_source.body, hpd.body ),
+                        json = COALESCE( data_source.json, hpd.json )
+                    FROM
+                    ( SELECT * FROM
+                    ( VALUES
+                """
+                sql += ','.join(values_update)
+                sql += """
+                    ) AS T(id, title, preview, img_url, body, json)
+                    ) AS data_source
+                    WHERE hpd.id = data_source.id
+                """
+                if print_query:
+                    log.info("Executing query:\n{}".format(sql))
+                cls.db.query(sql)
+
+            cls.commitTx()
+
+        n = len(cls._data.keys())
+        cls._data.clear()
+        return n
diff --git a/hive/indexer/posts.py b/hive/indexer/posts.py
index 9a72cd2433dfd7803db45a84480a4f9ece265b7a..1d04d8a769090211d76b276722fd9faab3f6c34d 100644
--- a/hive/indexer/posts.py
+++ b/hive/indexer/posts.py
@@ -1,248 +1,439 @@
-"""Core posts manager."""
-
-import logging
-import collections
-
-from hive.db.adapter import Db
-from hive.db.db_state import DbState
-
-from hive.indexer.accounts import Accounts
-from hive.indexer.cached_post import CachedPost
-from hive.indexer.feed_cache import FeedCache
-from hive.indexer.community import Community, START_DATE
-from hive.indexer.notify import Notify
-
-log = logging.getLogger(__name__)
-DB = Db.instance()
-
-class Posts:
-    """Handles critical/core post ops and data."""
-
-    # LRU cache for (author-permlink -> id) lookup (~400mb per 1M entries)
-    CACHE_SIZE = 2000000
-    _ids = collections.OrderedDict()
-    _hits = 0
-    _miss = 0
-
-    @classmethod
-    def last_id(cls):
-        """Get the last indexed post id."""
-        sql = "SELECT MAX(id) FROM hive_posts WHERE is_deleted = '0'"
-        return DB.query_one(sql) or 0
-
-    @classmethod
-    def get_id(cls, author, permlink):
-        """Look up id by author/permlink, making use of LRU cache."""
-        url = author+'/'+permlink
-        if url in cls._ids:
-            cls._hits += 1
-            _id = cls._ids.pop(url)
-            cls._ids[url] = _id
-        else:
-            cls._miss += 1
-            sql = """SELECT id FROM hive_posts WHERE
-                     author = :a AND permlink = :p"""
-            _id = DB.query_one(sql, a=author, p=permlink)
-            if _id:
-                cls._set_id(url, _id)
-
-        # cache stats (under 10M every 10K else every 100K)
-        total = cls._hits + cls._miss
-        if total % 100000 == 0:
-            log.info("pid lookups: %d, hits: %d (%.1f%%), entries: %d",
-                     total, cls._hits, 100.0*cls._hits/total, len(cls._ids))
-
-        return _id
-
-    @classmethod
-    def _set_id(cls, url, pid):
-        """Add an entry to the LRU, maintaining max size."""
-        assert pid, "no pid provided for %s" % url
-        if len(cls._ids) > cls.CACHE_SIZE:
-            cls._ids.popitem(last=False)
-        cls._ids[url] = pid
-
-    @classmethod
-    def save_ids_from_tuples(cls, tuples):
-        """Skim & cache `author/permlink -> id` from external queries."""
-        for tup in tuples:
-            pid, author, permlink = (tup[0], tup[1], tup[2])
-            url = author+'/'+permlink
-            if not url in cls._ids:
-                cls._set_id(url, pid)
-        return tuples
-
-    @classmethod
-    def get_id_and_depth(cls, author, permlink):
-        """Get the id and depth of @author/permlink post."""
-        _id = cls.get_id(author, permlink)
-        if not _id:
-            return (None, -1)
-        depth = DB.query_one("SELECT depth FROM hive_posts WHERE id = :id", id=_id)
-        return (_id, depth)
-
-    @classmethod
-    def is_pid_deleted(cls, pid):
-        """Check if the state of post is deleted."""
-        sql = "SELECT is_deleted FROM hive_posts WHERE id = :id"
-        return DB.query_one(sql, id=pid)
-
-    @classmethod
-    def delete_op(cls, op):
-        """Given a delete_comment op, mark the post as deleted.
-
-        Also remove it from post-cache and feed-cache.
-        """
-        cls.delete(op)
-
-    @classmethod
-    def comment_op(cls, op, block_date):
-        """Register new/edited/undeleted posts; insert into feed cache."""
-        pid = cls.get_id(op['author'], op['permlink'])
-        if not pid:
-            # post does not exist, go ahead and process it.
-            cls.insert(op, block_date)
-        elif not cls.is_pid_deleted(pid):
-            # post exists, not deleted, thus an edit. ignore.
-            cls.update(op, block_date, pid)
-        else:
-            # post exists but was deleted. time to reinstate.
-            cls.undelete(op, block_date, pid)
-
-    @classmethod
-    def insert(cls, op, date):
-        """Inserts new post records."""
-        sql = """INSERT INTO hive_posts (is_valid, is_muted, parent_id, author,
-                             permlink, category, community_id, depth, created_at)
-                      VALUES (:is_valid, :is_muted, :parent_id, :author,
-                             :permlink, :category, :community_id, :depth, :date)"""
-        sql += ";SELECT currval(pg_get_serial_sequence('hive_posts','id'))"
-        post = cls._build_post(op, date)
-        result = DB.query(sql, **post)
-        post['id'] = int(list(result)[0][0])
-        cls._set_id(op['author']+'/'+op['permlink'], post['id'])
-
-        if not DbState.is_initial_sync():
-            if post['error']:
-                author_id = Accounts.get_id(post['author'])
-                Notify('error', dst_id=author_id, when=date,
-                       post_id=post['id'], payload=post['error']).write()
-            CachedPost.insert(op['author'], op['permlink'], post['id'])
-            if op['parent_author']: # update parent's child count
-                CachedPost.recount(op['parent_author'],
-                                   op['parent_permlink'], post['parent_id'])
-            cls._insert_feed_cache(post)
-
-    @classmethod
-    def undelete(cls, op, date, pid):
-        """Re-allocates an existing record flagged as deleted."""
-        sql = """UPDATE hive_posts SET is_valid = :is_valid,
-                   is_muted = :is_muted, is_deleted = '0', is_pinned = '0',
-                   parent_id = :parent_id, category = :category,
-                   community_id = :community_id, depth = :depth
-                 WHERE id = :id"""
-        post = cls._build_post(op, date, pid)
-        DB.query(sql, **post)
-
-        if not DbState.is_initial_sync():
-            if post['error']:
-                author_id = Accounts.get_id(post['author'])
-                Notify('error', dst_id=author_id, when=date,
-                       post_id=post['id'], payload=post['error']).write()
-
-            CachedPost.undelete(pid, post['author'], post['permlink'],
-                                post['category'])
-            cls._insert_feed_cache(post)
-
-    @classmethod
-    def delete(cls, op):
-        """Marks a post record as being deleted."""
-        pid, depth = cls.get_id_and_depth(op['author'], op['permlink'])
-        DB.query("UPDATE hive_posts SET is_deleted = '1' WHERE id = :id", id=pid)
-
-        if not DbState.is_initial_sync():
-            CachedPost.delete(pid, op['author'], op['permlink'])
-            if depth == 0:
-                # TODO: delete from hive_reblogs -- otherwise feed cache gets populated with deleted posts somwrimas
-                FeedCache.delete(pid)
-            else:
-                # force parent child recount when child is deleted
-                prnt = cls._get_parent_by_child_id(pid)
-                CachedPost.recount(prnt['author'], prnt['permlink'], prnt['id'])
-
-
-    @classmethod
-    def update(cls, op, date, pid):
-        """Handle post updates.
-
-        Here we could also build content diffs, but for now just used
-        a signal to update cache record.
-        """
-        # pylint: disable=unused-argument
-        if not DbState.is_initial_sync():
-            CachedPost.update(op['author'], op['permlink'], pid)
-
-    @classmethod
-    def _get_parent_by_child_id(cls, child_id):
-        """Get parent's `id`, `author`, `permlink` by child id."""
-        sql = """SELECT id, author, permlink FROM hive_posts
-                  WHERE id = (SELECT parent_id FROM hive_posts
-                               WHERE id = :child_id)"""
-        result = DB.query_row(sql, child_id=child_id)
-        assert result, "parent of %d not found" % child_id
-        return result
-
-    @classmethod
-    def _insert_feed_cache(cls, post):
-        """Insert the new post into feed cache if it's not a comment."""
-        if not post['depth']:
-            account_id = Accounts.get_id(post['author'])
-            FeedCache.insert(post['id'], account_id, post['date'])
-
-    @classmethod
-    def _build_post(cls, op, date, pid=None):
-        """Validate and normalize a post operation.
-
-        Post is muted if:
-         - parent was muted
-         - author unauthorized
-
-        Post is invalid if:
-         - parent is invalid
-         - author unauthorized
-        """
-        # TODO: non-nsfw post in nsfw community is `invalid`
-
-        # if this is a top-level post:
-        if not op['parent_author']:
-            parent_id = None
-            depth = 0
-            category = op['parent_permlink']
-            community_id = None
-            if date > START_DATE:
-                community_id = Community.validated_id(category)
-            is_valid = True
-            is_muted = False
-
-        # this is a comment; inherit parent props.
-        else:
-            parent_id = cls.get_id(op['parent_author'], op['parent_permlink'])
-            sql = """SELECT depth, category, community_id, is_valid, is_muted
-                       FROM hive_posts WHERE id = :id"""
-            (parent_depth, category, community_id, is_valid,
-             is_muted) = DB.query_row(sql, id=parent_id)
-            depth = parent_depth + 1
-            if not is_valid: error = 'replying to invalid post'
-            elif is_muted: error = 'replying to muted post'
-
-        # check post validity in specified context
-        error = None
-        if community_id and is_valid and not Community.is_post_valid(community_id, op):
-            error = 'not authorized'
-            #is_valid = False # TODO: reserved for future blacklist status?
-            is_muted = True
-
-        return dict(author=op['author'], permlink=op['permlink'], id=pid,
-                    is_valid=is_valid, is_muted=is_muted, parent_id=parent_id,
-                    depth=depth, category=category, community_id=community_id,
-                    date=date, error=error)
+"""Core posts manager."""
+
+import logging
+import collections
+
+from ujson import dumps, loads
+
+from diff_match_patch import diff_match_patch
+
+from hive.db.adapter import Db
+from hive.db.db_state import DbState
+
+from hive.indexer.reblog import Reblog
+from hive.indexer.community import Community
+from hive.indexer.notify import Notify
+from hive.indexer.post_data_cache import PostDataCache
+from hive.indexer.db_adapter_holder import DbAdapterHolder
+from hive.utils.misc import chunks
+
+from hive.utils.normalize import sbd_amount, legacy_amount, safe_img_url, escape_characters
+
+log = logging.getLogger(__name__)
+DB = Db.instance()
+
+class Posts(DbAdapterHolder):
+    """Handles critical/core post ops and data."""
+
+    # LRU cache for (author-permlink -> id) lookup (~400mb per 1M entries)
+    CACHE_SIZE = 2000000
+    _ids = collections.OrderedDict()
+    _hits = 0
+    _miss = 0
+
+    comment_payout_ops = {}
+    _comment_payout_ops = []
+
+    @classmethod
+    def last_id(cls):
+        """Get the last indexed post id."""
+        sql = "SELECT MAX(id) FROM hive_posts WHERE counter_deleted = 0"
+        return DB.query_one(sql) or 0
+
+    @classmethod
+    def get_id(cls, author, permlink):
+        """Look up id by author/permlink, making use of LRU cache."""
+        url = author+'/'+permlink
+        if url in cls._ids:
+            cls._hits += 1
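+            # move the entry to the end of the OrderedDict so it counts as most recently used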
+            _id = cls._ids.pop(url)
+            cls._ids[url] = _id
+        else:
+            cls._miss += 1
+            sql = """
+                SELECT hp.id
+                FROM hive_posts hp
+                INNER JOIN hive_accounts ha_a ON ha_a.id = hp.author_id
+                INNER JOIN hive_permlink_data hpd_p ON hpd_p.id = hp.permlink_id
+                WHERE ha_a.name = :a AND hpd_p.permlink = :p
+            """
+            _id = DB.query_one(sql, a=author, p=permlink)
+            if _id:
+                cls._set_id(url, _id)
+
+        # log cache stats every 100K lookups
+        total = cls._hits + cls._miss
+        if total % 100000 == 0:
+            log.info("pid lookups: %d, hits: %d (%.1f%%), entries: %d",
+                     total, cls._hits, 100.0*cls._hits/total, len(cls._ids))
+
+        return _id
+
+    @classmethod
+    def _set_id(cls, url, pid):
+        """Add an entry to the LRU, maintaining max size."""
+        assert pid, "no pid provided for %s" % url
+        if len(cls._ids) > cls.CACHE_SIZE:
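+            # drop the oldest (least recently used) entry from the front of the OrderedDict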
+            cls._ids.popitem(last=False)
+        cls._ids[url] = pid
+
+    @classmethod
+    def delete_op(cls, op, block_date):
+        """Given a delete_comment op, mark the post as deleted.
+
+        Also remove it from post-cache and feed-cache.
+        """
+        cls.delete(op, block_date)
+
+    @classmethod
+    def comment_op(cls, op, block_date):
+        """Register new/edited/undeleted posts; insert into feed cache."""
+
+        md = {}
+        # At least one case where jsonMetadata was double-encoded: condenser#895
+        # jsonMetadata = JSON.parse(jsonMetadata);
+        try:
+            md = loads(op['json_metadata'])
+            if not isinstance(md, dict):
+                md = {}
+        except Exception:
+            pass
+
+        tags = []
+
+        if md and 'tags' in md and isinstance(md['tags'], list):
+            for tag in md['tags']:
+                if tag and isinstance(tag, str):
+                    tags.append(tag) # no escaping needed; the tags are passed to the query as bound parameters
+
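+        # process_hive_post_operation() registers the post on the DB side (insert or undelete)
+        # and returns the metadata used below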
+        sql = """
+            SELECT is_new_post, id, author_id, permlink_id, post_category, parent_id, community_id, is_valid, is_muted, depth
+            FROM process_hive_post_operation((:author)::varchar, (:permlink)::varchar, (:parent_author)::varchar, (:parent_permlink)::varchar, (:date)::timestamp, (:community_support_start_block)::integer, (:block_num)::integer, (:tags)::VARCHAR[]);
+            """
+
+        row = DB.query_row(sql, author=op['author'], permlink=op['permlink'], parent_author=op['parent_author'],
+                   parent_permlink=op['parent_permlink'], date=block_date, community_support_start_block=Community.start_block, block_num=op['block_num'], tags=tags)
+
+        result = dict(row)
+
+        # TODO: enhance community-related post validation and honor is_muted.
+        error = cls._verify_post_against_community(op, result['community_id'], result['is_valid'], result['is_muted'])
+
+        cls._set_id(op['author']+'/'+op['permlink'], result['id'])
+
+        img_url = None
+        if 'image' in md:
+            img_url = md['image']
+            if isinstance(img_url, list) and img_url:
+                img_url = img_url[0]
+        if img_url:
+            img_url = safe_img_url(img_url)
+
+        is_new_post = result['is_new_post']
+        if is_new_post:
+            # add content data to hive_post_data
+            post_data = dict(title=op['title'] if op['title'] else '',
+                             img_url=img_url if img_url else '',
+                             body=op['body'] if op['body'] else '',
+                             json=op['json_metadata'] if op['json_metadata'] else '')
+        else:
+            # edit case: the new body may be a patch that has to be applied to the stored body.
+            # an empty new body means "no body edit", not "clear the body" (same for the other fields)
+            new_body = cls._merge_post_body(id=result['id'], new_body_def=op['body']) if op['body'] else None
+            new_title = op['title'] if op['title'] else None
+            new_json = op['json_metadata'] if op['json_metadata'] else None
+            # when 'new_json' is not empty, 'img_url' should be overwritten even if it is itself empty
+            new_img = img_url if img_url else '' if new_json else None
+            post_data = dict(title=new_title, img_url=new_img, body=new_body, json=new_json)
+
+#        log.info("Adding author: {}  permlink: {}".format(op['author'], op['permlink']))
+        PostDataCache.add_data(result['id'], post_data, is_new_post)
+
+        if not DbState.is_initial_sync():
+            if error:
+                author_id = result['author_id']
+                Notify(block_num=op['block_num'], type_id='error', dst_id=author_id, when=block_date,
+                       post_id=result['id'], payload=error)
+
+    @classmethod
+    def flush_into_db(cls):
+        sql = """
+              UPDATE hive_posts AS ihp SET
+                  total_payout_value    = COALESCE( data_source.total_payout_value,                     ihp.total_payout_value ),
+                  curator_payout_value  = COALESCE( data_source.curator_payout_value,                   ihp.curator_payout_value ),
+                  author_rewards        = CAST( data_source.author_rewards as BIGINT ) + ihp.author_rewards,
+                  author_rewards_hive   = COALESCE( CAST( data_source.author_rewards_hive as BIGINT ),  ihp.author_rewards_hive ),
+                  author_rewards_hbd    = COALESCE( CAST( data_source.author_rewards_hbd as BIGINT ),   ihp.author_rewards_hbd ),
+                  author_rewards_vests  = COALESCE( CAST( data_source.author_rewards_vests as BIGINT ), ihp.author_rewards_vests ),
+                  payout                = COALESCE( CAST( data_source.payout as DECIMAL ),              ihp.payout ),
+                  pending_payout        = COALESCE( CAST( data_source.pending_payout as DECIMAL ),      ihp.pending_payout ),
+                  payout_at             = COALESCE( CAST( data_source.payout_at as TIMESTAMP ),         ihp.payout_at ),
+                  last_payout_at        = COALESCE( CAST( data_source.last_payout_at as TIMESTAMP ),    ihp.last_payout_at ),
+                  cashout_time          = COALESCE( CAST( data_source.cashout_time as TIMESTAMP ),      ihp.cashout_time ),
+                  is_paidout            = COALESCE( CAST( data_source.is_paidout as BOOLEAN ),          ihp.is_paidout ),
+                  total_vote_weight     = COALESCE( CAST( data_source.total_vote_weight as NUMERIC ),   ihp.total_vote_weight )
+              FROM
+              (
+              SELECT  ha_a.id as author_id, hpd_p.id as permlink_id,
+                      t.total_payout_value,
+                      t.curator_payout_value,
+                      t.author_rewards,
+                      t.author_rewards_hive,
+                      t.author_rewards_hbd,
+                      t.author_rewards_vests,
+                      t.payout,
+                      t.pending_payout,
+                      t.payout_at,
+                      t.last_payout_at,
+                      t.cashout_time,
+                      t.is_paidout,
+                      t.total_vote_weight
+              from
+              (
+              VALUES
+                --- put all constant values here
+                {}
+              ) AS T(author, permlink,
+                      total_payout_value,
+                      curator_payout_value,
+                      author_rewards,
+                      author_rewards_hive,
+                      author_rewards_hbd,
+                      author_rewards_vests,
+                      payout,
+                      pending_payout,
+                      payout_at,
+                      last_payout_at,
+                      cashout_time,
+                      is_paidout,
+                      total_vote_weight)
+              INNER JOIN hive_accounts ha_a ON ha_a.name = t.author
+              INNER JOIN hive_permlink_data hpd_p ON hpd_p.permlink = t.permlink
+              ) as data_source
+              WHERE ihp.permlink_id = data_source.permlink_id and ihp.author_id = data_source.author_id
+        """
+
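+        # send the collected VALUES rows in chunks of 1000, each chunk in its own transaction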
+        for chunk in chunks(cls._comment_payout_ops, 1000):
+            cls.beginTx()
+
+            values_str = ','.join(chunk)
+            actual_query = sql.format(values_str)
+            cls.db.query(actual_query)
+
+            cls.commitTx()
+
+        n = len(cls._comment_payout_ops)
+        cls._comment_payout_ops.clear()
+        return n
+
+    @classmethod
+    def comment_payout_op(cls):
+        """ Process collected comment payout operations """
+        for k, v in cls.comment_payout_ops.items():
+            author                    = None
+            permlink                  = None
+
+            # author payouts
+            author_rewards            = 0
+            author_rewards_hive       = None
+            author_rewards_hbd        = None
+            author_rewards_vests      = None
+
+            # total payout for comment
+            #comment_author_reward     = None
+            #curators_vesting_payout   = None
+            total_payout_value        = None
+            curator_payout_value      = None
+            #beneficiary_payout_value  = None
+
+            payout                    = None
+            pending_payout            = None
+
+            payout_at                 = None
+            last_payout_at            = None
+            cashout_time              = None
+
+            is_paidout                = None
+
+            total_vote_weight         = None
+
+            # final payout indicator - by default all rewards are zero, but might be overwritten by other operations
+            if v[ 'comment_payout_update_operation' ] is not None:
+                value, date = v[ 'comment_payout_update_operation' ]
+                if author is None:
+                    author = value['author']
+                    permlink = value['permlink']
+                is_paidout              = True
+                payout_at               = date
+                last_payout_at          = date
+                cashout_time            = "infinity"
+
+                pending_payout          = 0
+
+            # author rewards in current (final or nonfinal) payout (always comes with comment_reward_operation)
+            if v[ 'author_reward_operation' ] is not None:
+                value, date = v[ 'author_reward_operation' ]
+                if author is None:
+                    author = value['author']
+                    permlink = value['permlink']
+                author_rewards_hive     = value['hive_payout']['amount']
+                author_rewards_hbd      = value['hbd_payout']['amount']
+                author_rewards_vests    = value['vesting_payout']['amount']
+                #curators_vesting_payout = value['curators_vesting_payout']['amount']
+
+            # summary of comment rewards in current (final or nonfinal) payout (always comes with author_reward_operation)
+            if v[ 'comment_reward_operation' ] is not None:
+                value, date = v[ 'comment_reward_operation' ]
+                if author is None:
+                    author = value['author']
+                    permlink = value['permlink']
+                #comment_author_reward   = value['payout']
+                author_rewards          = value['author_rewards']
+                total_payout_value      = value['total_payout_value']
+                curator_payout_value    = value['curator_payout_value']
+                #beneficiary_payout_value = value['beneficiary_payout_value']
+
+                payout = sum([ sbd_amount(total_payout_value), sbd_amount(curator_payout_value) ])
+                pending_payout = 0
+                last_payout_at = date
+
+            # estimated pending_payout from a vote (when it appears alongside an actual payout, the value comes from a vote cast after the payout)
+            if v[ 'effective_comment_vote_operation' ] is not None:
+                value, date = v[ 'effective_comment_vote_operation' ]
+                if author is None:
+                    author = value['author']
+                    permlink = value['permlink']
+                pending_payout          = sbd_amount( value['pending_payout'] )
+                total_vote_weight       = value['total_vote_weight']
+
+
+            cls._comment_payout_ops.append("('{}', {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {})".format(
+              author,
+              escape_characters(permlink),
+              "NULL" if ( total_payout_value is None ) else ( "'{}'".format( legacy_amount(total_payout_value) ) ),
+              "NULL" if ( curator_payout_value is None ) else ( "'{}'".format( legacy_amount(curator_payout_value) ) ),
+              author_rewards,
+              "NULL" if ( author_rewards_hive is None ) else author_rewards_hive,
+              "NULL" if ( author_rewards_hbd is None ) else author_rewards_hbd,
+              "NULL" if ( author_rewards_vests is None ) else author_rewards_vests,
+              "NULL" if ( payout is None ) else payout,
+              "NULL" if ( pending_payout is None ) else pending_payout,
+
+              "NULL" if ( payout_at is None ) else ( "'{}'::timestamp".format( payout_at ) ),
+              "NULL" if ( last_payout_at is None ) else ( "'{}'::timestamp".format( last_payout_at ) ),
+              "NULL" if ( cashout_time is None ) else ( "'{}'::timestamp".format( cashout_time ) ),
+
+              "NULL" if ( is_paidout is None ) else is_paidout,
+
+              "NULL" if ( total_vote_weight is None ) else total_vote_weight ))
+
+
+        n = len(cls.comment_payout_ops)
+        cls.comment_payout_ops.clear()
+        return n
+
+    @classmethod
+    def update_child_count(cls, child_id, op='+'):
+        """ Increase/decrease child count by 1 """
+        sql = """
+            UPDATE
+                hive_posts
+            SET
+                children = GREATEST(0, (
+                    SELECT
+                        CASE
+                            WHEN children is NULL THEN 0
+                            WHEN children=32762 THEN 0
+                            ELSE children
+                        END
+                    FROM
+                        hive_posts
+                    WHERE id = (SELECT parent_id FROM hive_posts WHERE id = :child_id)
+                )::int
+        """
+        if op == '+':
+            sql += """ + 1)"""
+        else:
+            sql += """ - 1)"""
+        sql += """ WHERE id = (SELECT parent_id FROM hive_posts WHERE id = :child_id)"""
+
+        DB.query(sql, child_id=child_id)
+
+    @classmethod
+    def comment_options_op(cls, op):
+        """ Process comment_options_operation """
+        max_accepted_payout = legacy_amount(op['max_accepted_payout']) if 'max_accepted_payout' in op else '1000000.000 HBD'
+        allow_votes = op['allow_votes'] if 'allow_votes' in op else True
+        allow_curation_rewards = op['allow_curation_rewards'] if 'allow_curation_rewards' in op else True
+        percent_hbd = op['percent_hbd'] if 'percent_hbd' in op else 10000
+        extensions = op['extensions'] if 'extensions' in op else []
+        beneficiaries = []
+        for ex in extensions:
+            if 'type' in ex and ex['type'] == 'comment_payout_beneficiaries' and 'beneficiaries' in ex['value']:
+                beneficiaries = ex['value']['beneficiaries']
+        sql = """
+            UPDATE
+                hive_posts hp
+            SET
+                max_accepted_payout = :max_accepted_payout,
+                percent_hbd = :percent_hbd,
+                allow_votes = :allow_votes,
+                allow_curation_rewards = :allow_curation_rewards,
+                beneficiaries = :beneficiaries
+            WHERE
+            hp.author_id = (SELECT id FROM hive_accounts WHERE name = :author) AND
+            hp.permlink_id = (SELECT id FROM hive_permlink_data WHERE permlink = :permlink)
+        """
+        DB.query(sql, author=op['author'], permlink=op['permlink'], max_accepted_payout=max_accepted_payout,
+                 percent_hbd=percent_hbd, allow_votes=allow_votes, allow_curation_rewards=allow_curation_rewards,
+                 beneficiaries=dumps(beneficiaries))
+
+    @classmethod
+    def delete(cls, op, block_date):
+        """Marks a post record as being deleted."""
+        sql = "SELECT delete_hive_post((:author)::varchar, (:permlink)::varchar, (:block_num)::int, (:date)::timestamp);"
+        DB.query_no_return(sql, author=op['author'], permlink = op['permlink'], block_num=op['block_num'], date=block_date)
+
+    @classmethod
+    def _verify_post_against_community(cls, op, community_id, is_valid, is_muted):
+        error = None
+        if community_id and is_valid and not Community.is_post_valid(community_id, op):
+            error = 'not authorized'
+            #is_valid = False # TODO: reserved for future blacklist status?
+            is_muted = True
+        return error
+
+    @classmethod
+    def _merge_post_body(cls, id, new_body_def):
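+        # the body of an edit may arrive as a diff_match_patch patch text; a non-empty patch is
+        # applied to the stored body, anything else is treated as a full body replacement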
+        new_body = ''
+        old_body = ''
+
+        try:
+            dmp = diff_match_patch()
+            patch = dmp.patch_fromText(new_body_def)
+            if patch is not None and len(patch):
+                old_body = PostDataCache.get_post_body(id)
+                new_body, _ = dmp.patch_apply(patch, old_body)
+                #new_utf8_body = new_body.decode('utf-8')
+                #new_body = new_utf8_body
+            else:
+                new_body = new_body_def
+        except ValueError as e:
+#            log.info("Merging a body post id: {} caused an ValueError exception {}".format(id, e))
+#            log.info("New body definition: {}".format(new_body_def))
+#            log.info("Old body definition: {}".format(old_body))
+            new_body = new_body_def
+        except Exception as ex:
+            log.info("Merging a body post id: {} caused an unknown exception {}".format(id, ex))
+            log.info("New body definition: {}".format(new_body_def))
+            log.info("Old body definition: {}".format(old_body))
+            new_body = new_body_def
+
+        return new_body
+
+
+    @classmethod
+    def flush(cls):
+        return cls.comment_payout_op() + cls.flush_into_db()
diff --git a/hive/indexer/reblog.py b/hive/indexer/reblog.py
new file mode 100644
index 0000000000000000000000000000000000000000..f126b8024c686b2c72c70e55074a1193b42be872
--- /dev/null
+++ b/hive/indexer/reblog.py
@@ -0,0 +1,130 @@
+""" Class for reblog operations """
+
+import logging
+
+from hive.db.adapter import Db
+from hive.db.db_state import DbState
+
+from hive.indexer.accounts import Accounts
+from hive.indexer.db_adapter_holder import DbAdapterHolder
+from hive.utils.normalize import escape_characters
+
+log = logging.getLogger(__name__)
+DB = Db.instance()
+
+class Reblog(DbAdapterHolder):
+    """ Class for reblog operations """
+    deleted_reblog_items = {}
+    reblog_items_to_flush = {}
+
+    @classmethod
+    def _validated_op(cls, actor, op, block_date, block_num):
+        if 'account' not in op or \
+            'author' not in op or \
+            'permlink' not in op:
+            return None
+
+        if op['account'] != actor:
+            return None # impersonation
+
+        if not Accounts.exists(op['account']):
+            return None
+        if not Accounts.exists(op['author']):
+            return None
+
+        _delete = 'delete' in op and op['delete'] == 'delete'
+
+        return dict(author = op['author'],
+                    permlink = op['permlink'],
+                    account = op['account'],
+                    block_date = block_date,
+                    block_num = block_num,
+                    delete = _delete )
+
+    @classmethod
+    def reblog_op(cls, actor, op, block_date, block_num):
+        """ Process reblog operation """
+        op = cls._validated_op(actor, op, block_date, block_num)
+        if not op:
+            return
+
+        key = "{}/{}/{}".format(op['author'], op['permlink'], op['account'])
+
+        if op['delete']:
+            cls.deleted_reblog_items[key] = {}
+            cls.delete( op['author'], op['permlink'], op['account'] )
+        else:
+            cls.reblog_items_to_flush[key] = { 'op': op }
+
+    @classmethod
+    def delete(cls, author, permlink, account ):
+        """Remove a reblog from hive_reblogs + feed from hive_feed_cache.
+        """
+        sql = "SELECT delete_reblog_feed_cache( (:author)::VARCHAR, (:permlink)::VARCHAR, (:account)::VARCHAR );"
+        status = DB.query_col(sql, author=author, permlink=permlink, account=account)
+        assert status is not None
+        if status == 0:
+            log.debug("reblog: post not found: %s/%s", author, permlink)
+
+    @classmethod
+    def flush(cls):
+        """ Flush collected data to database """
+        sql_prefix = """
+            INSERT INTO hive_reblogs (blogger_id, post_id, created_at, block_num)
+            SELECT 
+                data_source.blogger_id, data_source.post_id, data_source.created_at, data_source.block_num
+            FROM
+            (
+                SELECT 
+                    ha_b.id as blogger_id, hp.id as post_id, t.block_date as created_at, t.block_num 
+                FROM
+                    (VALUES
+                        {}
+                    ) AS T(blogger, author, permlink, block_date, block_num)
+                    INNER JOIN hive_accounts ha ON ha.name = t.author
+                    INNER JOIN hive_accounts ha_b ON ha_b.name = t.blogger
+                    INNER JOIN hive_permlink_data hpd ON hpd.permlink = t.permlink
+                    INNER JOIN hive_posts hp ON hp.author_id = ha.id AND hp.permlink_id = hpd.id AND hp.counter_deleted = 0
+            ) AS data_source (blogger_id, post_id, created_at, block_num)
+            ON CONFLICT ON CONSTRAINT hive_reblogs_ux1 DO NOTHING
+        """
+
+        item_count = len(cls.reblog_items_to_flush)
+        if item_count > 0:
+            values = []
+            limit = 1000
+            count = 0
+            cls.beginTx()
+            for k, v in cls.reblog_items_to_flush.items():
+                if k in cls.deleted_reblog_items:
+                    continue
+                reblog_item = v['op']
+                if count < limit:
+                    values.append("({}, {}, {}, '{}'::timestamp, {})".format(escape_characters(reblog_item['account']),
+                                                                                escape_characters(reblog_item['author']),
+                                                                                escape_characters(reblog_item['permlink']),
+                                                                                reblog_item['block_date'],
+                                                                                reblog_item['block_num']))
+                    count = count + 1
+                else:
+                    values_str = ",".join(values)
+                    query = sql_prefix.format(values_str)
+                    cls.db.query(query)
+                    values.clear()
+                    values.append("({}, {}, {}, '{}'::timestamp, {})".format(escape_characters(reblog_item['account']),
+                                                                                escape_characters(reblog_item['author']),
+                                                                                escape_characters(reblog_item['permlink']),
+                                                                                reblog_item['block_date'],
+                                                                                reblog_item['block_num']))
+                    count = 1
+
+            if len(values) > 0:
+                values_str = ",".join(values)
+                query = sql_prefix.format(values_str)
+                cls.db.query(query)
+            cls.commitTx()
+            cls.reblog_items_to_flush.clear()
+
+        cls.deleted_reblog_items.clear()
+
+        return item_count
diff --git a/hive/indexer/reputations.py b/hive/indexer/reputations.py
new file mode 100644
index 0000000000000000000000000000000000000000..0797c677d106eebf3992fbc7914411a335413995
--- /dev/null
+++ b/hive/indexer/reputations.py
@@ -0,0 +1,67 @@
+""" Reputation update support """
+
+import logging
+from hive.indexer.db_adapter_holder import DbAdapterHolder
+from hive.utils.normalize import escape_characters
+
+log = logging.getLogger(__name__)
+
+CACHED_ITEMS_LIMIT = 200
+
+class Reputations(DbAdapterHolder):
+    _values = []
+    _total_values = 0
+
+    @classmethod
+    def process_vote(self, block_num, effective_vote_op):
+        values_tuple = "('{}', '{}', {}, {}, {})".format(effective_vote_op['author'], effective_vote_op['voter'],
+            escape_characters(effective_vote_op['permlink']), effective_vote_op['rshares'], block_num)
+        self._values.append(values_tuple)
+
+    @classmethod
+    def flush(self):
+        if not self._values:
+            log.info("Written total reputation data records: {}".format(self._total_values))
+            return 0
+
+        sql = """
+              INSERT INTO hive_reputation_data
+              (voter_id, author_id, permlink, rshares, block_num)
+
+              SELECT (SELECT ha_v.id FROM hive_accounts ha_v WHERE ha_v.name = t.voter) as voter_id,
+                     (SELECT ha.id FROM hive_accounts ha WHERE ha.name = t.author) as author_id,
+                     t.permlink as permlink, t.rshares, t.block_num
+              FROM
+              (
+              VALUES
+                -- author, voter, permlink, rshares, block_num
+                {}
+              ) AS T(author, voter, permlink, rshares, block_num)
+              """
+
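+        # account names are resolved to ids inside the INSERT; rows are sent in slices of
+        # `value_limit` within a single transaction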
+        self.beginTx()
+
+        begin = 0
+        end = 0
+        value_limit = 1000
+        size = len(self._values)
+        while begin < size:
+            end = begin + value_limit
+            if end > size:
+                end = size
+
+            param = ",".join(self._values[begin:end])
+            query = sql.format(param)
+            self.db.query_no_return(query)
+            begin = end
+
+        self.commitTx()
+
+        n = len(self._values)
+        self._values.clear()
+
+        self._total_values = self._total_values + n
+
+        log.info("Written total reputation data records: {}".format(self._total_values))
+
+        return n
diff --git a/hive/indexer/sync.py b/hive/indexer/sync.py
index 729dcf5ad8e04b50896862a023ab66001ffe38ec..3925a7a1bdf7d8c1e7d1f703181c730559184327 100644
--- a/hive/indexer/sync.py
+++ b/hive/indexer/sync.py
@@ -1,31 +1,212 @@
 """Hive sync manager."""
 
 import logging
-import glob
 from time import perf_counter as perf
-import os
 import ujson as json
 
-from funcy.seqs import drop
-from toolz import partition_all
+import queue
+from concurrent.futures import ThreadPoolExecutor
 
 from hive.db.db_state import DbState
 
 from hive.utils.timer import Timer
 from hive.steem.block.stream import MicroForkException
+from hive.steem.massive_blocks_data_provider import MassiveBlocksDataProvider
 
 from hive.indexer.blocks import Blocks
 from hive.indexer.accounts import Accounts
-from hive.indexer.cached_post import CachedPost
-from hive.indexer.feed_cache import FeedCache
 from hive.indexer.follow import Follow
 from hive.indexer.community import Community
+
+from hive.server.common.payout_stats import PayoutStats
+from hive.server.common.mentions import Mentions
+
 from hive.server.common.mutes import Mutes
 
-#from hive.indexer.jobs import audit_cache_missing, audit_cache_deleted
+from hive.utils.stats import OPStatusManager as OPSM
+from hive.utils.stats import FlushStatusManager as FSM
+from hive.utils.stats import WaitingStatusManager as WSM
+from hive.utils.stats import PrometheusClient as PC
+from hive.utils.stats import BroadcastObject
+from hive.utils.communities_rank import update_communities_posts_and_rank
+
+from hive.indexer.mock_block_provider import MockBlockProvider
+from hive.indexer.mock_vops_provider import MockVopsProvider
+
+from datetime import datetime
+
+from signal import signal, SIGINT, SIGTERM
+from atomic import AtomicLong
+from threading import Thread
+from collections import deque
+
 
 log = logging.getLogger(__name__)
 
+CONTINUE_PROCESSING = True
+
+EXCEPTION_THROWN = AtomicLong(0)
+FINISH_SIGNAL_DURING_SYNC = AtomicLong(0)
+
+
+def finish_signals_handler(signal, frame):
+    global FINISH_SIGNAL_DURING_SYNC
+    FINISH_SIGNAL_DURING_SYNC += 1
+    log.info("""
+                  **********************************************************
+                  CAUGHT {}. PLEASE WAIT... PROCESSING DATA IN QUEUES...
+                  **********************************************************
+    """.format( "SIGINT" if signal == SIGINT else "SIGTERM" ) )
+
+def set_exception_thrown():
+    global EXCEPTION_THROWN
+    EXCEPTION_THROWN += 1
+
+def can_continue_thread():
+    return EXCEPTION_THROWN.value == 0 and FINISH_SIGNAL_DURING_SYNC.value == 0
+
+
+def prepare_vops(vops_by_block):
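+    # keep only the raw list of virtual ops for each block number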
+    preparedVops = {}
+
+    for blockNum, blockDict in vops_by_block.items():
+        vopsList = blockDict['ops']
+        preparedVops[blockNum] = vopsList
+
+    return preparedVops
+
+def _blocks_data_provider(blocks_data_provider):
+    try:
+        futures = blocks_data_provider.start()
+
+        for future in futures:
+            exception = future.exception()
+            if exception:
+                raise exception
+    except Exception:
+        log.exception("Exception caught during fetching blocks data")
+        raise
+
+def _block_consumer(blocks_data_provider, is_initial_sync, lbound, ubound):
+    from hive.utils.stats import minmax
+    is_debug = log.isEnabledFor(10)
+    num = 0
+    time_start = OPSM.start()
+    rate = {}
+    LIMIT_FOR_PROCESSED_BLOCKS = 1000
+
+    rate = minmax(rate, 0, 1.0, 0)
+
+    def print_summary():
+        stop = OPSM.stop(time_start)
+        log.info("=== TOTAL STATS ===")
+        wtm = WSM.log_global("Total waiting times")
+        ftm = FSM.log_global("Total flush times")
+        otm = OPSM.log_global("All operations present in the processed blocks")
+        ttm = ftm + otm + wtm
+        log.info(f"Elapsed time: {stop :.4f}s. Calculated elapsed time: {ttm :.4f}s. Difference: {stop - ttm :.4f}s")
+        if rate:
+            log.info(f"Highest block processing rate: {rate['max'] :.4f} bps. From: {rate['max_from']} To: {rate['max_to']}")
+            log.info(f"Lowest block processing rate: {rate['min'] :.4f} bps. From: {rate['min_from']} To: {rate['min_to']}")
+        log.info("=== TOTAL STATS ===")
+
+    try:
+        count = ubound - lbound
+        timer = Timer(count, entity='block', laps=['rps', 'wps'])
+
+        while lbound < ubound:
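+            # fetch the next batch of blocks together with their virtual ops from the provider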
+            preparedVops = {}
+            number_of_blocks_to_proceed = min( [ LIMIT_FOR_PROCESSED_BLOCKS, ubound - lbound  ] )
+            time_before_waiting_for_data = perf()
+            vops_and_blocks = blocks_data_provider.get( number_of_blocks_to_proceed )
+
+            if not can_continue_thread():
+                break
+
+            assert len(vops_and_blocks[ 'vops' ]) == number_of_blocks_to_proceed
+            assert len(vops_and_blocks[ 'blocks' ]) == number_of_blocks_to_proceed
+
+            to = min(lbound + number_of_blocks_to_proceed, ubound)
+            timer.batch_start()
+
+            for vop_nr in range( number_of_blocks_to_proceed):
+                preparedVops[ vop_nr + lbound ] = vops_and_blocks[ 'vops' ][ vop_nr ]
+
+            block_start = perf()
+            Blocks.process_multi(vops_and_blocks['blocks'], preparedVops, is_initial_sync)
+            block_end = perf()
+
+            timer.batch_lap()
+            timer.batch_finish(len(vops_and_blocks[ 'blocks' ]))
+            time_current = perf()
+
+            prefix = ("[INITIAL SYNC] Got block %d @ %s" % (
+                to - 1, vops_and_blocks['blocks'][-1]['timestamp']))
+            log.info(timer.batch_status(prefix))
+            log.info("[INITIAL SYNC] Time elapsed: %fs", time_current - time_start)
+            log.info("[INITIAL SYNC] Current system time: %s", datetime.now().strftime("%H:%M:%S"))
+            rate = minmax(rate, len(vops_and_blocks['blocks']), time_current - time_before_waiting_for_data, lbound)
+
+            if block_end - block_start > 1.0 or is_debug:
+                otm = OPSM.log_current("Operations present in the processed blocks")
+                ftm = FSM.log_current("Flushing times")
+                wtm = WSM.log_current("Waiting times")
+                log.info(f"Calculated time: {otm+ftm+wtm :.4f} s.")
+
+            OPSM.next_blocks()
+            FSM.next_blocks()
+            WSM.next_blocks()
+
+            lbound = to
+            PC.broadcast(BroadcastObject('sync_current_block', lbound, 'blocks'))
+
+            num = num + 1
+
+            if not can_continue_thread():
+                break
+    except Exception:
+        log.exception("Exception caught during processing blocks...")
+        set_exception_thrown()
+        print_summary()
+        raise
+
+    print_summary()
+    return num
+
+def _node_data_provider(self, is_initial_sync, lbound, ubound, chunk_size):
+    blocksQueue = queue.Queue(maxsize=10000)
+    vopsQueue = queue.Queue(maxsize=10000)
+    old_sig_int_handler = signal(SIGINT, finish_signals_handler)
+    old_sig_term_handler = signal(SIGTERM, finish_signals_handler)
+
+    massive_blocks_data_provider = MassiveBlocksDataProvider(
+          self._conf
+        , self._steem
+        , self._conf.get( 'max_workers' )
+        , self._conf.get( 'max_workers' )
+        , self._conf.get( 'max_batch' )
+        , lbound
+        , ubound
+        , can_continue_thread
+    )
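+    # run the block data provider (producer) and the block consumer concurrently;
+    # exceptions raised in either thread are re-raised below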
+    with ThreadPoolExecutor(max_workers = 4) as pool:
+        block_data_provider_future = pool.submit(_blocks_data_provider, massive_blocks_data_provider)
+        blockConsumerFuture = pool.submit(_block_consumer, massive_blocks_data_provider, is_initial_sync, lbound, ubound)
+
+        consumer_exception = blockConsumerFuture.exception()
+        block_data_provider_exception = block_data_provider_future.exception()
+
+        if consumer_exception:
+            raise consumer_exception
+
+        if block_data_provider_exception:
+            raise block_data_provider_exception
+
+    signal(SIGINT, old_sig_int_handler)
+    signal(SIGTERM, old_sig_term_handler)
+    blocksQueue.queue.clear()
+    vopsQueue.queue.clear()
+
 class Sync:
     """Manages the sync/index process.
 
@@ -35,119 +216,146 @@ class Sync:
     def __init__(self, conf):
         self._conf = conf
         self._db = conf.db()
+
+        log.info("Using hived url: `%s'", self._conf.get('steemd_url'))
+
         self._steem = conf.steem()
 
+    def load_mock_data(self,mock_block_data_path):
+        if mock_block_data_path:
+            MockBlockProvider.load_block_data(mock_block_data_path)
+            MockBlockProvider.print_data()
+
+    def refresh_sparse_stats(self):
+        # these stats are normally refreshed on separate schedules,
+        # but here we need to be able to refresh them all at once
+        self._update_chain_state()
+        update_communities_posts_and_rank()
+        with ThreadPoolExecutor(max_workers=2) as executor:
+            executor.submit(PayoutStats.generate)
+            executor.submit(Mentions.refresh)
+
     def run(self):
         """Initialize state; setup/recovery checks; sync and runloop."""
+        from hive.version import VERSION, GIT_REVISION
+        log.info("hivemind_version : %s", VERSION)
+        log.info("hivemind_git_rev : %s", GIT_REVISION)
+
+        from hive.db.schema import DB_VERSION as SCHEMA_DB_VERSION
+        log.info("database_schema_version : %s", SCHEMA_DB_VERSION)
+
+        Community.start_block = self._conf.get("community_start_block")
+
+        paths = self._conf.get("mock_block_data_path") or []
+        for path in paths:
+            self.load_mock_data(path)
+
+        mock_vops_data_path = self._conf.get("mock_vops_data_path")
+        if mock_vops_data_path:
+            MockVopsProvider.load_block_data(mock_vops_data_path)
+            MockVopsProvider.print_data()
 
         # ensure db schema up to date, check app status
         DbState.initialize()
+        Blocks.setup_own_db_access(self._db)
 
         # prefetch id->name and id->rank memory maps
         Accounts.load_ids()
-        Accounts.fetch_ranks()
-
-        # load irredeemables
-        mutes = Mutes(
-            self._conf.get('muted_accounts_url'),
-            self._conf.get('blacklist_api_url'))
-        Mutes.set_shared_instance(mutes)
 
         # community stats
-        Community.recalc_pending_payouts()
+        update_communities_posts_and_rank()
+
+        last_imported_block = Blocks.head_num()
+        hived_head_block = self._conf.get('test_max_block') or self._steem.last_irreversible()
+
+        log.info("database_head_block : %s", last_imported_block)
+        log.info("target_head_block : %s", hived_head_block)
 
         if DbState.is_initial_sync():
+            DbState.before_initial_sync(last_imported_block, hived_head_block)
             # resume initial sync
             self.initial()
-            DbState.finish_initial_sync()
-
+            if not can_continue_thread():
+                return
+            current_imported_block = Blocks.head_num()
+            DbState.finish_initial_sync(current_imported_block)
         else:
             # recover from fork
             Blocks.verify_head(self._steem)
 
-            # perform cleanup if process did not exit cleanly
-            CachedPost.recover_missing_posts(self._steem)
-
-        #audit_cache_missing(self._db, self._steem)
-        #audit_cache_deleted(self._db)
-
         self._update_chain_state()
 
+        trail_blocks = self._conf.get('trail_blocks')
+        assert trail_blocks >= 0
+        assert trail_blocks <= 100
+
+        import sys
+        max_block_limit = sys.maxsize
+        do_stale_block_check = True
         if self._conf.get('test_max_block'):
-            # debug mode: partial sync
-            return self.from_steemd()
+            max_block_limit = self._conf.get('test_max_block')
+            do_stale_block_check = False
+            # Correct max_block_limit by trail_blocks
+            max_block_limit = max_block_limit - trail_blocks
+            log.info("max_block_limit corrected by specified trail_blocks number: %d is: %d", trail_blocks, max_block_limit)
+
         if self._conf.get('test_disable_sync'):
             # debug mode: no sync, just stream
-            return self.listen()
+            return self.listen(trail_blocks, max_block_limit, do_stale_block_check)
 
         while True:
             # sync up to irreversible block
             self.from_steemd()
+            if not can_continue_thread():
+                return
 
-            # take care of payout backlog
-            CachedPost.dirty_paidouts(Blocks.head_date())
-            CachedPost.flush(self._steem, trx=True)
+            head = Blocks.head_num()
+            if head >= max_block_limit:
+                self.refresh_sparse_stats()
+                log.info("Exiting [LIVE SYNC] because irreversible block sync reached specified block limit: %d", max_block_limit)
+                break
 
             try:
                 # listen for new blocks
-                self.listen()
+                self.listen(trail_blocks, max_block_limit, do_stale_block_check)
             except MicroForkException as e:
                 # attempt to recover by restarting stream
                 log.error("microfork: %s", repr(e))
 
+            head = Blocks.head_num()
+            if head >= max_block_limit:
+                self.refresh_sparse_stats()
+                log.info("Exiting [LIVE SYNC] because of specified block limit: %d", max_block_limit)
+                break
+
     def initial(self):
         """Initial sync routine."""
         assert DbState.is_initial_sync(), "already synced"
 
         log.info("[INIT] *** Initial fast sync ***")
-        self.from_checkpoints()
         self.from_steemd(is_initial_sync=True)
-
-        log.info("[INIT] *** Initial cache build ***")
-        CachedPost.recover_missing_posts(self._steem)
-        FeedCache.rebuild()
-        Follow.force_recount()
-
-    def from_checkpoints(self, chunk_size=1000):
-        """Initial sync strategy: read from blocks on disk.
-
-        This methods scans for files matching ./checkpoints/*.json.lst
-        and uses them for hive's initial sync. Each line must contain
-        exactly one block in JSON format.
-        """
-        # pylint: disable=no-self-use
-        last_block = Blocks.head_num()
-
-        tuplize = lambda path: [int(path.split('/')[-1].split('.')[0]), path]
-        basedir = os.path.dirname(os.path.realpath(__file__ + "/../.."))
-        files = glob.glob(basedir + "/checkpoints/*.json.lst")
-        tuples = sorted(map(tuplize, files), key=lambda f: f[0])
-
-        last_read = 0
-        for (num, path) in tuples:
-            if last_block < num:
-                log.info("[SYNC] Load %s. Last block: %d", path, last_block)
-                with open(path) as f:
-                    # each line in file represents one block
-                    # we can skip the blocks we already have
-                    skip_lines = last_block - last_read
-                    remaining = drop(skip_lines, f)
-                    for lines in partition_all(chunk_size, remaining):
-                        Blocks.process_multi(map(json.loads, lines), True)
-                last_block = num
-            last_read = num
+        if not can_continue_thread():
+            return
 
     def from_steemd(self, is_initial_sync=False, chunk_size=1000):
         """Fast sync strategy: read/process blocks in batches."""
         steemd = self._steem
+
         lbound = Blocks.head_num() + 1
-        ubound = self._conf.get('test_max_block') or steemd.last_irreversible()
+        ubound = steemd.last_irreversible()
+
+        if self._conf.get('test_max_block') and self._conf.get('test_max_block') < ubound:
+            ubound = self._conf.get('test_max_block')
 
         count = ubound - lbound
         if count < 1:
             return
 
-        log.info("[SYNC] start block %d, +%d to sync", lbound, count)
+        if is_initial_sync:
+            _node_data_provider(self, is_initial_sync, lbound, ubound, self._conf.get("max_batch") )
+            return
+
+        log.info("[FAST SYNC] start block %d, +%d to sync", lbound, count)
         timer = Timer(count, entity='block', laps=['rps', 'wps'])
         while lbound < ubound:
             timer.batch_start()
@@ -155,32 +363,29 @@ class Sync:
             # fetch blocks
             to = min(lbound + chunk_size, ubound)
             blocks = steemd.get_blocks_range(lbound, to)
+            vops = steemd.enum_virtual_ops(self._conf, lbound, to)
+            prepared_vops = prepare_vops(vops)
             lbound = to
             timer.batch_lap()
 
             # process blocks
-            Blocks.process_multi(blocks, is_initial_sync)
+            Blocks.process_multi(blocks, prepared_vops, is_initial_sync)
             timer.batch_finish(len(blocks))
 
-            _prefix = ("[SYNC] Got block %d @ %s" % (
+            otm = OPSM.log_current("Operations present in the processed blocks")
+            ftm = FSM.log_current("Flushing times")
+
+            _prefix = ("[FAST SYNC] Got block %d @ %s" % (
                 to - 1, blocks[-1]['timestamp']))
             log.info(timer.batch_status(_prefix))
 
-        if not is_initial_sync:
-            # This flush is low importance; accounts are swept regularly.
-            Accounts.flush(steemd, trx=True)
+            OPSM.next_blocks()
+            FSM.next_blocks()
 
-            # If this flush fails, all that could potentially be lost here is
-            # edits and pre-payout votes. If the post has not been paid out yet,
-            # then the worst case is it will be synced upon payout. If the post
-            # is already paid out, worst case is to lose an edit.
-            CachedPost.flush(steemd, trx=True)
+            PC.broadcast(BroadcastObject('sync_current_block', to, 'blocks'))
 
-    def listen(self):
+    def listen(self, trail_blocks, max_sync_block, do_stale_block_check):
         """Live (block following) mode."""
-        trail_blocks = self._conf.get('trail_blocks')
-        assert trail_blocks >= 0
-        assert trail_blocks <= 100
 
         # debug: no max gap if disable_sync in effect
         max_gap = None if self._conf.get('test_disable_sync') else 100
@@ -188,37 +393,54 @@ class Sync:
         steemd = self._steem
         hive_head = Blocks.head_num()
 
-        for block in steemd.stream_blocks(hive_head + 1, trail_blocks, max_gap):
+        log.info("[LIVE SYNC] Entering listen with HM head: %d", hive_head)
+
+        if hive_head >= max_sync_block:
+            self.refresh_sparse_stats()
+            log.info("[LIVE SYNC] Exiting due to block limit exceeded: synced block number: %d, max_sync_block: %d", hive_head, max_sync_block)
+            return
+
+        for block in steemd.stream_blocks(hive_head + 1, trail_blocks, max_gap, do_stale_block_check):
+
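+            # the block number is encoded in the first 8 hex digits of the block id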
+            num = int(block['block_id'][:8], base=16)
+            log.info("[LIVE SYNC] =====> About to process block %d with timestamp %s", num, block['timestamp'])
+
             start_time = perf()
 
-            self._db.query("START TRANSACTION")
-            num = Blocks.process(block)
-            follows = Follow.flush(trx=False)
-            accts = Accounts.flush(steemd, trx=False, spread=8)
-            CachedPost.dirty_paidouts(block['timestamp'])
-            cnt = CachedPost.flush(steemd, trx=False)
-            self._db.query("COMMIT")
+            vops = steemd.enum_virtual_ops(self._conf, num, num + 1)
+            prepared_vops = prepare_vops(vops)
+
+            Blocks.process_multi([block], prepared_vops, False)
+            otm = OPSM.log_current("Operations present in the processed blocks")
+            ftm = FSM.log_current("Flushing times")
 
             ms = (perf() - start_time) * 1000
-            log.info("[LIVE] Got block %d at %s --% 4d txs,% 3d posts,% 3d edits,"
-                     "% 3d payouts,% 3d votes,% 3d counts,% 3d accts,% 3d follows"
+            log.info("[LIVE SYNC] <===== Processed block %d at %s --% 4d txs"
                      " --% 5dms%s", num, block['timestamp'], len(block['transactions']),
-                     cnt['insert'], cnt['update'], cnt['payout'], cnt['upvote'],
-                     cnt['recount'], accts, follows, ms, ' SLOW' if ms > 1000 else '')
+                     ms, ' SLOW' if ms > 1000 else '')
+            log.info("[LIVE SYNC] Current system time: %s", datetime.now().strftime("%H:%M:%S"))
 
-            if num % 1200 == 0: #1hr
+            if num % 1200 == 0: #1hour
                 log.warning("head block %d @ %s", num, block['timestamp'])
-                log.info("[LIVE] hourly stats")
-                Accounts.fetch_ranks()
-                #Community.recalc_pending_payouts()
+                log.info("[LIVE SYNC] hourly stats")
+
+                log.info("[LIVE SYNC] filling payout_stats_view executed")
+                with ThreadPoolExecutor(max_workers=2) as executor:
+                    executor.submit(PayoutStats.generate)
+                    executor.submit(Mentions.refresh)
             if num % 200 == 0: #10min
-                Community.recalc_pending_payouts()
-            if num % 100 == 0: #5min
-                log.info("[LIVE] 5-min stats")
-                Accounts.dirty_oldest(500)
+                update_communities_posts_and_rank()
             if num % 20 == 0: #1min
                 self._update_chain_state()
 
+            PC.broadcast(BroadcastObject('sync_current_block', num, 'blocks'))
+            FSM.next_blocks()
+            OPSM.next_blocks()
+
+            if num >= max_sync_block:
+                log.info("Stopping [LIVE SYNC] because of specified block limit: %d", max_sync_block)
+                break
+
     # refetch dynamic_global_properties, feed price, etc
     def _update_chain_state(self):
         """Update basic state props (head block, feed price) in db."""
@@ -226,7 +448,7 @@ class Sync:
         self._db.query("""UPDATE hive_state SET block_num = :block_num,
                        steem_per_mvest = :spm, usd_per_steem = :ups,
                        sbd_per_steem = :sps, dgpo = :dgpo""",
-                       block_num=state['dgpo']['head_block_number'],
+                       block_num=Blocks.head_num(),
                        spm=state['steem_per_mvest'],
                        ups=state['usd_per_steem'],
                        sps=state['sbd_per_steem'],
diff --git a/hive/indexer/votes.py b/hive/indexer/votes.py
new file mode 100644
index 0000000000000000000000000000000000000000..d27cc935e64de7214b0c9c1e474320d24c0690a7
--- /dev/null
+++ b/hive/indexer/votes.py
@@ -0,0 +1,138 @@
+""" Votes indexing and processing """
+
+import logging
+import collections
+
+from hive.indexer.db_adapter_holder import DbAdapterHolder
+from hive.utils.normalize import escape_characters
+
+log = logging.getLogger(__name__)
+
+class Votes(DbAdapterHolder):
+    """ Class for managing posts votes """
+    _votes_data = collections.OrderedDict()
+
+    inside_flush = False
+
+    @classmethod
+    def vote_op(cls, vote_operation, date):
+        """ Process vote_operation """
+        voter     = vote_operation['voter']
+        author    = vote_operation['author']
+        permlink  = vote_operation['permlink']
+        weight    = vote_operation['weight']
+        block_num = vote_operation['block_num']
+
+        if cls.inside_flush:
+            log.exception("Adding new vote-info into '_votes_data' dict")
+            raise RuntimeError("Fatal error")
+
+        key = "{}/{}/{}".format(voter, author, permlink)
+
+        if key in cls._votes_data:
+            cls._votes_data[key]["vote_percent"] = weight
+            cls._votes_data[key]["last_update"] = date
+            # only effective vote edits increase num_changes counter
+        else:
+            cls._votes_data[key] = dict(voter=voter,
+                                        author=author,
+                                        permlink=escape_characters(permlink),
+                                        vote_percent=weight,
+                                        weight=0,
+                                        rshares=0,
+                                        last_update=date,
+                                        is_effective=False,
+                                        num_changes=0,
+                                        block_num=block_num)
+
+    @classmethod
+    def effective_comment_vote_op(cls, vop):
+        """ Process effective_comment_vote_operation """
+
+        key = "{}/{}/{}".format(vop['voter'], vop['author'], vop['permlink'])
+
+        if key in cls._votes_data:
+            cls._votes_data[key]["weight"]       = vop["weight"]
+            cls._votes_data[key]["rshares"]      = vop["rshares"]
+            cls._votes_data[key]["is_effective"] = True
+            cls._votes_data[key]["num_changes"] += 1
+            cls._votes_data[key]["block_num"]    = vop["block_num"]
+        else:
+            cls._votes_data[key] = dict(voter=vop["voter"],
+                                        author=vop["author"],
+                                        permlink=escape_characters(vop["permlink"]),
+                                        vote_percent=0,
+                                        weight=vop["weight"],
+                                        rshares=vop["rshares"],
+                                        last_update="1970-01-01 00:00:00",
+                                        is_effective=True,
+                                        num_changes=0,
+                                        block_num=vop["block_num"])
+
+    @classmethod
+    def flush(cls):
+        """ Flush vote data from cache to database """
+
+        cls.inside_flush = True
+        n = 0
+        if cls._votes_data:
+            cls.beginTx()
+
+            sql = """
+                INSERT INTO hive_votes
+                (post_id, voter_id, author_id, permlink_id, weight, rshares, vote_percent, last_update, num_changes, block_num, is_effective)
+
+                SELECT hp.id as post_id, ha_v.id as voter_id, ha_a.id as author_id, hpd_p.id as permlink_id,
+                t.weight, t.rshares, t.vote_percent, t.last_update, t.num_changes, t.block_num, t.is_effective
+                FROM
+                (
+                VALUES
+                  -- order_id, voter, author, permlink, weight, rshares, vote_percent, last_update, num_changes, block_num, is_effective
+                  {}
+                ) AS T(order_id, voter, author, permlink, weight, rshares, vote_percent, last_update, num_changes, block_num, is_effective)
+                INNER JOIN hive_accounts ha_v ON ha_v.name = t.voter
+                INNER JOIN hive_accounts ha_a ON ha_a.name = t.author
+                INNER JOIN hive_permlink_data hpd_p ON hpd_p.permlink = t.permlink
+                INNER JOIN hive_posts hp ON hp.author_id = ha_a.id AND hp.permlink_id = hpd_p.id
+                WHERE hp.counter_deleted = 0
+                ORDER BY t.order_id
+                ON CONFLICT ON CONSTRAINT hive_votes_voter_id_author_id_permlink_id_uk DO
+                UPDATE
+                  SET
+                    weight = CASE EXCLUDED.is_effective WHEN true THEN EXCLUDED.weight ELSE hive_votes.weight END,
+                    rshares = CASE EXCLUDED.is_effective WHEN true THEN EXCLUDED.rshares ELSE hive_votes.rshares END,
+                    vote_percent = EXCLUDED.vote_percent,
+                    last_update = EXCLUDED.last_update,
+                    num_changes = hive_votes.num_changes + EXCLUDED.num_changes + 1,
+                    block_num = EXCLUDED.block_num
+                  WHERE hive_votes.voter_id = EXCLUDED.voter_id and hive_votes.author_id = EXCLUDED.author_id and hive_votes.permlink_id = EXCLUDED.permlink_id;
+                """
+            # WHERE clause above seems superfluous (and works all the same without it, at least up to 5mln)
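+            # on conflict the existing vote row is updated; weight and rshares change only when
+            # the incoming row originates from an effective_comment_vote_operation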
+
+            values = []
+            values_limit = 1000
+
+            for _, vd in cls._votes_data.items():
+                values.append("({}, '{}', '{}', {}, {}, {}, {}, '{}'::timestamp, {}, {}, {})".format(
+                    len(values), # for ordering
+                    vd['voter'], vd['author'], vd['permlink'], vd['weight'], vd['rshares'],
+                    vd['vote_percent'], vd['last_update'], vd['num_changes'], vd['block_num'], vd['is_effective']))
+
+                if len(values) >= values_limit:
+                    values_str = ','.join(values)
+                    actual_query = sql.format(values_str)
+                    cls.db.query(actual_query)
+                    values.clear()
+
+            if len(values) > 0:
+                values_str = ','.join(values)
+                actual_query = sql.format(values_str)
+                cls.db.query(actual_query)
+                values.clear()
+
+            n = len(cls._votes_data)
+            cls._votes_data.clear()
+            cls.commitTx()
+
+        cls.inside_flush = False
+
+        return n
diff --git a/hive/server/bridge_api/cursor.py b/hive/server/bridge_api/cursor.py
deleted file mode 100644
index 8d9477468a877d142a817fd93c6708710e7c80a9..0000000000000000000000000000000000000000
--- a/hive/server/bridge_api/cursor.py
+++ /dev/null
@@ -1,406 +0,0 @@
-"""Cursor-based pagination queries, mostly supporting bridge_api."""
-
-from datetime import datetime
-from dateutil.relativedelta import relativedelta
-
-# pylint: disable=too-many-lines
-
-DEFAULT_CID = 1317453
-PAYOUT_WINDOW = "now() + interval '12 hours' AND now() + interval '36 hours'"
-
-def last_month():
-    """Get the date 1 month ago."""
-    return datetime.now() + relativedelta(months=-1)
-
-async def _get_post_id(db, author, permlink):
-    """Get post_id from hive db. (does NOT filter on is_deleted)"""
-    sql = "SELECT id FROM hive_posts WHERE author = :a AND permlink = :p"
-    post_id = await db.query_one(sql, a=author, p=permlink)
-    assert post_id, 'invalid author/permlink'
-    return post_id
-
-async def _get_account_id(db, name):
-    """Get account id from hive db."""
-    assert name, 'no account name specified'
-    _id = await db.query_one("SELECT id FROM hive_accounts WHERE name = :n", n=name)
-    assert _id, "account not found: `%s`" % name
-    return _id
-
-async def _get_community_id(db, name):
-    """Get community id from hive db."""
-    assert name, 'no comm name specified'
-    _id = await db.query_one("SELECT id FROM hive_communities WHERE name = :n", n=name)
-    return _id
-
-#TODO: async def posts_by_ranked
-async def pids_by_ranked(db, sort, start_author, start_permlink, limit, tag, observer_id=None):
-    """Get a list of post_ids for a given posts query.
-
-    if `tag` is blank: global trending
-    if `tag` is `my`: personal trending
-    if `tag` is `hive-*`: community trending
-    else `tag` is a tag: tag trending
-
-    Valid `sort` values:
-     - legacy: trending, hot, created, promoted, payout, payout_comments
-     - hive: trending, hot, created, promoted, payout, muted
-    """
-    # TODO: `payout` should limit to ~24hrs
-    # pylint: disable=too-many-arguments
-
-    # list of comm ids to query, if tag is comms key
-    cids = None
-    single = None
-    if tag == 'my':
-        cids = await _subscribed(db, observer_id)
-        if not cids: return []
-    elif tag == 'all':
-        cids = []
-    elif tag[:5] == 'hive-':
-        single = await _get_community_id(db, tag)
-        if single: cids = [single]
-
-    # if tag was comms key, then no tag filter
-    if cids is not None: tag = None
-
-    start_id = None
-    if start_permlink:
-        start_id = await _get_post_id(db, start_author, start_permlink)
-
-    if cids is None:
-        pids = await pids_by_category(db, tag, sort, start_id, limit)
-    else:
-        pids = await pids_by_community(db, cids, sort, start_id, limit)
-
-    # if not filtered by tag, is first page trending: prepend pinned
-    if not tag and not start_id and sort in ('trending', 'created'):
-        prepend = await _pinned(db, single or DEFAULT_CID)
-        for pid in prepend:
-            if pid in pids:
-                pids.remove(pid)
-        pids = prepend + pids
-
-    return pids
-
-
-async def pids_by_community(db, ids, sort, seek_id, limit):
-    """Get a list of post_ids for a given posts query.
-
-    `sort` can be trending, hot, created, promoted, payout, or payout_comments.
-    """
-    # pylint: disable=bad-whitespace, line-too-long
-
-    # TODO: `payout` should limit to ~24hrs
-    definitions = {#         field         pending toponly gray   promoted
-        'trending':        ('sc_trend',    False,  True,   False, False),
-        'hot':             ('sc_hot',      False,  True,   False, False),
-        'created':         ('created_at',  False,  True,   False, False),
-        'promoted':        ('promoted',    True,   True,   False, True),
-        'payout':          ('payout',      True,   False,  False, False),
-        'muted':           ('payout',      True,   False,  True,  False)}
-
-    # validate
-    assert sort in definitions, 'unknown sort %s' % sort
-
-    # setup
-    field, pending, toponly, gray, promoted = definitions[sort]
-    table = 'hive_posts_cache'
-    where = ["community_id IN :ids"] if ids else ["community_id IS NOT NULL AND community_id != 1337319"]
-
-    # select
-    if gray:     where.append("is_grayed = '1'")
-    if not gray: where.append("is_grayed = '0'")
-    if toponly:  where.append("depth = 0")
-    if pending:  where.append("is_paidout = '0'")
-    if promoted: where.append('promoted > 0')
-    if sort == 'payout': where.append("payout_at BETWEEN %s" % PAYOUT_WINDOW)
-
-    # seek
-    if seek_id:
-        sval = "(SELECT %s FROM %s WHERE post_id = :seek_id)" % (field, table)
-        sql = """((%s < %s) OR (%s = %s AND post_id > :seek_id))"""
-        where.append(sql % (field, sval, field, sval))
-
-        # simpler `%s <= %s` eval has edge case: many posts with payout 0
-        #sql = "SELECT %s FROM %s WHERE post_id = :id)"
-        #seek_val = await db.query_col(sql % (field, table), id=seek_id)
-        #sql = """((%s < :seek_val) OR
-        #          (%s = :seek_val AND post_id > :seek_id))"""
-        #where.append(sql % (field, sval, field, sval))
-
-    # build
-    sql = ("""SELECT post_id FROM %s WHERE %s
-              ORDER BY %s DESC, post_id LIMIT :limit
-              """ % (table, ' AND '.join(where), field))
-
-    # execute
-    return await db.query_col(sql, ids=tuple(ids), seek_id=seek_id, limit=limit)
-
-
-
-async def pids_by_category(db, tag, sort, last_id, limit):
-    """Get a list of post_ids for a given posts query.
-
-    `sort` can be trending, hot, created, promoted, payout, or payout_comments.
-    """
-    # pylint: disable=bad-whitespace
-    assert sort in ['trending', 'hot', 'created', 'promoted',
-                    'payout', 'payout_comments', 'muted']
-
-    params = {             # field      pending posts   comment promoted
-        'trending':        ('sc_trend', True,   True,   False,  False),
-        'hot':             ('sc_hot',   True,   True,   False,  False),
-        'created':         ('post_id',  False,  True,   False,  False),
-        'promoted':        ('promoted', True,   False,  False,  True),
-        'payout':          ('payout',   True,   False,  False,  False),
-        'payout_comments': ('payout',   True,   False,  True,   False),
-        'muted':           ('payout',   True,   False,  False,  False),
-    }[sort]
-
-    table = 'hive_posts_cache'
-    field = params[0]
-    where = []
-
-    # primary filters
-    if params[1]: where.append("is_paidout = '0'")
-    if params[2]: where.append('depth = 0')
-    if params[3]: where.append('depth > 0')
-    if params[4]: where.append('promoted > 0')
-    if sort == 'muted': where.append("is_grayed = '1' AND payout > 0")
-    if sort == 'payout': where.append("payout_at BETWEEN %s" % PAYOUT_WINDOW)
-
-    # filter by category or tag
-    if tag:
-        if sort in ['payout', 'payout_comments']:
-            where.append('category = :tag')
-        else:
-            sql = "SELECT post_id FROM hive_post_tags WHERE tag = :tag"
-            where.append("post_id IN (%s)" % sql)
-
-    if last_id:
-        sval = "(SELECT %s FROM %s WHERE post_id = :last_id)" % (field, table)
-        sql = """((%s < %s) OR (%s = %s AND post_id > :last_id))"""
-        where.append(sql % (field, sval, field, sval))
-
-    sql = ("""SELECT post_id FROM %s WHERE %s
-              ORDER BY %s DESC, post_id LIMIT :limit
-              """ % (table, ' AND '.join(where), field))
-
-    return await db.query_col(sql, tag=tag, last_id=last_id, limit=limit)
-
-async def _subscribed(db, account_id):
-    sql = """SELECT community_id FROM hive_subscriptions
-              WHERE account_id = :account_id"""
-    return await db.query_col(sql, account_id=account_id)
-
-async def _pinned(db, community_id):
-    """Get a list of pinned post `id`s in `community`."""
-    sql = """SELECT id FROM hive_posts
-              WHERE is_pinned = '1'
-                AND is_deleted = '0'
-                AND community_id = :community_id
-            ORDER BY id DESC"""
-    return await db.query_col(sql, community_id=community_id)
-
-
-async def pids_by_blog(db, account: str, start_author: str = '',
-                       start_permlink: str = '', limit: int = 20):
-    """Get a list of post_ids for an author's blog."""
-    account_id = await _get_account_id(db, account)
-
-    seek = ''
-    start_id = None
-    if start_permlink:
-        start_id = await _get_post_id(db, start_author, start_permlink)
-        seek = """
-          AND created_at <= (
-            SELECT created_at
-              FROM hive_feed_cache
-             WHERE account_id = :account_id
-               AND post_id = :start_id)
-        """
-
-    # ignore community posts which were not reblogged
-    skip = """
-        SELECT id FROM hive_posts
-         WHERE author = :account
-           AND is_deleted = '0'
-           AND depth = 0
-           AND community_id IS NOT NULL
-           AND id NOT IN (SELECT post_id FROM hive_reblogs
-                           WHERE account = :account)"""
-
-    sql = """
-        SELECT post_id
-          FROM hive_feed_cache
-         WHERE account_id = :account_id %s
-           AND post_id NOT IN (%s)
-      ORDER BY created_at DESC
-         LIMIT :limit
-    """ % (seek, skip)
-
-    # alternate implementation -- may be more efficient
-    #sql = """
-    #    SELECT id
-    #      FROM (
-    #             SELECT id, author account, created_at FROM hive_posts
-    #              WHERE depth = 0 AND is_deleted = '0' AND community_id IS NULL
-    #              UNION ALL
-    #             SELECT post_id id, account, created_at FROM hive_reblogs
-    #           ) blog
-    #     WHERE account = :account %s
-    #  ORDER BY created_at DESC
-    #     LIMIT :limit
-    #""" % seek
-
-    return await db.query_col(sql, account_id=account_id, account=account,
-                              start_id=start_id, limit=limit)
-
-async def pids_by_feed_with_reblog(db, account: str, start_author: str = '',
-                                   start_permlink: str = '', limit: int = 20):
-    """Get a list of [post_id, reblogged_by_str] for an account's feed."""
-    account_id = await _get_account_id(db, account)
-
-    seek = ''
-    start_id = None
-    if start_permlink:
-        start_id = await _get_post_id(db, start_author, start_permlink)
-        if not start_id:
-            return []
-
-        seek = """
-          HAVING MIN(hive_feed_cache.created_at) <= (
-            SELECT MIN(created_at) FROM hive_feed_cache WHERE post_id = :start_id
-               AND account_id IN (SELECT following FROM hive_follows
-                                  WHERE follower = :account AND state = 1))
-        """
-
-    sql = """
-        SELECT post_id, string_agg(name, ',') accounts
-          FROM hive_feed_cache
-          JOIN hive_follows ON account_id = hive_follows.following AND state = 1
-          JOIN hive_accounts ON hive_follows.following = hive_accounts.id
-         WHERE hive_follows.follower = :account
-           AND hive_feed_cache.created_at > :cutoff
-      GROUP BY post_id %s
-      ORDER BY MIN(hive_feed_cache.created_at) DESC LIMIT :limit
-    """ % seek
-
-    result = await db.query_all(sql, account=account_id, start_id=start_id,
-                                limit=limit, cutoff=last_month())
-    return [(row[0], row[1]) for row in result]
-
-
-async def pids_by_posts(db, account: str, start_permlink: str = '', limit: int = 20):
-    """Get a list of post_ids representing top-level posts by an author."""
-    seek = ''
-    start_id = None
-    if start_permlink:
-        start_id = await _get_post_id(db, account, start_permlink)
-        if not start_id:
-            return []
-
-        seek = "AND id <= :start_id"
-
-    # `depth` in ORDER BY is a no-op, but forces an ix3 index scan (see #189)
-    sql = """
-        SELECT id FROM hive_posts
-         WHERE author = :account %s
-           AND is_deleted = '0'
-           AND depth = '0'
-      ORDER BY id DESC
-         LIMIT :limit
-    """ % seek
-
-    return await db.query_col(sql, account=account, start_id=start_id, limit=limit)
-
-async def pids_by_comments(db, account: str, start_permlink: str = '', limit: int = 20):
-    """Get a list of post_ids representing comments by an author."""
-    seek = ''
-    start_id = None
-    if start_permlink:
-        start_id = await _get_post_id(db, account, start_permlink)
-        if not start_id:
-            return []
-
-        seek = "AND id <= :start_id"
-
-    # `depth` in ORDER BY is a no-op, but forces an ix3 index scan (see #189)
-    sql = """
-        SELECT id FROM hive_posts
-         WHERE author = :account %s
-           AND is_deleted = '0'
-           AND depth > 0
-      ORDER BY id DESC, depth
-         LIMIT :limit
-    """ % seek
-
-    return await db.query_col(sql, account=account, start_id=start_id, limit=limit)
-
-
-async def pids_by_replies(db, start_author: str, start_permlink: str = '',
-                          limit: int = 20):
-    """Get a list of post_ids representing replies to an author.
-
-    To get the first page of results, specify `start_author` as the
-    account being replied to. For successive pages, provide the
-    last loaded reply's author/permlink.
-    """
-    seek = ''
-    start_id = None
-    if start_permlink:
-        sql = """
-          SELECT parent.author,
-                 child.id
-            FROM hive_posts child
-            JOIN hive_posts parent
-              ON child.parent_id = parent.id
-           WHERE child.author = :author
-             AND child.permlink = :permlink
-        """
-
-        row = await db.query_row(sql, author=start_author, permlink=start_permlink)
-        if not row:
-            return []
-
-        parent_account = row[0]
-        start_id = row[1]
-        seek = "AND id <= :start_id"
-    else:
-        parent_account = start_author
-
-    sql = """
-       SELECT id FROM hive_posts
-        WHERE parent_id IN (SELECT id FROM hive_posts
-                             WHERE author = :parent
-                               AND is_deleted = '0'
-                          ORDER BY id DESC
-                             LIMIT 10000) %s
-          AND is_deleted = '0'
-     ORDER BY id DESC
-        LIMIT :limit
-    """ % seek
-
-    return await db.query_col(sql, parent=parent_account, start_id=start_id, limit=limit)
-
-async def pids_by_payout(db, account: str, start_author: str = '',
-                         start_permlink: str = '', limit: int = 20):
-    """Get a list of post_ids for an author's blog."""
-    seek = ''
-    start_id = None
-    if start_permlink:
-        start_id = await _get_post_id(db, start_author, start_permlink)
-        last = "(SELECT payout FROM hive_posts_cache WHERE post_id = :start_id)"
-        seek = ("""AND (payout < %s OR (payout = %s AND post_id > :start_id))"""
-                % (last, last))
-
-    sql = """
-        SELECT post_id
-          FROM hive_posts_cache
-         WHERE author = :account
-           AND is_paidout = '0' %s
-      ORDER BY payout DESC, post_id
-         LIMIT :limit
-    """ % seek
-
-    return await db.query_col(sql, account=account, start_id=start_id, limit=limit)
diff --git a/hive/server/bridge_api/methods.py b/hive/server/bridge_api/methods.py
index 89d06c007dc7db1065b097629c6a7c40118bb940..9735dd01fcda73f1c6695fd64260f2926b646fcc 100644
--- a/hive/server/bridge_api/methods.py
+++ b/hive/server/bridge_api/methods.py
@@ -1,7 +1,7 @@
 """Bridge API public endpoints for posts"""
 
-import hive.server.bridge_api.cursor as cursor
-from hive.server.bridge_api.objects import load_posts, load_posts_reblogs, load_profiles, _condenser_post_object
+from hive.server.bridge_api.objects import load_profiles, _bridge_post_object, append_statistics_to_post
+from hive.server.database_api.methods import find_votes_impl, VotesPresentation
 from hive.server.common.helpers import (
     return_error_info,
     valid_account,
@@ -13,45 +13,17 @@ from hive.server.hive_api.objects import _follow_contexts
 from hive.server.hive_api.community import list_top_communities
 from hive.server.common.mutes import Mutes
 
-
-ROLES = {-2: 'muted', 0: 'guest', 2: 'member', 4: 'mod', 6: 'admin', 8: 'owner'}
-
-SELECT_FRAGMENT = """
-    SELECT hive_posts_cache.post_id, hive_posts_cache.author, hive_posts_cache.permlink,
-           hive_posts_cache.title, hive_posts_cache.body, hive_posts_cache.category, hive_posts_cache.depth,
-           hive_posts_cache.promoted, hive_posts_cache.payout, hive_posts_cache.payout_at,
-           hive_posts_cache.is_paidout, hive_posts_cache.children, hive_posts_cache.votes,
-           hive_posts_cache.created_at, hive_posts_cache.updated_at, hive_posts_cache.rshares,
-           hive_posts_cache.raw_json, hive_posts_cache.json, hive_accounts.reputation AS author_rep,
-           hive_posts_cache.is_hidden AS is_hidden, hive_posts_cache.is_grayed AS is_grayed,
-           hive_posts_cache.total_votes AS total_votes, hive_posts_cache.flag_weight AS flag_weight,
-           hive_posts_cache.sc_trend AS sc_trend, hive_accounts.id AS acct_author_id,
-           hive_roles.title as role_title, hive_communities.title AS community_title, hive_roles.role_id AS role_id,
-           hive_posts.is_pinned AS is_pinned
-           FROM hive_posts_cache JOIN hive_posts ON (hive_posts_cache.post_id = hive_posts.id)
-                                 JOIN hive_accounts ON (hive_posts_cache.author = hive_accounts.name)
-                                 LEFT OUTER JOIN hive_communities ON (hive_posts_cache.community_id = hive_communities.id)
-                                 LEFT OUTER JOIN hive_roles ON (hive_accounts.id = hive_roles.account_id AND hive_posts_cache.community_id = hive_roles.community_id) """
-
 #pylint: disable=too-many-arguments, no-else-return
 
-async def _get_post_id(db, author, permlink):
-    """Get post_id from hive db."""
-    sql = """SELECT id FROM hive_posts
-              WHERE author = :a
-                AND permlink = :p
-                AND is_deleted = '0'"""
-    post_id = await db.query_one(sql, a=author, p=permlink)
-    assert post_id, 'invalid author/permlink'
-    return post_id
-
 @return_error_info
 async def get_profile(context, account, observer=None):
     """Load account/profile data."""
     db = context['db']
+    account = valid_account(account)
+    observer = valid_account(observer, allow_empty=True)
+
     ret = await load_profiles(db, [valid_account(account)])
-    if not ret:
-        return None
+    assert ret, 'Account \'{}\' does not exist'.format(account) # should not be needed
 
     observer_id = await get_account_id(db, observer) if observer else None
     if observer_id:
@@ -59,13 +31,13 @@ async def get_profile(context, account, observer=None):
     return ret[0]
 
 @return_error_info
-async def get_trending_topics(context, limit=10, observer=None):
+async def get_trending_topics(context, limit:int=10, observer:str=None):
     """Return top trending topics across pending posts."""
     # pylint: disable=unused-argument
     #db = context['db']
     #observer_id = await get_account_id(db, observer) if observer else None
     #assert not observer, 'observer not supported'
-    limit = valid_limit(limit, 25)
+    limit = valid_limit(limit, 25, 10)
     out = []
     cells = await list_top_communities(context, limit)
     for name, title in cells:
@@ -83,219 +55,262 @@ async def get_post(context, author, permlink, observer=None):
     #TODO: `observer` logic for user-post state
     db = context['db']
     valid_account(author)
+    valid_account(observer, allow_empty=True)
     valid_permlink(permlink)
 
-    blacklists_for_user = None
-    if observer and context:
-        blacklists_for_user = await Mutes.get_blacklists_for_observer(observer, context)
-
-    sql = "---bridge_api.get_post\n" + SELECT_FRAGMENT + """ WHERE hive_posts_cache.author = :author AND hive_posts_cache.permlink = :permlink AND NOT hive_posts.is_deleted """
-
+    sql = "SELECT * FROM bridge_get_post( (:author)::VARCHAR, (:permlink)::VARCHAR )"
     result = await db.query_all(sql, author=author, permlink=permlink)
-    assert len(result) == 1, 'invalid author/permlink or post not found in cache'
-    post = _condenser_post_object(result[0])
-    post = await append_statistics_to_post(post, result[0], False, blacklists_for_user)
+
+    post = _bridge_post_object(result[0])
+    post['active_votes'] = await find_votes_impl(db, author, permlink, VotesPresentation.BridgeApi)
+    post = append_statistics_to_post(post, result[0], False)
     return post
 
 @return_error_info
-async def get_ranked_posts(context, sort, start_author='', start_permlink='',
-                           limit=20, tag=None, observer=None):
-    """Query posts, sorted by given method."""
+async def _get_ranked_posts_for_observer_communities( db, sort:str, start_author:str, start_permlink:str, limit, observer:str):
+    async def execute_observer_community_query(db, sql, limit):
+        return await db.query_all(sql, observer=observer, author=start_author, permlink=start_permlink, limit=limit )
 
-    assert sort in ['trending', 'hot', 'created', 'promoted',
-                    'payout', 'payout_comments', 'muted'], 'invalid sort'
+    if sort == 'trending':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_trends_for_observer_communities( (:observer)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
+        return await execute_observer_community_query(db, sql, limit)
 
-    valid_account(start_author, allow_empty=True)
-    valid_permlink(start_permlink, allow_empty=True)
-    valid_limit(limit, 100)
-    valid_tag(tag, allow_empty=True)
+    if sort == 'created':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_created_for_observer_communities( (:observer)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
+        return await execute_observer_community_query(db, sql, limit)
 
-    db = context['db']
+    if sort == 'hot':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_hot_for_observer_communities( (:observer)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
+        return await execute_observer_community_query(db, sql, limit)
+
+    if sort == 'promoted':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_promoted_for_observer_communities( (:observer)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
+        return await execute_observer_community_query(db, sql, limit)
 
-    sql = ''
-    pinned_sql = ''
+    if sort == 'payout':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout_for_observer_communities( (:observer)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
+        return await execute_observer_community_query(db, sql, limit)
+
+    if sort == 'payout_comments':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout_comments_for_observer_communities( (:observer)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
+        return await execute_observer_community_query(db, sql, limit)
+
+    if sort == 'muted':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_muted_for_observer_communities( (:observer)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
+        return await execute_observer_community_query(db, sql, limit)
+
+    assert False, "Unknown sort order"
+
+@return_error_info
+async def _get_ranked_posts_for_communities( db, sort:str, community, start_author:str, start_permlink:str, limit, observer:str ):
+    async def execute_community_query(db, sql, limit):
+        return await db.query_all(sql, community=community, author=start_author, permlink=start_permlink, limit=limit, observer=observer )
+
+    pinned_sql = "SELECT * FROM bridge_get_ranked_post_pinned_for_community( (:community)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+    # the pinned query has no paging, so not all pinned posts can be retrieved,
+    # and the same pinned posts may repeat on each page (depending on limit and the number of pinned posts)
+    if sort == 'hot':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_hot_for_community( (:community)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_community_query(db, sql, limit)
 
     if sort == 'trending':
-        sql = SELECT_FRAGMENT + """ WHERE NOT hive_posts_cache.is_paidout AND hive_posts_cache.depth = 0 AND NOT hive_posts.is_deleted
-                                    %s ORDER BY sc_trend desc, post_id LIMIT :limit """
-    elif sort == 'hot':
-        sql = SELECT_FRAGMENT + """ WHERE NOT hive_posts_cache.is_paidout AND hive_posts_cache.depth = 0 AND NOT hive_posts.is_deleted
-                                    %s ORDER BY sc_hot desc, post_id LIMIT :limit """
-    elif sort == 'created':
-        sql = SELECT_FRAGMENT + """ WHERE hive_posts_cache.depth = 0 AND NOT hive_posts.is_deleted AND NOT hive_posts_cache.is_grayed
-                                    %s ORDER BY hive_posts_cache.created_at DESC, post_id LIMIT :limit """
-    elif sort == 'promoted':
-        sql = SELECT_FRAGMENT + """ WHERE hive_posts_cache.depth > 0 AND hive_posts_cache.promoted > 0 AND NOT hive_posts.is_deleted
-                                    AND NOT hive_posts_cache.is_paidout %s ORDER BY hive_posts_cache.promoted DESC, post_id LIMIT :limit """
-    elif sort == 'payout':
-        sql = SELECT_FRAGMENT + """ WHERE NOT hive_posts_cache.is_paidout AND NOT hive_posts.is_deleted %s
-                                    AND payout_at BETWEEN now() + interval '12 hours' AND now() + interval '36 hours'
-                                    ORDER BY hive_posts_cache.payout DESC, post_id LIMIT :limit """
-    elif sort == 'payout_comments':
-        sql = SELECT_FRAGMENT + """ WHERE NOT hive_posts_cache.is_paidout AND NOT hive_posts.is_deleted AND hive_posts_cache.depth > 0
-                                    %s ORDER BY hive_posts_cache.payout DESC, post_id LIMIT :limit """
-    elif sort == 'muted':
-        sql = SELECT_FRAGMENT + """ WHERE NOT hive_posts_cache.is_paidout AND NOT hive_posts.is_deleted AND hive_posts_cache.is_grayed
-                                    AND hive_posts_cache.payout > 0 %s ORDER BY hive_posts_cache.payout DESC, post_id LIMIT :limit """
-
-    sql = "---bridge_api.get_ranked_posts\n" + sql
-
-    if start_author and start_permlink:
-        if sort == 'trending':
-            sql = sql % """ AND hive_posts_cache.sc_trend <= (SELECT sc_trend FROM hive_posts_cache WHERE permlink = :permlink AND author = :author) 
-                            AND hive_posts_cache.post_id != (SELECT post_id FROM hive_posts_cache WHERE permlink = :permlink AND author=:author) %s """
-        elif sort == 'hot':
-            sql = sql % """ AND hive_posts_cache.sc_hot <= (SELECT sc_hot FROM hive_posts_cache WHERE permlink = :permlink AND author = :author)
-                            AND hive_posts_cache.post_id != (SELECT post_id FROM hive_posts_cache WHERE permlink = :permlink AND author = :author) %s """
-        elif sort == 'created':
-            sql = sql % """ AND hive_posts_cache.post_id < (SELECT post_id FROM hive_posts_cache WHERE permlink = :permlink AND author = :author) %s """
-        elif sort == 'promoted':
-            sql = sql % """ AND hive_posts_cache.promoted <= (SELECT promoted FROM hive_posts_cache WHERE permlink = :permlink AND author = :author)
-                                AND hive_posts_cache.post_id != (SELECT post_id FROM hive_posts_cache WHERE permlink = :permlink AND author = :author) %s """
-        else:
-            sql = sql % """ AND hive_posts_cache.payout <= (SELECT payout FROM hive_posts_cache WHERE permlink = :permlink AND author = :author)
-                                AND hive_posts_cache.post_id != (SELECT post_id FROM hive_posts_cache WHERE permlink = :permlink AND author = :author) %s """
-    else:
-        sql = sql % """ %s """
-
-    if not tag or tag == 'all':
-        sql = sql % """ """
-    elif tag == 'my':
-        sql = sql % """ AND hive_posts_cache.community_id IN (SELECT community_id FROM hive_subscriptions WHERE account_id =
-                        (SELECT id FROM hive_accounts WHERE name = :observer) ) """
-    elif tag[:5] == 'hive-':
-        if start_author and start_permlink:
-            sql = sql % """  AND hive_posts_cache.community_id = (SELECT hive_communities.id FROM hive_communities WHERE name = :community_name ) """
-        else:
-            sql = sql % """ AND hive_communities.name = :community_name """
-
-        if (sort == 'trending' or sort == 'created') and not start_author and not start_permlink:
-                pinned_sql = SELECT_FRAGMENT + """ WHERE is_pinned AND hive_communities.name = :community_name ORDER BY hive_posts_cache.created_at DESC """
+        sql = "SELECT * FROM bridge_get_ranked_post_by_trends_for_community( (:community)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, True, (:observer)::VARCHAR )"
+        result_with_pinned_posts = await execute_community_query(db, pinned_sql, limit)
+        limit -= len(result_with_pinned_posts)
+        if limit > 0:
+            result_with_pinned_posts += await execute_community_query(db, sql, limit)
+        return result_with_pinned_posts
 
-    else:
-        if sort in ['payout', 'payout_comments']:
-            sql = sql % """ AND hive_posts_cache.category = :tag """
-        else:
-            sql = sql % """ AND hive_posts_cache.post_id IN (SELECT post_id FROM hive_post_tags WHERE tag = :tag)"""
+    if sort == 'promoted':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_promoted_for_community( (:community)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_community_query(db, sql, limit)
 
-    if not observer:
-        observer = ''
+    if sort == 'created':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_created_for_community( (:community)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, True, (:observer)::VARCHAR )"
+        result_with_pinned_posts = await execute_community_query(db, pinned_sql, limit)
+        limit -= len(result_with_pinned_posts)
+        if limit > 0:
+            result_with_pinned_posts += await execute_community_query(db, sql, limit)
+        return result_with_pinned_posts
 
-    posts = []
-    pinned_post_ids = []
-
-    blacklists_for_user = None
-    if observer and context:
-        blacklists_for_user = await Mutes.get_blacklists_for_observer(observer, context)
-
-    if pinned_sql:
-        pinned_result = await db.query_all(pinned_sql, author=start_author, limit=limit, tag=tag, permlink=start_permlink, community_name=tag, observer=observer)
-        for row in pinned_result:
-            post = _condenser_post_object(row)
-            post = await append_statistics_to_post(post, row, True, blacklists_for_user)
-            limit = limit - 1
-            posts.append(post)
-            pinned_post_ids.append(post['post_id'])
+    if sort == 'muted':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_muted_for_community( (:community)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_community_query(db, sql, limit)
 
-    sql_result = await db.query_all(sql, author=start_author, limit=limit, tag=tag, permlink=start_permlink, community_name=tag, observer=observer)
-    for row in sql_result:
-        post = _condenser_post_object(row)
-        post = await append_statistics_to_post(post, row, False, blacklists_for_user)
-        if post['post_id'] in pinned_post_ids:
-            continue
-        posts.append(post)
-    return posts
+    if sort == 'payout':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout_for_community( (:community)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_community_query(db, sql, limit)
 
-async def append_statistics_to_post(post, row, is_pinned, blacklists_for_user=None):
-    """ apply information such as blacklists and community names/roles to a given post """
-    if not blacklists_for_user:
-        post['blacklists'] = Mutes.lists(row['author'], row['author_rep'])
-    else:
-        post['blacklists'] = []
-        if row['author'] in blacklists_for_user:
-            blacklists = blacklists_for_user[row['author']]
-            for blacklist in blacklists:
-                post['blacklists'].append(blacklist)
-        reputation = int(row['author_rep'])
-        if reputation < 1:
-            post['blacklists'].append('reputation-0')
-        elif reputation  == 1:
-            post['blacklists'].append('reputation-1')
-
-    if 'community_title' in row and row['community_title']:
-        post['community'] = row['category']
-        post['community_title'] = row['community_title']
-        if row['role_id']:
-            post['author_role'] = ROLES[row['role_id']]
-            post['author_title'] = row['role_title']
-        else:
-            post['author_role'] = 'guest'
-            post['author_title'] = ''
-    else:
-        post['stats']['gray'] = row['is_grayed']
-    post['stats']['hide'] = 'irredeemables' in post['blacklists']
+    if sort == 'payout_comments':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout_comments_for_community( (:community)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_community_query(db, sql, limit)
+
+    assert False, "Unknown sort order"
 
-    if is_pinned:
-        post['stats']['is_pinned'] = True
-    return post
 
 @return_error_info
-async def get_account_posts(context, sort, account, start_author='', start_permlink='',
-                            limit=20, observer=None):
-    """Get posts for an account -- blog, feed, comments, or replies."""
-    valid_sorts = ['blog', 'feed', 'posts', 'comments', 'replies', 'payout']
-    assert sort in valid_sorts, 'invalid account sort'
-    assert account, 'account is required'
+async def _get_ranked_posts_for_tag( db, sort:str, tag, start_author:str, start_permlink:str, limit, observer:str ):
+    async def execute_tags_query(db, sql):
+        return await db.query_all(sql, tag=tag, author=start_author, permlink=start_permlink, limit=limit, observer=observer )
+
+    if sort == 'hot':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_hot_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_tags_query(db, sql)
+
+    if sort == 'promoted':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_promoted_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_tags_query(db, sql)
+
+    if sort == 'payout':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout_for_category( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, True, (:observer)::VARCHAR )"
+        return await execute_tags_query(db, sql)
+
+    if sort == 'payout_comments':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout_comments_for_category( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_tags_query(db, sql)
+
+    if sort == 'muted':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_muted_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_tags_query(db, sql)
+
+    if sort == 'trending':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_trends_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_tags_query(db, sql)
+
+    if sort == 'created':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_created_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_tags_query(db, sql)
+
+    assert False, "Unknown sort order"
+
+@return_error_info
+async def _get_ranked_posts_for_all( db, sort:str, start_author:str, start_permlink:str, limit, observer:str ):
+    async def execute_query(db, sql):
+        return await db.query_all(sql, author=start_author, permlink=start_permlink, limit=limit, observer=observer )
+
+    if sort == 'trending':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_trends( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_query(db, sql)
+
+    if sort == 'created':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_created( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_query(db, sql)
+
+    if sort == 'hot':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_hot( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_query(db, sql)
+
+    if sort == 'promoted':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_promoted( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_query(db, sql)
+
+    if sort == 'payout':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, True, (:observer)::VARCHAR )"
+        return await execute_query(db, sql)
+
+    if sort == 'payout_comments':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout_comments( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_query(db, sql)
+
+    if sort == 'muted':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_muted( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+        return await execute_query(db, sql)
+
+    assert False, "Unknown sort order"
+
+@return_error_info
+async def get_ranked_posts(context, sort:str, start_author:str='', start_permlink:str='',
+                           limit:int=20, tag:str='', observer:str=''):
+    """Query posts, sorted by given method."""
+    supported_sort_list = ['trending', 'hot', 'created', 'promoted', 'payout', 'payout_comments', 'muted']
+    assert sort in supported_sort_list, "Unsupported sort, valid sorts: {}".format(", ".join(supported_sort_list))
 
     db = context['db']
-    account = valid_account(account)
+
+    async def process_query_results( sql_result ):
+        posts = []
+        for row in sql_result:
+            post = _bridge_post_object(row)
+            post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.BridgeApi)
+            post = append_statistics_to_post(post, row, row['is_pinned'])
+            posts.append(post)
+        return posts
+
     start_author = valid_account(start_author, allow_empty=True)
     start_permlink = valid_permlink(start_permlink, allow_empty=True)
-    start = (start_author, start_permlink)
-    limit = valid_limit(limit, 100)
+    limit = valid_limit(limit, 100, 20)
+    tag = valid_tag(tag, allow_empty=True)
+    observer = valid_account(observer, allow_empty=(tag != "my"))
 
-    # pylint: disable=unused-variable
-    observer_id = await get_account_id(db, observer) if observer else None # TODO
-     
-    sql = "---bridge_api.get_account_posts\n " + SELECT_FRAGMENT + """ %s """      
-        
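+    # dispatch on tag (example values below are illustrative): 'my' -> observer's subscribed communities,
+    # 'hive-123456' -> a single community, any other non-empty tag except 'all' -> tag/category query,
+    # otherwise ('' or 'all') -> global ranking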
+    if tag == "my":
+        result = await _get_ranked_posts_for_observer_communities(db, sort, start_author, start_permlink, limit, observer)
+        return await process_query_results(result)
+
+    if tag and tag[:5] == 'hive-':
+        result = await _get_ranked_posts_for_communities(db, sort, tag, start_author, start_permlink, limit, observer)
+        return await process_query_results(result)
+
+    if tag and tag != "all":
+        result = await _get_ranked_posts_for_tag(db, sort, tag, start_author, start_permlink, limit, observer)
+        return await process_query_results(result)
+
+    result = await _get_ranked_posts_for_all(db, sort, start_author, start_permlink, limit, observer)
+    return await process_query_results(result)
+
+@return_error_info
+async def get_account_posts(context, sort:str, account:str, start_author:str='', start_permlink:str='',
+                            limit:int=20, observer:str=None):
+    """Get posts for an account -- blog, feed, comments, or replies."""
+    supported_sort_list = ['blog', 'feed', 'posts', 'comments', 'replies', 'payout']
+    assert sort in supported_sort_list, "Unsupported sort, valid sorts: {}".format(", ".join(supported_sort_list))
+
+    db = context['db']
+
+    account =         valid_account(account)
+    start_author =    valid_account(start_author, allow_empty=True)
+    start_permlink =  valid_permlink(start_permlink, allow_empty=True)
+    observer =        valid_account(observer, allow_empty=True)
+    limit =           valid_limit(limit, 100, 20)
+
+    sql = None
+    account_posts = True # True when results should contain only posts (or reblogs) authored by the given account
     if sort == 'blog':
-        ids = await cursor.pids_by_blog(db, account, *start, limit)
-        posts = await load_posts(context['db'], ids)
-        for post in posts:
-            if post['author'] != account:
-                post['reblogged_by'] = [account]
-        return posts
+        sql = "SELECT * FROM bridge_get_account_posts_by_blog( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::INTEGER, True )"
+    elif sort == 'feed':
+        sql = "SELECT * FROM bridge_get_by_feed_with_reblog((:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::INTEGER)"
     elif sort == 'posts':
-        sql = sql % """ WHERE hive_posts_cache.author = :account AND NOT hive_posts.is_deleted AND hive_posts_cache.depth = 0 %s ORDER BY hive_posts_cache.post_id DESC LIMIT :limit"""
+        sql = "SELECT * FROM bridge_get_account_posts_by_posts( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
     elif sort == 'comments':
-        sql = sql % """ WHERE hive_posts_cache.author = :account AND NOT hive_posts.is_deleted AND hive_posts_cache.depth > 0 %s ORDER BY hive_posts_cache.post_id DESC, depth LIMIT :limit"""
-    elif sort == 'payout':
-        sql = sql % """ WHERE hive_posts_cache.author = :account AND NOT hive_posts.is_deleted AND NOT hive_posts_cache.is_paidout %s ORDER BY payout DESC, post_id LIMIT :limit"""
-    elif sort == 'feed':
-        res = await cursor.pids_by_feed_with_reblog(db, account, *start, limit)
-        return await load_posts_reblogs(context['db'], res)
+        sql = "SELECT * FROM bridge_get_account_posts_by_comments( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
     elif sort == 'replies':
-        start = start if start_permlink else (account, None)
-        ids = await cursor.pids_by_replies(db, *start, limit)
-        return await load_posts(context['db'], ids)
+        account_posts = False
+        sql = "SELECT * FROM bridge_get_account_posts_by_replies( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, True )"
+    elif sort == 'payout':
+        sql = "SELECT * FROM bridge_get_account_posts_by_payout( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
 
-    if start_author and start_permlink:
-        sql = sql % """ AND hive_posts_cache.post_id < (SELECT post_id FROM hive_posts_cache WHERE author = :author AND permlink = :permlink) """
-    else:
-        sql = sql % """ """
-        
+    sql_result = await db.query_all(sql, account=account, author=start_author, permlink=start_permlink, limit=limit )
     posts = []
-    blacklists_for_user = None
-    if observer:
-        blacklists_for_user = await Mutes.get_blacklists_for_observer(observer, context)
-    sql_result = await db.query_all(sql, account=account, author=start_author, permlink=start_permlink, limit=limit)
+
     for row in sql_result:
-        post = _condenser_post_object(row)
-        post = await append_statistics_to_post(post, row, False, blacklists_for_user)
+        post = _bridge_post_object(row)
+        post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.BridgeApi)
+        if sort == 'blog':
+            if post['author'] != account:
+                post['reblogged_by'] = [account]
+        elif sort == 'feed':
+            reblogged_by = set(row['reblogged_by'])
+            reblogged_by.discard(row['author']) # Eliminate original author of reblogged post
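+            # e.g. (illustrative): reblogged_by = {'alice', 'bob'} with row['author'] == 'alice' leaves ['bob']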
+            if reblogged_by:
+                reblogged_by_list = list(reblogged_by)
+                reblogged_by_list.sort()
+                post['reblogged_by'] = reblogged_by_list
+
+        post = append_statistics_to_post(post, row, False if account_posts else row['is_pinned'])
         posts.append(post)
     return posts
 
+
 @return_error_info
 async def get_relationship_between_accounts(context, account1, account2, observer=None):
     valid_account(account1)
@@ -303,19 +318,15 @@ async def get_relationship_between_accounts(context, account1, account2, observe
 
     db = context['db']
 
-    sql = """
-        SELECT state, blacklisted, follow_blacklists FROM hive_follows WHERE
-        follower = (SELECT id FROM hive_accounts WHERE name = :account1) AND 
-        following = (SELECT id FROM hive_accounts WHERE name = :account2)
-    """
-
+    sql = "SELECT * FROM bridge_get_relationship_between_accounts( (:account1)::VARCHAR, (:account2)::VARCHAR )"
     sql_result = await db.query_all(sql, account1=account1, account2=account2)
 
     result = {
         'follows': False,
         'ignores': False,
-        'is_blacklisted': False,
-        'follows_blacklists': False
+        'blacklists': False,
+        'follows_blacklists': False,
+        'follows_muted': False
     }
 
     for row in sql_result:
@@ -324,11 +335,47 @@ async def get_relationship_between_accounts(context, account1, account2, observe
             result['follows'] = True
         elif state == 2:
             result['ignores'] = True
-        
+
         if row['blacklisted']:
-            result['is_blacklisted'] = True
+            result['blacklists'] = True
         if row['follow_blacklists']:
             result['follows_blacklists'] = True
+        if row['follow_muted']:
+            result['follows_muted'] = True
 
     return result
 
+@return_error_info
+async def does_user_follow_any_lists(context, observer):
+    """ Tells if given observer follows any blacklist or mute list """
+    blacklists_for_user = await Mutes.get_blacklists_for_observer(observer, context)
+
+    return len(blacklists_for_user) > 0
+
+@return_error_info
+async def get_follow_list(context, observer, follow_type='blacklisted'):
+    """ For given observer gives directly blacklisted/muted accounts or
+        list of blacklists/mute lists followed by observer
+    """
+    observer = valid_account(observer)
+    valid_types = dict(blacklisted=1, follow_blacklist=2, muted=4, follow_muted=8)
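+    # the numeric values are presumably bit flags; valid_types[follow_type] is passed to Mutes.get_blacklisted_for_observer below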
+    assert follow_type in valid_types, "Unsupported follow_type, valid values: {}".format(", ".join(valid_types.keys()))
+
+    results = []
+    if follow_type == 'follow_blacklist' or follow_type == 'follow_muted':
+        blacklists_for_user = await Mutes.get_blacklists_for_observer(observer, context, follow_type == 'follow_blacklist', follow_type == 'follow_muted')
+        for row in blacklists_for_user:
+            list_data = await get_profile(context, row['list'])
+            metadata = list_data["metadata"]["profile"]
+            blacklist_description = metadata["blacklist_description"] if "blacklist_description" in metadata else ''
+            muted_list_description = metadata["muted_list_description"] if "muted_list_description" in metadata else ''
+            results.append({'name': row['list'], 'blacklist_description': blacklist_description, 'muted_list_description': muted_list_description})
+    else: # blacklisted or muted
+        blacklisted_for_user = await Mutes.get_blacklisted_for_observer(observer, context, valid_types[follow_type])
+        for account in blacklisted_for_user.keys():
+            results.append({'name': account, 'blacklist_description': '', 'muted_list_description': ''})
+
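+    # example return value (hypothetical account name; keys are the same for every follow_type,
+    # descriptions are only filled for follow_blacklist/follow_muted):
+    # [{'name': 'some-account', 'blacklist_description': '', 'muted_list_description': ''}]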
+    return results
diff --git a/hive/server/bridge_api/objects.py b/hive/server/bridge_api/objects.py
index 9846e3f4f2308c54b4b5aa256f6701deb717ef64..a891f84747ca4ec1591a86579119023db3eaf755 100644
--- a/hive/server/bridge_api/objects.py
+++ b/hive/server/bridge_api/objects.py
@@ -2,154 +2,60 @@
 
 import logging
 import ujson as json
+
 from hive.server.common.mutes import Mutes
-from hive.server.common.helpers import json_date
+from hive.server.common.helpers import json_date, get_hive_accounts_info_view_query_string
+from hive.server.database_api.methods import find_votes_impl, VotesPresentation
+from hive.utils.normalize import sbd_amount, rep_log10
+from hive.utils.account import safe_db_profile_metadata
 
-from hive.utils.normalize import sbd_amount
+ROLES = {-2: 'muted', 0: 'guest', 2: 'member', 4: 'mod', 6: 'admin', 8: 'owner'}
 
 log = logging.getLogger(__name__)
 
 # pylint: disable=too-many-lines
 
-async def load_profiles(db, names):
-    """`get_accounts`-style lookup for `get_state` compat layer."""
-    sql = """SELECT id, name, display_name, about, reputation, vote_weight,
-                    created_at, post_count, profile_image, location, website,
-                    cover_image, rank, following, followers, active_at
-               FROM hive_accounts WHERE name IN :names"""
-    rows = await db.query_all(sql, names=tuple(names))
-    return [_condenser_profile_object(row) for row in rows]
-
-async def load_posts_reblogs(db, ids_with_reblogs, truncate_body=0):
-    """Given a list of (id, reblogged_by) tuples, return posts w/ reblog key."""
-    post_ids = [r[0] for r in ids_with_reblogs]
-    reblog_by = dict(ids_with_reblogs)
-    posts = await load_posts(db, post_ids, truncate_body=truncate_body)
-
-    # Merge reblogged_by data into result set
-    for post in posts:
-        rby = set(reblog_by[post['post_id']].split(','))
-        rby.discard(post['author'])
-        if rby:
-            post['reblogged_by'] = list(rby)
-
-    return posts
-
-ROLES = {-2: 'muted', 0: 'guest', 2: 'member', 4: 'mod', 6: 'admin', 8: 'owner'}
-
-async def load_posts_keyed(db, ids, truncate_body=0):
-    """Given an array of post ids, returns full posts objects keyed by id."""
-    # pylint: disable=too-many-locals
-    assert ids, 'no ids passed to load_posts_keyed'
-
-    # fetch posts and associated author reps
-    sql = """SELECT post_id, community_id, author, permlink, title, body, category, depth,
-                    promoted, payout, payout_at, is_paidout, children, votes,
-                    created_at, updated_at, rshares, raw_json, json,
-                    is_hidden, is_grayed, total_votes, flag_weight
-               FROM hive_posts_cache WHERE post_id IN :ids"""
-    result = await db.query_all(sql, ids=tuple(ids))
-    author_map = await _query_author_map(db, result)
-
-    # TODO: author affiliation?
-    ctx = {}
-    posts_by_id = {}
-    author_ids = {}
-    post_cids = {}
-    for row in result:
-        row = dict(row)
-        author = author_map[row['author']]
-        author_ids[author['id']] = author['name']
-
-        row['author_rep'] = author['reputation']
-        post = _condenser_post_object(row, truncate_body=truncate_body)
-
-        post['blacklists'] = Mutes.lists(post['author'], author['reputation'])
-
-        posts_by_id[row['post_id']] = post
-        post_cids[row['post_id']] = row['community_id']
-
-        cid = row['community_id']
-        if cid:
-            if cid not in ctx:
-                ctx[cid] = []
-            ctx[cid].append(author['id'])
-
-    # TODO: optimize
-    titles = {}
-    roles = {}
-    for cid, account_ids in ctx.items():
-        sql = "SELECT title FROM hive_communities WHERE id = :id"
-        titles[cid] = await db.query_one(sql, id=cid)
-        sql = """SELECT account_id, role_id, title
-                   FROM hive_roles
-                  WHERE community_id = :cid
-                    AND account_id IN :ids"""
-        roles[cid] = {}
-        ret = await db.query_all(sql, cid=cid, ids=tuple(account_ids))
-        for row in ret:
-            name = author_ids[row['account_id']]
-            roles[cid][name] = (row['role_id'], row['title'])
-
-    for pid, post in posts_by_id.items():
-        author = post['author']
-        cid = post_cids[pid]
-        if cid:
-            post['community'] = post['category'] # TODO: True?
-            post['community_title'] = titles[cid] or post['category']
-            role = roles[cid][author] if author in roles[cid] else (0, '')
-            post['author_role'] = ROLES[role[0]]
-            post['author_title'] = role[1]
+def append_statistics_to_post(post, row, is_pinned):
+    """ apply information such as blacklists and community names/roles to a given post """
+    
+    post['blacklists'] = []
+    if 'blacklists' in row and row['blacklists']:
+        split_lists = row['blacklists'].split(',')
+        for blacklist_source in split_lists:
+            post['blacklists'].append(blacklist_source)
+    reputation = post['author_reputation']
+    if reputation < 1:
+        post['blacklists'].append('reputation-0')
+    elif reputation == 1:
+        post['blacklists'].append('reputation-1')
+
+    if 'community_title' in row and row['community_title']:
+        post['community'] = row['category']
+        post['community_title'] = row['community_title']
+        if row['role_id']:
+            post['author_role'] = ROLES[row['role_id']]
+            post['author_title'] = row['role_title']
         else:
-            post['stats']['gray'] = ('irredeemables' in post['blacklists']
-                                     or len(post['blacklists']) >= 2)
-        post['stats']['hide'] = 'irredeemables' in post['blacklists']
-
-
-    sql = """SELECT id FROM hive_posts
-              WHERE id IN :ids AND is_pinned = '1' AND is_deleted = '0'"""
-    for pid in await db.query_col(sql, ids=tuple(ids)):
-        if pid in posts_by_id:
-            posts_by_id[pid]['stats']['is_pinned'] = True
-
-    return posts_by_id
+            post['author_role'] = 'guest'
+            post['author_title'] = ''
 
-async def load_posts(db, ids, truncate_body=0):
-    """Given an array of post ids, returns full objects in the same order."""
-    if not ids:
-        return []
-
-    # posts are keyed by id so we can return output sorted by input order
-    posts_by_id = await load_posts_keyed(db, ids, truncate_body=truncate_body)
-
-    # in rare cases of cache inconsistency, recover and warn
-    missed = set(ids) - posts_by_id.keys()
-    if missed:
-        log.info("get_posts do not exist in cache: %s", repr(missed))
-        for _id in missed:
-            ids.remove(_id)
-            sql = ("SELECT id, author, permlink, depth, created_at, is_deleted "
-                   "FROM hive_posts WHERE id = :id")
-            post = await db.query_row(sql, id=_id)
-            if not post['is_deleted']:
-                # TODO: This should never happen. See #173 for analysis
-                log.error("missing post: %s", dict(post))
-            else:
-                log.info("requested deleted post: %s", dict(post))
-
-    return [posts_by_id[_id] for _id in ids]
+    post['stats']['gray'] = row['is_grayed'] or row['is_muted']
+    if is_pinned:
+        post['stats']['is_pinned'] = True
+    return post
 
-async def _query_author_map(db, posts):
-    """Given a list of posts, returns an author->reputation map."""
-    if not posts: return {}
-    names = tuple({post['author'] for post in posts})
-    sql = "SELECT id, name, reputation FROM hive_accounts WHERE name IN :names"
-    return {r['name']: r for r in await db.query_all(sql, names=names)}
+async def load_profiles(db, names):
+    """`get_accounts`-style lookup for `get_state` compat layer."""
+    sql = get_hive_accounts_info_view_query_string( names )
+    rows = await db.query_all(sql, names=tuple(names))
+    return [_bridge_profile_object(row) for row in rows]
 
-def _condenser_profile_object(row):
+def _bridge_profile_object(row):
     """Convert an internal account record into legacy-steemd style."""
 
-    blacklists = Mutes.lists(row['name'], row['reputation'])
+    # Important: the member `sp` in `stats` is removed because hivemind currently doesn't hold any balances.
+    # The member `vote_weight` from `hive_accounts` is removed as well.
+    profile = safe_db_profile_metadata(row['posting_json_metadata'], row['json_metadata'])
 
     return {
         'id': row['id'],
@@ -157,40 +63,40 @@ def _condenser_profile_object(row):
         'created': json_date(row['created_at']),
         'active': json_date(row['active_at']),
         'post_count': row['post_count'],
-        'reputation': row['reputation'],
-        'blacklists': blacklists,
+        'reputation': rep_log10(row['reputation']),
+        'blacklists': [],
         'stats': {
-            'sp': int(row['vote_weight'] * 0.0005037),
             'rank': row['rank'],
             'following': row['following'],
             'followers': row['followers'],
         },
         'metadata': {
-            'profile': {'name': row['display_name'],
-                        'about': row['about'],
-                        'website': row['website'],
-                        'location': row['location'],
-                        'cover_image': row['cover_image'],
-                        'profile_image': row['profile_image'],
+            'profile': {'name': profile['name'],
+                        'about': profile['about'],
+                        'website': profile['website'],
+                        'location': profile['location'],
+                        'cover_image': profile['cover_image'],
+                        'profile_image': profile['profile_image'],
+                        'blacklist_description': profile.get('blacklist_description', ''),
+                        'muted_list_description': profile.get('muted_list_description', '')
                        }}}
 
-def _condenser_post_object(row, truncate_body=0):
-    """Given a hive_posts_cache row, create a legacy-style post object."""
+def _bridge_post_object(row, truncate_body=0):
+    """Given a hive_posts row, create a legacy-style post object."""
     paid = row['is_paidout']
 
-    # condenser#3424 mitigation
-    if not row['category']:
-        row['category'] = 'undefined'
-
     post = {}
-    post['post_id'] = row['post_id']
+    post['post_id'] = row['id']
     post['author'] = row['author']
     post['permlink'] = row['permlink']
-    post['category'] = row['category']
+    post['category'] = row.get('category', 'undefined')
 
     post['title'] = row['title']
     post['body'] = row['body'][0:truncate_body] if truncate_body else row['body']
-    post['json_metadata'] = json.loads(row['json'])
+    try:
+        post['json_metadata'] = json.loads(row['json'])
+    except Exception:
+        post['json_metadata'] = {}
 
     post['created'] = json_date(row['created_at'])
     post['updated'] = json_date(row['updated_at'])
@@ -200,45 +106,45 @@ def _condenser_post_object(row, truncate_body=0):
 
     post['is_paidout'] = row['is_paidout']
     post['payout_at'] = json_date(row['payout_at'])
-    post['payout'] = float(row['payout'])
-    post['pending_payout_value'] = _amount(0 if paid else row['payout'])
-    post['author_payout_value'] = _amount(row['payout'] if paid else 0)
-    post['curator_payout_value'] = _amount(0)
+    post['payout'] = float(row['payout'] + row['pending_payout'])
+    post['pending_payout_value'] = _amount(0 if paid else post['payout'])
+    post['author_payout_value'] = _amount(0) # supplemented below
+    post['curator_payout_value'] = _amount(0) # supplemented below
     post['promoted'] = _amount(row['promoted'])
 
     post['replies'] = []
-    post['active_votes'] = _hydrate_active_votes(row['votes'])
-    post['author_reputation'] = row['author_rep']
+    post['author_reputation'] = rep_log10(row['author_rep'])
+
+    neg_rshares = (row['rshares'] - row['abs_rshares']) // 2 # effectively the sum of all negative rshares
+    # take the negative rshares, divide by 2, truncate 10 digits (plus the minus sign),
+    #   and count the remaining digits. creates a cheap log10, stake-based flag weight.
+    #   result: 1 = approx $400 of downvoting stake; 2 = $4,000; etc
+    flag_weight = max(len(str(int(neg_rshares / 2))) - 11, 0)
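+    # Worked example (illustrative numbers, not from the source): neg_rshares = -8_000_000_000_000
+    # gives int(neg_rshares / 2) = -4_000_000_000_000, len('-4000000000000') = 14, so
+    # flag_weight = max(14 - 11, 0) = 3 (roughly $40,000 of downvoting stake on the scale above).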
 
     post['stats'] = {
         'hide': row['is_hidden'],
         'gray': row['is_grayed'],
         'total_votes': row['total_votes'],
-        'flag_weight': row['flag_weight']} # TODO: down_weight
+        'flag_weight': float(flag_weight)} # TODO: down_weight
+
 
-    # import fields from legacy object
-    assert row['raw_json']
-    assert len(row['raw_json']) > 32
-    raw_json = json.loads(row['raw_json'])
+    #post['author_reputation'] = rep_to_raw(row['author_rep'])
+
+    post['url'] = row['url']
+    post['beneficiaries'] = row['beneficiaries']
+    post['max_accepted_payout'] = row['max_accepted_payout']
+    post['percent_hbd'] = row['percent_hbd']
 
-    # TODO: move to core, or payout_details
-    post['beneficiaries'] = raw_json['beneficiaries']
-    post['max_accepted_payout'] = raw_json['max_accepted_payout']
-    post['percent_steem_dollars'] = raw_json['percent_steem_dollars'] # TODO: systag?
     if paid:
-        curator_payout = sbd_amount(raw_json['curator_payout_value'])
+        curator_payout = sbd_amount(row['curator_payout_value'])
         post['author_payout_value'] = _amount(row['payout'] - curator_payout)
         post['curator_payout_value'] = _amount(curator_payout)
 
     # TODO: re-evaluate
     if row['depth'] > 0:
-        post['parent_author'] = raw_json['parent_author']
-        post['parent_permlink'] = raw_json['parent_permlink']
-        post['title'] = 'RE: ' + raw_json['root_title'] # PostSummary & comment context
-    #else:
-    #    post['parent_author'] = ''
-    #    post['parent_permlink'] = ''
-    post['url'] = raw_json['url']
+        post['parent_author'] = row['parent_author']
+        post['parent_permlink'] = row['parent_permlink_or_category']
+        post['title'] = 'RE: ' + row['root_title'] # PostSummary & comment context
 
     return post
 
@@ -246,13 +152,3 @@ def _amount(amount, asset='HBD'):
     """Return a steem-style amount string given a (numeric, asset-str)."""
     assert asset == 'HBD', 'unhandled asset %s' % asset
     return "%.3f HBD" % amount
-
-def _hydrate_active_votes(vote_csv):
-    """Convert minimal CSV representation into steemd-style object."""
-    if not vote_csv: return []
-    #return [line.split(',')[:2] for line in vote_csv.split("\n")]
-    votes = []
-    for line in vote_csv.split("\n"):
-        voter, rshares, _, _ = line.split(',')
-        votes.append(dict(voter=voter, rshares=rshares))
-    return votes
diff --git a/hive/server/bridge_api/support.py b/hive/server/bridge_api/support.py
index b71b4017992a755b144ae19546ccf3ef16a95e18..8d27541a6962fdaf6e0026cab5e735233169794f 100644
--- a/hive/server/bridge_api/support.py
+++ b/hive/server/bridge_api/support.py
@@ -1,34 +1,40 @@
 """Handles building condenser-compatible response objects."""
 
 import logging
+from hive.server.common.helpers import (
+    valid_account,
+    valid_permlink)
 #import ujson as json
-import traceback
 
-from hive.server.bridge_api.objects import _condenser_post_object
-from hive.utils.post import post_to_internal
-from hive.utils.normalize import sbd_amount
+from hive.server.bridge_api.methods import get_post
 from hive.server.common.helpers import (
     #ApiError,
     return_error_info)
 
 log = logging.getLogger(__name__)
 
-ROLES = {-2: 'muted', 0: 'guest', 2: 'member', 4: 'admin', 6: 'mod', 8: 'admin'}
-
 @return_error_info
 async def get_post_header(context, author, permlink):
     """Fetch basic post data"""
     db = context['db']
+    valid_account(author)
+    valid_permlink(permlink)
+    sql = """
+        SELECT 
+            hp.id, ha_a.name as author, hpd_p.permlink as permlink, hcd.category as category, depth
+        FROM 
+            hive_posts hp
+        INNER JOIN hive_accounts ha_a ON ha_a.id = hp.author_id
+        INNER JOIN hive_permlink_data hpd_p ON hpd_p.id = hp.permlink_id
+        LEFT JOIN hive_category_data hcd ON hcd.id = hp.category_id
+        WHERE ha_a.name = :author
+            AND hpd_p.permlink = :permlink
+            AND counter_deleted = 0
+    """
 
-    sql = """SELECT id, parent_id, author, permlink, category, depth
-               FROM hive_posts
-              WHERE author = :author
-                AND permlink = :permlink
-                AND is_deleted = '0'"""
     row = await db.query_row(sql, author=author, permlink=permlink)
 
-    if not row:
-        return None
+    assert row, 'Post {}/{} does not exist'.format(author, permlink)
 
     return dict(
         author=row['author'],
@@ -40,67 +46,23 @@ async def get_post_header(context, author, permlink):
 @return_error_info
 async def normalize_post(context, post):
     """Takes a steemd post object and outputs bridge-api normalized version."""
-    db = context['db']
-
-    # load core md
-    sql = """SELECT id, category, community_id, is_muted, is_valid
-               FROM hive_posts
-              WHERE author = :author AND permlink = :permlink"""
-    core = await db.query_row(sql, author=post['author'], permlink=post['permlink'])
-    if not core:
-        core = dict(id=None,
-                    category=post['category'],
-                    community_id=None,
-                    is_muted=False,
-                    is_valid=True)
-
-    # load author
-    sql = """SELECT id, reputation FROM hive_accounts WHERE name = :name"""
-    author = await db.query_row(sql, name=post['author'])
-
-    # append core md
-    post['category'] = core['category']
-    post['community_id'] = core['community_id']
-    post['gray'] = core['is_muted']
-    post['hide'] = not core['is_valid']
-
-    # TODO: there is a bug in steemd.. promoted value returned as 0.000 LIQUID
-    #       until it's promoted.. then returned as X.XXX STABLE. So here we
-    #       ignore the non-promoted case because sbd_amount  asserts proper input units.
-    promoted = sbd_amount(post['promoted']) if post['promoted'] != '0.000 HIVE' else None
-
-    # convert to internal object
-    row = None
-    try:
-        row = post_to_internal(post, core['id'], 'insert', promoted=promoted)
-        row = dict(row)
-    except Exception as e:
-        log.error("post_to_internal: %s %s", repr(e), traceback.format_exc())
-        raise e
-
-    # normalized response
-    ret = None
-    try:
-        if 'promoted' not in row: row['promoted'] = 0
-        row['author_rep'] = author['reputation']
-        ret = _condenser_post_object(row)
-    except Exception as e:
-        log.error("post_to_internal: %s %s", repr(e), traceback.format_exc())
-        raise e
+    # ABW: at the moment it makes zero sense to have that API method since there is
+    # no fat node that would be source of unnormalized posts
+    return await get_post(context, post['author'], post['permlink'])
 
     # decorate
-    if core['community_id']:
-        sql = """SELECT title FROM hive_communities WHERE id = :id"""
-        title = await db.query_one(sql, id=core['community_id'])
+    #if core['community_id']:
+    #    sql = """SELECT title FROM hive_communities WHERE id = :id"""
+    #    title = await db.query_one(sql, id=core['community_id'])
 
-        sql = """SELECT role_id, title
-                   FROM hive_roles
-                  WHERE community_id = :cid
-                    AND account_id = :aid"""
-        role = await db.query_row(sql, cid=core['community_id'], aid=author['id'])
+    #    sql = """SELECT role_id, title
+    #               FROM hive_roles
+    #              WHERE community_id = :cid
+    #                AND account_id = :aid"""
+    #    role = await db.query_row(sql, cid=core['community_id'], aid=author['id'])
 
-        ret['community_title'] = title
-        ret['author_role'] = ROLES[role[0] if role else 0]
-        ret['author_title'] = role[1] if role else ''
+    #    ret['community_title'] = title
+    #    ret['author_role'] = ROLES[role[0] if role else 0]
+    #    ret['author_title'] = role[1] if role else ''
 
-    return ret
+    #return ret
diff --git a/hive/server/bridge_api/thread.py b/hive/server/bridge_api/thread.py
index 570d846137d7ed9c21180424c7fb058b2cb6ab6d..e184dc43c61146c60d5c5012031362faf2d12e85 100644
--- a/hive/server/bridge_api/thread.py
+++ b/hive/server/bridge_api/thread.py
@@ -2,8 +2,8 @@
 
 import logging
 
-from hive.server.bridge_api.objects import load_posts_keyed, _condenser_post_object
-from hive.server.bridge_api.methods import append_statistics_to_post
+from hive.server.bridge_api.objects import _bridge_post_object, append_statistics_to_post
+from hive.server.database_api.methods import find_votes_impl, VotesPresentation
 from hive.server.common.helpers import (
     return_error_info,
     valid_account,
@@ -13,69 +13,44 @@ from hive.server.common.mutes import Mutes
 log = logging.getLogger(__name__)
 
 @return_error_info
-async def get_discussion(context, author, permlink, observer=None):
+async def get_discussion(context, author: str, permlink: str, observer: str = ''):
     """Modified `get_state` thread implementation."""
-    # New index was created: hive_posts_parent_id_btree (CREATE INDEX "hive_posts_parent_id_btree" ON hive_posts btree(parent_id)
-    # We thougth this would be covered by "hive_posts_ix4" btree (parent_id, id) WHERE is_deleted = false but it was not
     db = context['db']
 
     author = valid_account(author)
     permlink = valid_permlink(permlink)
+    observer = valid_account(observer, allow_empty=True)
 
-    sql = """
-        ---get_discussion
-        WITH RECURSIVE child_posts AS (
-            SELECT id, parent_id FROM hive_posts WHERE author = :author AND permlink = :permlink AND NOT is_deleted AND NOT is_muted
-            UNION ALL
-            SELECT children.id, children.parent_id FROM hive_posts children JOIN child_posts ON (children.parent_id = child_posts.id)
-            WHERE NOT children.is_deleted AND NOT children.is_muted
-        )
-        SELECT child_posts.id, child_posts.parent_id, hive_posts_cache.post_id, hive_posts_cache.author, hive_posts_cache.permlink,
-           hive_posts_cache.title, hive_posts_cache.body, hive_posts_cache.category, hive_posts_cache.depth,
-           hive_posts_cache.promoted, hive_posts_cache.payout, hive_posts_cache.payout_at,
-           hive_posts_cache.is_paidout, hive_posts_cache.children, hive_posts_cache.votes,
-           hive_posts_cache.created_at, hive_posts_cache.updated_at, hive_posts_cache.rshares,
-           hive_posts_cache.raw_json, hive_posts_cache.json, hive_accounts.reputation AS author_rep,
-           hive_posts_cache.is_hidden AS is_hidden, hive_posts_cache.is_grayed AS is_grayed,
-           hive_posts_cache.total_votes AS total_votes, hive_posts_cache.flag_weight AS flag_weight,
-           hive_posts_cache.sc_trend AS sc_trend, hive_accounts.id AS acct_author_id
-           FROM child_posts JOIN hive_posts_cache ON (child_posts.id = hive_posts_cache.post_id)
-                            JOIN hive_accounts ON (hive_posts_cache.author = hive_accounts.name)
-    """
-
-    blacklists_for_user = None
-    if observer:
-        blacklists_for_user = await Mutes.get_blacklists_for_observer(observer, context)
-
-    rows = await db.query_all(sql, author=author, permlink=permlink)
+    sql = "SELECT * FROM bridge_get_discussion(:author,:permlink,:observer)"
+    rows = await db.query_all(sql, author=author, permlink=permlink, observer=observer)
     if not rows or len(rows) == 0:
         return {}
     root_id = rows[0]['id']
     all_posts = {}
-    root_post = _condenser_post_object(rows[0])
-    root_post = await append_statistics_to_post(root_post, rows[0], False, blacklists_for_user)
+    root_post = _bridge_post_object(rows[0])
+    root_post['active_votes'] = await find_votes_impl(db, rows[0]['author'], rows[0]['permlink'], VotesPresentation.BridgeApi)
+    root_post = append_statistics_to_post(root_post, rows[0], False)
     root_post['replies'] = []
     all_posts[root_id] = root_post
 
-    id_to_parent_id_map = {}
-    id_to_parent_id_map[root_id] = None
+    parent_to_children_id_map = {}
 
     for index in range(1, len(rows)):
-        id_to_parent_id_map[rows[index]['id']] = rows[index]['parent_id']
-        post = _condenser_post_object(rows[index])
-        post = await append_statistics_to_post(post, rows[index], False, blacklists_for_user)
+        parent_id = rows[index]['parent_id']
+        if parent_id not in parent_to_children_id_map:
+            parent_to_children_id_map[parent_id] = []
+        parent_to_children_id_map[parent_id].append(rows[index]['id'])
+        post = _bridge_post_object(rows[index])
+        post['active_votes'] = await find_votes_impl(db, rows[index]['author'], rows[index]['permlink'], VotesPresentation.BridgeApi)
+        post = append_statistics_to_post(post, rows[index], False)
         post['replies'] = []
         all_posts[post['post_id']] = post
 
-    discussion_map = {}
-    build_discussion_map(root_id, id_to_parent_id_map, discussion_map)
-
-    for key in discussion_map:
-        children = discussion_map[key]
-        if children and len(children) > 0:
-            post = all_posts[key]
-            for child_id in children:
-                post['replies'].append(_ref(all_posts[child_id]))
+    for key in parent_to_children_id_map:
+        children = parent_to_children_id_map[key]
+        post = all_posts[key]
+        for child_id in children:
+            post['replies'].append(_ref(all_posts[child_id]))
 
     #result has to be in form of dictionary of dictionaries {post_ref: post}
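+    # e.g. (hypothetical refs): {'alice/my-post': root_post, 'bob/re-my-post': reply_post},
+    # where root_post['replies'] contains the child ref 'bob/re-my-post'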
     results = {}
@@ -84,77 +59,5 @@ async def get_discussion(context, author, permlink, observer=None):
         results[post_ref] = all_posts[key]
     return results
 
-def build_discussion_map(parent_id, posts, results):
-    results[parent_id] = get_children(parent_id, posts)
-    if (results[parent_id] == []):
-        return
-    else:
-        for post_id in results[parent_id]:
-            build_discussion_map(post_id, posts, results)
-
-def get_children(parent_id, posts):
-    results = []
-    for key in posts:
-        if posts[key] == parent_id:
-            results.append(key)
-    return results;
-
-async def _get_post_id(db, author, permlink):
-    """Given an author/permlink, retrieve the id from db."""
-    sql = ("SELECT id FROM hive_posts WHERE author = :a "
-           "AND permlink = :p AND is_deleted = '0' LIMIT 1")
-    return await db.query_one(sql, a=author, p=permlink)
-
 def _ref(post):
     return post['author'] + '/' + post['permlink']
-
-async def _child_ids(db, parent_ids):
-    """Load child ids for multuple parent ids."""
-    sql = """
-             SELECT parent_id, array_agg(id)
-               FROM hive_posts
-              WHERE parent_id IN :ids
-                AND is_deleted = '0'
-           GROUP BY parent_id
-    """
-    rows = await db.query_all(sql, ids=tuple(parent_ids))
-    return [[row[0], row[1]] for row in rows]
-
-async def _load_discussion(db, root_id):
-    """Load a full discussion thread."""
-    # build `ids` list and `tree` map
-    ids = []
-    tree = {}
-    todo = [root_id]
-    while todo:
-        ids.extend(todo)
-        rows = await _child_ids(db, todo)
-        todo = []
-        for pid, cids in rows:
-            tree[pid] = cids
-            todo.extend(cids)
-
-    # load all post objects, build ref-map
-    posts = await load_posts_keyed(db, ids)
-
-    # remove posts/comments from muted accounts
-    rem_pids = []
-    for pid, post in posts.items():
-        if post['stats']['hide']:
-            rem_pids.append(pid)
-    for pid in rem_pids:
-        if pid in posts:
-            del posts[pid]
-        if pid in tree:
-            rem_pids.extend(tree[pid])
-
-    refs = {pid: _ref(post) for pid, post in posts.items()}
-
-    # add child refs to parent posts
-    for pid, post in posts.items():
-        if pid in tree:
-            post['replies'] = [refs[cid] for cid in tree[pid]
-                               if cid in refs]
-
-    # return all nodes keyed by ref
-    return {refs[pid]: post for pid, post in posts.items()}
diff --git a/hive/server/common/helpers.py b/hive/server/common/helpers.py
index 6edbb81f6763f36b3ec809cfa7abb2f4f2cd10ea..966f104969f8a0c6bfd7d90da59348d07aa4fc08 100644
--- a/hive/server/common/helpers.py
+++ b/hive/server/common/helpers.py
@@ -4,6 +4,10 @@ import re
 from functools import wraps
 import traceback
 import logging
+import datetime
+from dateutil.relativedelta import relativedelta
+from psycopg2.errors import RaiseException
+from jsonrpcserver.exceptions import ApiError as RPCApiError
 
 log = logging.getLogger(__name__)
 
@@ -12,13 +16,22 @@ class ApiError(Exception):
     # pylint: disable=unnecessary-pass
     pass
 
+# values -32768..-32000 are reserved
+ACCESS_TO_DELETED_POST_ERROR_CODE = -31999
+
 def return_error_info(function):
     """Async API method decorator which catches and formats exceptions."""
     @wraps(function)
     async def wrapper(*args, **kwargs):
-        """Catch ApiError and AssersionError (always due to user error)."""
+        """Catch ApiError and AssertionError (always due to user error)."""
         try:
             return await function(*args, **kwargs)
+        except RaiseException as e:
+            msg = e.diag.message_primary
+            if 'was deleted' in msg:
+                raise RPCApiError('Invalid parameters', ACCESS_TO_DELETED_POST_ERROR_CODE, msg)
+            else:
+                raise AssertionError(msg)
         except (ApiError, AssertionError, TypeError, Exception) as e:
             if isinstance(e, KeyError):
                 #TODO: KeyError overloaded for method not found. Any KeyErrors
@@ -50,9 +63,28 @@ def return_error_info(function):
 
 def json_date(date=None):
     """Given a db datetime, return a steemd/json-friendly version."""
-    if not date: return '1969-12-31T23:59:59'
+    if not date or date == datetime.datetime.max: return '1969-12-31T23:59:59'
     return 'T'.join(str(date).split(' '))
 
+def get_hive_accounts_info_view_query_string(names, lite=False):
+    values = []
+    for name in names:
+        values.append("('{}')".format(name))
+    values_str = ','.join(values)
+    sql = """
+              SELECT *
+              FROM {} v
+              JOIN
+                (
+                  VALUES {}
+                )T( _name ) ON v.name = T._name
+          """.format('hive_accounts_info_view_lite' if lite else 'hive_accounts_info_view', values_str)
+    return sql
+
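+# Example of the SQL produced above (hypothetical names ['alice', 'bob'], lite=False), roughly:
+#   SELECT * FROM hive_accounts_info_view v
+#   JOIN ( VALUES ('alice'),('bob') )T( _name ) ON v.name = T._name
+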
+def last_month():
+    """Get the date 1 month ago."""
+    return datetime.datetime.now() + relativedelta(months=-1)
+
 def valid_account(name, allow_empty=False):
     """Returns validated account name or throws Assert."""
     if not name:
@@ -94,13 +126,27 @@ def valid_tag(tag, allow_empty=False):
     assert re.match('^[a-z0-9-_]+$', tag), 'invalid tag `%s`' % tag
     return tag
 
-def valid_limit(limit, ubound=100):
-    """Given a user-provided limit, return a valid int, or raise."""
-    assert limit is not None, 'limit must be provided'
-    limit = int(limit)
-    assert limit > 0, "limit must be positive"
-    assert limit <= ubound, "limit exceeds max (%d > %d)" % (limit, ubound)
-    return limit
+def valid_number(num, default=None, name='integer value', lbound=None, ubound=None):
+    """Given a user-provided number, return a valid int, or raise."""
+    if not num and num != 0:
+        assert default is not None, "%s must be provided" % name
+        num = default
+    try:
+        num = int(num)
+    except (TypeError, ValueError) as e:
+        raise AssertionError(str(e))
+    if lbound is not None and ubound is not None:
+        assert lbound <= num <= ubound, "%s = %d outside valid range [%d:%d]" % (name, num, lbound, ubound)
+    return num
+
+def valid_limit(limit, ubound, default):
+    return valid_number(limit, default, "limit", 1, ubound)
+
+def valid_score(score, ubound, default):
+    return valid_number(score, default, "score", 0, ubound)
+
+def valid_truncate(truncate_body):
+    return valid_number(truncate_body, 0, "truncate_body")
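+
+# Usage sketch for the validators above (argument values are illustrative):
+#   valid_limit(None, ubound=100, default=20) -> 20 (missing limit falls back to the default)
+#   valid_limit(150, ubound=100, default=20)  -> AssertionError (outside [1:100])
+#   valid_truncate(None)                      -> 0  (body will be returned untruncated)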
 
 def valid_offset(offset, ubound=None):
     """Given a user-provided offset, return a valid int, or raise."""
@@ -112,5 +158,27 @@ def valid_offset(offset, ubound=None):
 
 def valid_follow_type(follow_type: str):
     """Ensure follow type is valid steemd type."""
-    assert follow_type in ['blog', 'ignore'], 'invalid follow_type `%s`' % follow_type
-    return follow_type
+    # ABW: should be extended with blacklists etc. (and those should be implemented as next 'state' values)
+    supported_follow_types = dict(blog=1, ignore=2)
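+    # note: unlike the old helper this returns the numeric follow state, e.g. 'blog' -> 1, 'ignore' -> 2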
+    assert follow_type in supported_follow_types, "Unsupported follow type, valid types: {}".format(", ".join(supported_follow_types.keys()))
+    return supported_follow_types[follow_type]
+
+def valid_date(date, allow_empty=False):
+    """ Ensure that date is in correct format """
+    if not date:
+        assert allow_empty, 'Date is blank'
+        return
+    check_date = False
+    # check format "%Y-%m-%d %H:%M:%S"
+    try:
+        check_date = (date == datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S").strftime('%Y-%m-%d %H:%M:%S'))
+    except ValueError:
+        check_date = False
+    # if check failed for format above try another format
+    # check format "%Y-%m-%dT%H:%M:%S"
+    if not check_date:
+        try:
+            check_date = (date == datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S").strftime('%Y-%m-%dT%H:%M:%S'))
+        except ValueError:
+            pass
+
+    assert check_date, "Date should be in format Y-m-d H:M:S or Y-m-dTH:M:S"
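+
+# valid_date accepts e.g. '2020-01-01 12:30:00' or '2020-01-01T12:30:00'; an empty value
+# is accepted only when allow_empty=True, anything else raises AssertionError.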
diff --git a/hive/server/common/mentions.py b/hive/server/common/mentions.py
new file mode 100644
index 0000000000000000000000000000000000000000..2caa30df8affae6722bc8fe3615371ed239cd50a
--- /dev/null
+++ b/hive/server/common/mentions.py
@@ -0,0 +1,17 @@
+"""Utility stats functions."""
+
+import logging
+
+from hive.indexer.db_adapter_holder import DbAdapterHolder
+
+log = logging.getLogger(__name__)
+
+class Mentions(DbAdapterHolder):
+
+    @classmethod
+    def refresh(cls):
+        """Deleting too old mentions"""
+
+        log.warning("Deleting too old mentions")
+
+        cls.db.query_no_return("SELECT delete_hive_posts_mentions();" )
diff --git a/hive/server/common/mutes.py b/hive/server/common/mutes.py
index d663b5f30ecd8bd434a8adfe80db3a666b819690..9c3f1c303d8a49dffa059e10522820a05009eeda 100644
--- a/hive/server/common/mutes.py
+++ b/hive/server/common/mutes.py
@@ -1,132 +1,42 @@
 """List of muted accounts for server process."""
 
-import logging
-from time import perf_counter as perf
-from urllib.request import urlopen, Request
-import ujson as json
-from hive.server.common.helpers import valid_account
-from hive.db.adapter import Db
-
-log = logging.getLogger(__name__)
-
-GET_BLACKLISTED_ACCOUNTS_SQL = """
-WITH blacklisted_users AS (
-    SELECT following, 'my_blacklist' AS source FROM hive_follows WHERE follower =
-        (SELECT id FROM hive_accounts WHERE name = :observer )
-    AND blacklisted
-    UNION ALL
-    SELECT following, 'my_followed_blacklists' AS source FROM hive_follows WHERE follower IN
-    (SELECT following FROM hive_follows WHERE follower =
-        (SELECT id FROM hive_accounts WHERE name = :observer )
-    AND follow_blacklists) AND blacklisted
-)
-SELECT following, source FROM blacklisted_users
-"""
-
-def _read_url(url):
-    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
-    return urlopen(req).read()
-
 class Mutes:
     """Singleton tracking muted accounts."""
 
-    _instance = None
-    url = None
-    accounts = set() # list/irredeemables
-    blist = set() # list/any-blacklist
-    blist_map = dict() # cached account-list map
-    fetched = None
-    all_accounts = dict()
-
-    @classmethod
-    def instance(cls):
-        """Get the shared instance."""
-        assert cls._instance, 'set_shared_instance was never called'
-        return cls._instance
-
     @classmethod
-    def set_shared_instance(cls, instance):
-        """Set the global/shared instance."""
-        cls._instance = instance
-
-    def __init__(self, url, blacklist_api_url):
-        """Initialize a muted account list by loading from URL"""
-        self.url = url
-        self.blacklist_api_url = blacklist_api_url
-        if url:
-            self.load()
-
-    def load(self):
-        """Reload all accounts from irredeemables endpoint and global lists."""
-        self.accounts = set(_read_url(self.url).decode('utf8').split())
-        jsn = _read_url(self.blacklist_api_url + "/blacklists")
-        self.blist = set(json.loads(jsn))
-        log.warning("%d muted, %d blacklisted", len(self.accounts), len(self.blist))
-
-        self.all_accounts.clear()
-        sql = "select id, name from hive_accounts"
-        db = Db.instance()
-        sql_result = db.query_all(sql)
-        for row in sql_result:
-            self.all_accounts[row['id']] = row['name']
-        self.fetched = perf()
-
-    @classmethod
-    def all(cls):
-        """Return the set of all muted accounts from singleton instance."""
-        return cls.instance().accounts
-
-    @classmethod
-    async def get_blacklists_for_observer(cls, observer=None, context=None):
-        """ fetch the list of users that the observer has blacklisted """
+    async def get_blacklisted_for_observer(cls, observer, context, flags=1+2+4+8):
+        """ fetch the list of users that the observer has blacklisted
+            flags allow filtering the query:
+            1 - accounts blacklisted by observer
+            2 - accounts blacklisted by observer's follow_blacklist lists
+            4 - accounts muted by observer
+            8 - accounts muted by observer's follow_mutes lists
+            by default all flags are set
+        """
         if not observer or not context:
             return {}
 
-        if int(perf() - cls.instance().fetched) > 3600:
-            cls.instance().load()
-
         blacklisted_users = {}
 
         db = context['db']
-        sql = GET_BLACKLISTED_ACCOUNTS_SQL
-        sql_result = await db.query_all(sql, observer=observer)
+        sql = "SELECT * FROM mutes_get_blacklisted_for_observer( (:observer)::VARCHAR, (:flags)::INTEGER )"
+        sql_result = await db.query_all(sql, observer=observer, flags=flags)
         for row in sql_result:
-            account_name = cls.all_accounts[row['following']]
+            account_name = row['account']
             if account_name not in blacklisted_users:
-                blacklisted_users[account_name] = []
-            blacklisted_users[account_name].append(row['source'])
+                blacklisted_users[account_name] = ([], [])
+            if row['is_blacklisted']:
+                blacklisted_users[account_name][0].append(row['source'])
+            else: # muted
+                blacklisted_users[account_name][1].append(row['source'])
         return blacklisted_users
 
     @classmethod
-    def lists(cls, name, rep):
-        """Return blacklists the account belongs to."""
-        assert name
-        inst = cls.instance()
-
-        # update hourly
-        if perf() - inst.fetched > 3600:
-            inst.load()
-
-        if name not in inst.blist and name not in inst.accounts:
-            if name in inst.blist_map: #this user was blacklisted, but has been removed from the blacklists since the last check
-                inst.blist_map.pop(name)    #so just pop them from the cache
-            return []
-        else:   # user is on at least 1 list
-            blacklists_for_user = []
-            if name not in inst.blist_map:  #user has been added to a blacklist since the last check so figure out what lists they belong to
-                if name in inst.blist: #blacklisted accounts
-                    url = "%s/user/%s" % (inst.blacklist_api_url, name)
-                    lists = json.loads(_read_url(url))
-                    blacklists_for_user.extend(lists['blacklisted'])
-
-                if name in inst.accounts:   #muted accounts
-                    if 'irredeemables' not in blacklists_for_user:
-                        blacklists_for_user.append('irredeemables')
-
-            if int(rep) < 1:
-                blacklists_for_user.append('reputation-0')  #bad reputation
-            if int(rep) == 1:
-                blacklists_for_user.append('reputation-1') #bad reputation
+    async def get_blacklists_for_observer(cls, observer, context, follow_blacklist = True, follow_muted = True):
+        """ fetch the list of accounts that are followed by observer through follow_blacklist/follow_muted """
+        if not observer or not context:
+            return {}
 
-            inst.blist_map[name] = blacklists_for_user
-            return inst.blist_map[name]
+        db = context['db']
+        sql = "SELECT * FROM mutes_get_blacklists_for_observer( (:observer)::VARCHAR, (:fb)::BOOLEAN, (:fm)::BOOLEAN )"
+        return await db.query_all(sql, observer=observer, fb=follow_blacklist, fm=follow_muted)
diff --git a/hive/server/common/payout_stats.py b/hive/server/common/payout_stats.py
index 7a3e908738ecdee6d98c3384ca6a811b096aab1c..59365ee842590649e9956196ce00274aac3db9f5 100644
--- a/hive/server/common/payout_stats.py
+++ b/hive/server/common/payout_stats.py
@@ -1,72 +1,17 @@
 """Utility stats functions."""
 
 import logging
-from time import perf_counter as perf
 
-log = logging.getLogger(__name__)
-
-class PayoutStats:
-    """Singleton responsible for maintaining payout_stats temp table."""
-
-    _instance = None
-    _updated = None
-    _db = None
+from hive.indexer.db_adapter_holder import DbAdapterHolder
 
-    @classmethod
-    def instance(cls):
-        """Get the shared instance."""
-        assert cls._instance, 'set_shared_instance was never called'
-        return cls._instance
-
-    @classmethod
-    def set_shared_instance(cls, instance):
-        """Set the global/shared instance."""
-        cls._instance = instance
+log = logging.getLogger(__name__)
 
-    def __init__(self, db):
-        self._db = db
+class PayoutStats(DbAdapterHolder):
 
     @classmethod
-    def all(cls):
-        """Return the set of all muted accounts from singleton instance."""
-        return cls.instance().accounts
-
-    async def generate(self):
-        """Re-generate payout stats temp table."""
-        if self._updated and perf() - self._updated < 60 * 60:
-            return # only update if age > 1hr
-
-        sql = """
-            SELECT community_id,
-                   author,
-                   SUM(payout) payout,
-                   COUNT(*) posts,
-                   NULL authors
-              FROM hive_posts_cache
-             WHERE is_paidout = '0'
-          GROUP BY community_id, author
-
-             UNION ALL
-
-            SELECT community_id,
-                   NULL author,
-                   SUM(payout) payout,
-                   COUNT(*) posts,
-                   COUNT(DISTINCT(author)) authors
-              FROM hive_posts_cache
-             WHERE is_paidout = '0'
-          GROUP BY community_id
-        """
-
-        log.warning("Rebuilding payout_stats")
+    def generate(cls):
+        """Re-generate payout_stats_view."""
 
-        await self._db.query("""
-            BEGIN;
-              DROP TABLE IF EXISTS payout_stats;
-            CREATE TABLE payout_stats AS %s;
-            CREATE INDEX payout_stats_ix1
-                ON payout_stats (community_id, author, payout);
-            COMMIT;
-        """ % sql)
+        log.warning("Rebuilding payout_stats_view")
 
-        self._updated = perf()
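+        # CONCURRENTLY keeps the view readable while it is rebuilt; note that PostgreSQL
+        # requires a unique index on payout_stats_view for the CONCURRENTLY form to work.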
+        cls.db.query_no_return("REFRESH MATERIALIZED VIEW CONCURRENTLY payout_stats_view;" )
diff --git a/hive/server/condenser_api/call.py b/hive/server/condenser_api/call.py
index 71f27d59fee0bcde30d7a1b54adeae21fe3f89f3..a41ab23425160d2e0a94cdc624106dbb18f90b61 100644
--- a/hive/server/condenser_api/call.py
+++ b/hive/server/condenser_api/call.py
@@ -10,24 +10,31 @@ from hive.server.condenser_api.methods import (
     get_followers,
     get_following,
     get_follow_count,
+
+    get_reblogged_by,
+    get_account_reputations,
+
     get_content,
     get_content_replies,
+    
     get_discussions_by_trending,
     get_discussions_by_hot,
     get_discussions_by_promoted,
     get_discussions_by_created,
+    get_post_discussions_by_payout,
+    get_comment_discussions_by_payout,
+
     get_discussions_by_blog,
     get_discussions_by_feed,
     get_discussions_by_comments,
     get_replies_by_last_update,
 
-    get_post_discussions_by_payout,
-    get_comment_discussions_by_payout,
-
     get_discussions_by_author_before_date,
     get_blog,
     get_blog_entries,
+
     get_account_votes,
+    get_active_votes
 )
 
 def _strict_list(params, expected_len, min_len=None):
@@ -92,6 +99,11 @@ async def call(context, api, method, params):
     elif method == 'get_follow_count':
         return await get_follow_count(context, *_strict_list(params, 1))
 
+    elif method == 'get_reblogged_by':
+        return await get_reblogged_by(context, *_strict_list(params, 2))
+    elif method == 'get_account_reputations':
+        return await get_account_reputations(context, *_strict_list(params, 2))
+
     # Content primitives
     elif method == 'get_content':
         return await get_content(context, *_strict_list(params, 3, 2))
@@ -141,5 +153,7 @@ async def call(context, api, method, params):
     # Misc/dummy
     elif method == 'get_account_votes':
         return await get_account_votes(context, *_strict_list(params, 1))
+    elif method == 'get_active_votes':
+        return await get_active_votes(context, *_strict_list(params, 2))
 
-    raise ApiError("unknown method: %s.%s" % (api, method))
+    assert False, "unknown method: %s.%s" % (api, method)
diff --git a/hive/server/condenser_api/cursor.py b/hive/server/condenser_api/cursor.py
index 2e3b42ba437f74b92b74d46987daad292bb51928..70ed8c626ef9a1fe268cb63ac2e36ab166fd5727 100644
--- a/hive/server/condenser_api/cursor.py
+++ b/hive/server/condenser_api/cursor.py
@@ -1,376 +1,64 @@
 """Cursor-based pagination queries, mostly supporting condenser_api."""
 
-from datetime import datetime
-from dateutil.relativedelta import relativedelta
+from hive.server.common.helpers import last_month
 
-from hive.utils.normalize import rep_to_raw
+from hive.server.condenser_api.objects import _condenser_post_object
+from hive.server.database_api.methods import find_votes_impl, VotesPresentation
 
 # pylint: disable=too-many-lines
 
-def last_month():
-    """Get the date 1 month ago."""
-    return datetime.now() + relativedelta(months=-1)
+async def get_followers(db, account: str, start: str, state: int, limit: int):
+    """Get a list of accounts following given account."""
+    sql = "SELECT * FROM condenser_get_followers( (:account)::VARCHAR, (:start)::VARCHAR, :type, :limit )"
+    return await db.query_col(sql, account=account, start=start, type=state, limit=limit)
 
-async def get_post_id(db, author, permlink):
-    """Given an author/permlink, retrieve the id from db."""
-    sql = ("SELECT id FROM hive_posts WHERE author = :a "
-           "AND permlink = :p AND is_deleted = '0' LIMIT 1")
-    return await db.query_one(sql, a=author, p=permlink)
-
-async def get_child_ids(db, post_id):
-    """Given a parent post id, retrieve all child ids."""
-    sql = "SELECT id FROM hive_posts WHERE parent_id = :id AND is_deleted = '0'"
-    return await db.query_col(sql, id=post_id)
-
-async def _get_post_id(db, author, permlink):
-    """Get post_id from hive db."""
-    sql = "SELECT id FROM hive_posts WHERE author = :a AND permlink = :p"
-    return await db.query_one(sql, a=author, p=permlink)
-
-async def _get_account_id(db, name):
-    """Get account id from hive db."""
-    assert name, 'no account name specified'
-    _id = await db.query_one("SELECT id FROM hive_accounts WHERE name = :n", n=name)
-    assert _id, "account not found: `%s`" % name
-    return _id
-
-
-async def get_followers(db, account: str, start: str, follow_type: str, limit: int):
-    """Get a list of accounts following a given account."""
-    account_id = await _get_account_id(db, account)
-    start_id = await _get_account_id(db, start) if start else None
-    state = 2 if follow_type == 'ignore' else 1
-
-    seek = ''
-    if start_id:
-        seek = """AND hf.created_at <= (
-                     SELECT created_at FROM hive_follows
-                      WHERE following = :account_id
-                        AND follower = :start_id)"""
-
-    sql = """
-        SELECT name FROM hive_follows hf
-     LEFT JOIN hive_accounts ON hf.follower = id
-         WHERE hf.following = :account_id
-           AND state = :state %s
-      ORDER BY hf.created_at DESC
-         LIMIT :limit
-    """ % seek
-
-    return await db.query_col(sql, account_id=account_id, start_id=start_id,
-                              state=state, limit=limit)
-
-
-async def get_following(db, account: str, start: str, follow_type: str, limit: int):
+async def get_following(db, account: str, start: str, state: int, limit: int):
     """Get a list of accounts followed by a given account."""
-    account_id = await _get_account_id(db, account)
-    start_id = await _get_account_id(db, start) if start else None
-    state = 2 if follow_type == 'ignore' else 1
-
-    seek = ''
-    if start_id:
-        seek = """AND hf.created_at <= (
-                     SELECT created_at FROM hive_follows
-                      WHERE follower = :account_id
-                        AND following = :start_id)"""
-
-    sql = """
-        SELECT name FROM hive_follows hf
-     LEFT JOIN hive_accounts ON hf.following = id
-         WHERE hf.follower = :account_id
-           AND state = :state %s
-      ORDER BY hf.created_at DESC
-         LIMIT :limit
-    """ % seek
-
-    return await db.query_col(sql, account_id=account_id, start_id=start_id,
-                              state=state, limit=limit)
-
-
-async def get_follow_counts(db, account: str):
-    """Return following/followers count for `account`."""
-    account_id = await _get_account_id(db, account)
-    sql = """SELECT following, followers
-               FROM hive_accounts
-              WHERE id = :account_id"""
-    return dict(await db.query_row(sql, account_id=account_id))
+    sql = "SELECT * FROM condenser_get_following( (:account)::VARCHAR, (:start)::VARCHAR, :type, :limit )"
+    return await db.query_col(sql, account=account, start=start, type=state, limit=limit)
 
 
 async def get_reblogged_by(db, author: str, permlink: str):
     """Return all rebloggers of a post."""
-    post_id = await _get_post_id(db, author, permlink)
-    assert post_id, "post not found"
-    sql = """SELECT name FROM hive_accounts
-               JOIN hive_feed_cache ON id = account_id
-              WHERE post_id = :post_id"""
-    names = await db.query_col(sql, post_id=post_id)
-    names.remove(author)
-    return names
-
-
-async def get_account_reputations(db, account_lower_bound, limit):
-    """Enumerate account reputations."""
-    seek = ''
-    if account_lower_bound:
-        seek = "WHERE name >= :start"
-
-    sql = """SELECT name, reputation
-               FROM hive_accounts %s
-           ORDER BY name
-              LIMIT :limit""" % seek
-    rows = await db.query_all(sql, start=account_lower_bound, limit=limit)
-    return [dict(name=r[0], reputation=rep_to_raw(r[1])) for r in rows]
-
-
-async def pids_by_query(db, sort, start_author, start_permlink, limit, tag):
-    """Get a list of post_ids for a given posts query.
-
-    `sort` can be trending, hot, created, promoted, payout, or payout_comments.
-    """
-    # pylint: disable=too-many-arguments,bad-whitespace,line-too-long
-    assert sort in ['trending', 'hot', 'created', 'promoted',
-                    'payout', 'payout_comments']
-
-    params = {             # field      pending posts   comment promoted    todo        community
-        'trending':        ('sc_trend', True,   False,  False,  False),   # posts=True  pending=False
-        'hot':             ('sc_hot',   True,   False,  False,  False),   # posts=True  pending=False
-        'created':         ('post_id',  False,  True,   False,  False),
-        'promoted':        ('promoted', True,   False,  False,  True),    # posts=True
-        'payout':          ('payout',   True,   True,   False,  False),
-        'payout_comments': ('payout',   True,   False,  True,   False),
-    }[sort]
-
-    table = 'hive_posts_cache'
-    field = params[0]
-    where = []
-
-    # primary filters
-    if params[1]: where.append("is_paidout = '0'")
-    if params[2]: where.append('depth = 0')
-    if params[3]: where.append('depth > 0')
-    if params[4]: where.append('promoted > 0')
-
-    # filter by community, category, or tag
-    if tag:
-        #if tag[:5] == 'hive-'
-        #    cid = get_community_id(tag)
-        #    where.append('community_id = :cid')
-        if sort in ['payout', 'payout_comments']:
-            where.append('category = :tag')
-        else:
-            if tag[:5] == 'hive-':
-                where.append('category = :tag')
-                if sort in ('trending', 'hot'):
-                    where.append('depth = 0')
-            sql = "SELECT post_id FROM hive_post_tags WHERE tag = :tag"
-            where.append("post_id IN (%s)" % sql)
-
-    start_id = None
-    if start_permlink:
-        start_id = await _get_post_id(db, start_author, start_permlink)
-        if not start_id:
-            return []
-
-        sql = "%s <= (SELECT %s FROM %s WHERE post_id = :start_id)"
-        where.append(sql % (field, field, table))
-
-    sql = ("SELECT post_id FROM %s WHERE %s ORDER BY %s DESC LIMIT :limit"
-           % (table, ' AND '.join(where), field))
-
-    return await db.query_col(sql, tag=tag, start_id=start_id, limit=limit)
-
-
-async def pids_by_blog(db, account: str, start_author: str = '',
-                       start_permlink: str = '', limit: int = 20):
-    """Get a list of post_ids for an author's blog."""
-    account_id = await _get_account_id(db, account)
-
-    seek = ''
-    start_id = None
-    if start_permlink:
-        start_id = await _get_post_id(db, start_author, start_permlink)
-        if not start_id:
-            return []
 
-        seek = """
-          AND created_at <= (
-            SELECT created_at
-              FROM hive_feed_cache
-             WHERE account_id = :account_id
-               AND post_id = :start_id)
-        """
+    sql = "SELECT * FROM condenser_get_names_by_reblogged( (:author)::VARCHAR, (:permlink)::VARCHAR )"
+    names = await db.query_col(sql, author=author, permlink=permlink)
 
-    sql = """
-        SELECT post_id
-          FROM hive_feed_cache
-         WHERE account_id = :account_id %s
-      ORDER BY created_at DESC
-         LIMIT :limit
-    """ % seek
-
-    return await db.query_col(sql, account_id=account_id, start_id=start_id, limit=limit)
-
-
-async def pids_by_blog_by_index(db, account: str, start_index: int, limit: int = 20):
-    """Get post_ids for an author's blog (w/ reblogs), paged by index/limit.
-
-    Examples:
-    (acct, 2) = returns blog entries 0 up to 2 (3 oldest)
-    (acct, 0) = returns all blog entries (limit 0 means return all?)
-    (acct, 2, 1) = returns 1 post starting at idx 2
-    (acct, 2, 3) = returns 3 posts: idxs (2,1,0)
-    """
-
-    account_id = await _get_account_id(db, account)
-
-    if start_index in (-1, 0):
-        sql = """SELECT COUNT(*) - 1 FROM hive_feed_cache
-                  WHERE account_id = :account_id"""
-        start_index = await db.query_one(sql, account_id=account_id)
-        if start_index < 0:
-            return (0, [])
-
-    offset = start_index - limit + 1
-    assert offset >= 0, ('start_index and limit combination is invalid (%d, %d)'
-                         % (start_index, limit))
-
-    sql = """
-        SELECT post_id
-          FROM hive_feed_cache
-         WHERE account_id = :account_id
-      ORDER BY created_at
-         LIMIT :limit
-        OFFSET :offset
-    """
-
-    ids = await db.query_col(sql, account_id=account_id, limit=limit, offset=offset)
-    return (start_index, list(reversed(ids)))
-
-
-async def pids_by_blog_without_reblog(db, account: str, start_permlink: str = '', limit: int = 20):
-    """Get a list of post_ids for an author's blog without reblogs."""
-
-    seek = ''
-    start_id = None
-    if start_permlink:
-        start_id = await _get_post_id(db, account, start_permlink)
-        if not start_id:
-            return []
-        seek = "AND id <= :start_id"
-
-    sql = """
-        SELECT id
-          FROM hive_posts
-         WHERE author = :account %s
-           AND is_deleted = '0'
-           AND depth = 0
-      ORDER BY id DESC
-         LIMIT :limit
-    """ % seek
-
-    return await db.query_col(sql, account=account, start_id=start_id, limit=limit)
-
-
-async def pids_by_feed_with_reblog(db, account: str, start_author: str = '',
-                                   start_permlink: str = '', limit: int = 20):
-    """Get a list of [post_id, reblogged_by_str] for an account's feed."""
-    account_id = await _get_account_id(db, account)
-
-    seek = ''
-    start_id = None
-    if start_permlink:
-        start_id = await _get_post_id(db, start_author, start_permlink)
-        if not start_id:
-            return []
-
-        seek = """
-          HAVING MIN(hive_feed_cache.created_at) <= (
-            SELECT MIN(created_at) FROM hive_feed_cache WHERE post_id = :start_id
-               AND account_id IN (SELECT following FROM hive_follows
-                                  WHERE follower = :account AND state = 1))
-        """
-
-    sql = """
-        SELECT post_id, string_agg(name, ',') accounts
-          FROM hive_feed_cache
-          JOIN hive_follows ON account_id = hive_follows.following AND state = 1
-          JOIN hive_accounts ON hive_follows.following = hive_accounts.id
-         WHERE hive_follows.follower = :account
-           AND hive_feed_cache.created_at > :cutoff
-      GROUP BY post_id %s
-      ORDER BY MIN(hive_feed_cache.created_at) DESC LIMIT :limit
-    """ % seek
-
-    result = await db.query_all(sql, account=account_id, start_id=start_id,
-                                limit=limit, cutoff=last_month())
-    return [(row[0], row[1]) for row in result]
-
-
-async def pids_by_account_comments(db, account: str, start_permlink: str = '', limit: int = 20):
-    """Get a list of post_ids representing comments by an author."""
-    seek = ''
-    start_id = None
-    if start_permlink:
-        start_id = await _get_post_id(db, account, start_permlink)
-        if not start_id:
-            return []
-
-        seek = "AND id <= :start_id"
-
-    # `depth` in ORDER BY is a no-op, but forces an ix3 index scan (see #189)
-    sql = """
-        SELECT id FROM hive_posts
-         WHERE author = :account %s
-           AND depth > 0
-           AND is_deleted = '0'
-      ORDER BY id DESC, depth
-         LIMIT :limit
-    """ % seek
-
-    return await db.query_col(sql, account=account, start_id=start_id, limit=limit)
-
-
-async def pids_by_replies_to_account(db, start_author: str, start_permlink: str = '',
-                                     limit: int = 20):
-    """Get a list of post_ids representing replies to an author.
-
-    To get the first page of results, specify `start_author` as the
-    account being replied to. For successive pages, provide the
-    last loaded reply's author/permlink.
-    """
-    seek = ''
-    start_id = None
-    if start_permlink:
-        sql = """
-          SELECT parent.author,
-                 child.id
-            FROM hive_posts child
-            JOIN hive_posts parent
-              ON child.parent_id = parent.id
-           WHERE child.author = :author
-             AND child.permlink = :permlink
-        """
-
-        row = await db.query_row(sql, author=start_author, permlink=start_permlink)
-        if not row:
-            return []
-
-        parent_account = row[0]
-        start_id = row[1]
-        seek = "AND id <= :start_id"
-    else:
-        parent_account = start_author
-
-    sql = """
-       SELECT id FROM hive_posts
-        WHERE parent_id IN (SELECT id FROM hive_posts
-                             WHERE author = :parent
-                               AND is_deleted = '0'
-                          ORDER BY id DESC
-                             LIMIT 10000) %s
-          AND is_deleted = '0'
-     ORDER BY id DESC
-        LIMIT :limit
-    """ % seek
+    if author in names:
+        names.remove(author)
+    return names
 
-    return await db.query_col(sql, parent=parent_account, start_id=start_id, limit=limit)
+async def process_posts(db, sql_result, truncate_body: int = 0):
+    posts = []
+    for row in sql_result:
+        row = dict(row)
+        post = _condenser_post_object(row, truncate_body=truncate_body)
+
+        post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.CondenserApi)
+        posts.append(post)
+
+    return posts
+
+async def get_by_blog_without_reblog(db, account: str, start_permlink: str = '', limit: int = 20, truncate_body: int = 0):
+    """Get a list of posts for an author's blog without reblogs."""
+    sql = "SELECT * FROM condenser_get_by_blog_without_reblog( (:author)::VARCHAR, (:permlink)::VARCHAR, :limit )"
+    result = await db.query_all(sql, author=account, permlink=start_permlink, limit=limit)
+    return await process_posts(db, result, truncate_body)
+
+async def get_by_account_comments(db, account: str, start_permlink: str = '', limit: int = 20, truncate_body: int = 0):
+    """Get a list of posts representing comments by an author."""
+    sql = "SELECT * FROM condenser_get_by_account_comments( (:author)::VARCHAR, (:permlink)::VARCHAR, :limit )"
+    result = await db.query_all(sql, author=account, permlink=start_permlink, limit=limit)
+    return await process_posts(db, result, truncate_body)
+
+async def get_by_replies_to_account(db, start_author: str, start_permlink: str = '', limit: int = 20, truncate_body: int = 0):
+    """Get a list of posts representing replies to an author."""
+    sql = "SELECT * FROM bridge_get_account_posts_by_replies( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False )"
+    result = await db.query_all(sql, account=start_author, author=start_author if start_permlink else '', permlink=start_permlink, limit=limit)
+    return await process_posts(db, result, truncate_body)
+
+async def get_by_blog(db, account: str = '', start_author: str = '', start_permlink: str = '', limit: int = 20):
+    """Get a list of posts for an author's blog."""
+    sql = "SELECT * FROM condenser_get_by_blog( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, :limit )"
+    result = await db.query_all(sql, account=account, author=start_author, permlink=start_permlink, limit=limit)
+    return await process_posts(db, result)
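+
+# Usage sketch (hypothetical account and db handle): every helper above returns fully
+# hydrated condenser-style post objects, e.g.
+#   posts = await get_by_blog(db, account='alice', limit=20)
+#   comments = await get_by_account_comments(db, 'alice', limit=10, truncate_body=1024)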
diff --git a/hive/server/condenser_api/get_state.py b/hive/server/condenser_api/get_state.py
index a66502d545021def091e636a94379d970c5cddca..c4f5bf4d0b8622ee7ff3f4a3951c4c9299b6ecde 100644
--- a/hive/server/condenser_api/get_state.py
+++ b/hive/server/condenser_api/get_state.py
@@ -11,9 +11,7 @@ from hive.server.common.mutes import Mutes
 
 from hive.server.condenser_api.objects import (
     load_accounts,
-    load_posts,
-    load_posts_keyed,
-    load_posts_reblogs)
+    _condenser_post_object)
 from hive.server.common.helpers import (
     ApiError,
     return_error_info,
@@ -27,6 +25,9 @@ from hive.server.condenser_api.tags import (
 
 import hive.server.condenser_api.cursor as cursor
 
+from hive.server.condenser_api.methods import get_posts_by_given_sort, get_discussions_by_feed_impl
+from hive.server.database_api.methods import find_votes_impl, VotesPresentation
+
 log = logging.getLogger(__name__)
 
 # steemd account 'tabs' - specific post list queries
@@ -128,15 +129,15 @@ async def get_state(context, path: str):
         author = valid_account(part[1][1:])
         permlink = valid_permlink(part[2])
         state['content'] = await _load_discussion(db, author, permlink)
-        state['accounts'] = await _load_content_accounts(db, state['content'])
+        state['accounts'] = await _load_content_accounts(db, state['content'], True)
 
     # ranked posts - `/sort/category`
     elif part[0] in POST_LIST_SORTS:
         assert not part[2], "unexpected discussion path part[2] %s" % path
         sort = valid_sort(part[0])
         tag = valid_tag(part[1].lower(), allow_empty=True)
-        pids = await cursor.pids_by_query(db, sort, '', '', 20, tag)
-        state['content'] = _keyed_posts(await load_posts(db, pids))
+        pids = await get_posts_by_given_sort(context, sort, '', '', 20, tag)
+        state['content'] = _keyed_posts(pids)
         state['discussion_idx'] = {tag: {sort: list(state['content'].keys())}}
         state['tag_idx'] = {'trending': await get_top_trending_tags_summary(context)}
 
@@ -160,17 +161,13 @@ async def _get_account_discussion_by_key(db, account, key):
     assert key, 'discussion key must be specified'
 
     if key == 'recent_replies':
-        pids = await cursor.pids_by_replies_to_account(db, account, '', 20)
-        posts = await load_posts(db, pids)
+        posts = await cursor.get_by_replies_to_account(db, account, '', 20)
     elif key == 'comments':
-        pids = await cursor.pids_by_account_comments(db, account, '', 20)
-        posts = await load_posts(db, pids)
+        posts = await cursor.get_by_account_comments(db, account, '', 20)
     elif key == 'blog':
-        pids = await cursor.pids_by_blog(db, account, '', '', 20)
-        posts = await load_posts(db, pids)
+        posts = await cursor.get_by_blog(db, account, '', '', 20)
     elif key == 'feed':
-        res = await cursor.pids_by_feed_with_reblog(db, account, '', '', 20)
-        posts = await load_posts_reblogs(db, res)
+        posts = await get_discussions_by_feed_impl(db, account, '', '', 20)
     else:
         raise ApiError("unknown account discussion key %s" % key)
 
@@ -206,12 +203,15 @@ def _keyed_posts(posts):
 def _ref(post):
     return post['author'] + '/' + post['permlink']
 
-async def _load_content_accounts(db, content):
+def _ref_parent(post):
+    return post['parent_author'] + '/' + post['parent_permlink']
+
+async def _load_content_accounts(db, content, lite=False):
     if not content:
         return {}
     posts = content.values()
     names = set(map(lambda p: p['author'], posts))
-    accounts = await load_accounts(db, names)
+    accounts = await load_accounts(db, names, lite)
     return {a['name']: a for a in accounts}
 
 async def _load_account(db, name):
@@ -228,55 +228,44 @@ async def _child_ids(db, parent_ids):
              SELECT parent_id, array_agg(id)
                FROM hive_posts
               WHERE parent_id IN :ids
-                AND is_deleted = '0'
+                AND counter_deleted = 0
            GROUP BY parent_id
     """
     rows = await db.query_all(sql, ids=tuple(parent_ids))
     return [[row[0], row[1]] for row in rows]
 
-async def _load_discussion(db, author, permlink):
+async def _load_discussion(db, author, permlink, observer=None):
     """Load a full discussion thread."""
-    root_id = await cursor.get_post_id(db, author, permlink)
-    if not root_id:
-        return {}
 
-    # build `ids` list and `tree` map
-    ids = []
-    tree = {}
-    todo = [root_id]
-    while todo:
-        ids.extend(todo)
-        rows = await _child_ids(db, todo)
-        todo = []
-        for pid, cids in rows:
-            tree[pid] = cids
-            todo.extend(cids)
-
-    # load all post objects, build ref-map
-    posts = await load_posts_keyed(db, ids)
-
-    # remove posts/comments from muted accounts
-    muted_accounts = Mutes.all()
-    rem_pids = []
-    for pid, post in posts.items():
-        if post['author'] in muted_accounts:
-            rem_pids.append(pid)
-    for pid in rem_pids:
-        if pid in posts:
-            del posts[pid]
-        if pid in tree:
-            rem_pids.extend(tree[pid])
-
-    refs = {pid: _ref(post) for pid, post in posts.items()}
-
-    # add child refs to parent posts
-    for pid, post in posts.items():
-        if pid in tree:
-            post['replies'] = [refs[cid] for cid in tree[pid]
-                               if cid in refs]
-
-    # return all nodes keyed by ref
-    return {refs[pid]: post for pid, post in posts.items()}
+    sql = "SELECT * FROM bridge_get_discussion(:author,:permlink,:observer)"
+    sql_result = await db.query_all(sql, author=author, permlink=permlink, observer=observer)
+
+    posts = []
+    posts_by_id = {}
+    replies = {}
+
+    for row in sql_result:
+      post = _condenser_post_object(row)
+
+      post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.CondenserApi)
+      posts.append(post)
+
+      parent_key = _ref_parent(post)
+      _key = _ref(post)
+      if parent_key not in replies:
+        replies[parent_key] = []
+      replies[parent_key].append(_key)
+
+    for post in posts:
+      _key = _ref(post)
+      if _key in replies:
+        replies[_key].sort()
+        post['replies'] = replies[_key]
+
+    for post in posts:
+      posts_by_id[_ref(post)] = post
+
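+    # Illustrative shape of the returned map (the author/permlink refs below are made up):
+    #   { 'alice/my-post':    {..., 'replies': ['bob/re-my-post', 'carol/re-my-post']},
+    #     'bob/re-my-post':   {..., 'replies': []},
+    #     'carol/re-my-post': {..., 'replies': []} }
+    # i.e. every post of the discussion is keyed by its author/permlink ref and lists the
+    # refs of its direct replies, sorted as above.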
+    return posts_by_id
 
 @cached(ttl=1800, timeout=1200)
 async def _get_feed_price(db):
@@ -291,18 +280,18 @@ async def _get_props_lite(db):
 
     # convert NAI amounts to legacy
     nais = ['virtual_supply', 'current_supply', 'current_sbd_supply',
-            'pending_rewarded_vesting_steem', 'pending_rewarded_vesting_shares',
-            'total_vesting_fund_steem', 'total_vesting_shares']
+            'pending_rewarded_vesting_hive', 'pending_rewarded_vesting_shares',
+            'total_vesting_fund_hive', 'total_vesting_shares']
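+    # Illustrative conversion performed by legacy_amount() below (values are made up):
+    #   {'amount': '1000', 'precision': 3, 'nai': '@@000000013'} -> '1.000 HBD'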
     for k in nais:
         if k in raw:
             raw[k] = legacy_amount(raw[k])
 
     return dict(
         time=raw['time'], #*
-        sbd_print_rate=raw['sbd_print_rate'],
-        sbd_interest_rate=raw['sbd_interest_rate'],
+        hbd_print_rate=raw['hbd_print_rate'],
+        hbd_interest_rate=raw['hbd_interest_rate'],
         head_block_number=raw['head_block_number'], #*
         total_vesting_shares=raw['total_vesting_shares'],
-        total_vesting_fund_steem=raw['total_vesting_fund_steem'],
+        total_vesting_fund_hive=raw['total_vesting_fund_hive'],
         last_irreversible_block_num=raw['last_irreversible_block_num'], #*
     )
diff --git a/hive/server/condenser_api/methods.py b/hive/server/condenser_api/methods.py
index 8c34afe5d1dd2725e6a76ba86dd7a1adfee42c3b..656e06325d605c2adefbeef3e5b3a4d27f471373 100644
--- a/hive/server/condenser_api/methods.py
+++ b/hive/server/condenser_api/methods.py
@@ -1,46 +1,28 @@
 """Steemd/condenser_api compatibility layer API methods."""
-
 from functools import wraps
 
 import hive.server.condenser_api.cursor as cursor
-from hive.server.condenser_api.objects import load_posts, load_posts_reblogs, resultset_to_posts
-from hive.server.condenser_api.objects import _mute_votes, _condenser_post_object
+from hive.server.condenser_api.objects import _condenser_post_object
 from hive.server.common.helpers import (
     ApiError,
     return_error_info,
+    json_date,
     valid_account,
     valid_permlink,
     valid_tag,
     valid_offset,
     valid_limit,
+    valid_truncate,
     valid_follow_type)
-from hive.server.common.mutes import Mutes
+from hive.server.database_api.methods import find_votes_impl, VotesPresentation
 
 # pylint: disable=too-many-arguments,line-too-long,too-many-lines
 
-SELECT_FRAGMENT = """
-    SELECT hive_posts_cache.post_id, hive_posts_cache.author, hive_posts_cache.permlink,
-           hive_posts_cache.title, hive_posts_cache.body, hive_posts_cache.category, hive_posts_cache.depth,
-           hive_posts_cache.promoted, hive_posts_cache.payout, hive_posts_cache.payout_at,
-           hive_posts_cache.is_paidout, hive_posts_cache.children, hive_posts_cache.votes,
-           hive_posts_cache.created_at, hive_posts_cache.updated_at, hive_posts_cache.rshares,
-           hive_posts_cache.raw_json, hive_posts_cache.json, hive_accounts.reputation AS author_rep,
-           hive_posts_cache.is_hidden AS is_hidden, hive_posts_cache.is_grayed AS is_grayed,
-           hive_posts_cache.total_votes AS total_votes, hive_posts_cache.flag_weight AS flag_weight,
-           hive_posts_cache.sc_trend AS sc_trend, hive_accounts.id AS acct_author_id,
-           hive_posts.is_pinned AS is_pinned
-           FROM hive_posts_cache JOIN hive_posts ON (hive_posts_cache.post_id = hive_posts.id)
-                                 JOIN hive_accounts ON (hive_posts_cache.author = hive_accounts.name)"""
-
-
-# Dummy
-
 @return_error_info
 async def get_account_votes(context, account):
     """Return an info message about get_acccount_votes being unsupported."""
     # pylint: disable=unused-argument
-    raise ApiError("get_account_votes is no longer supported, for details see "
-                   "https://steemit.com/steemit/@steemitdev/additional-public-api-change")
+    assert False, "get_account_votes is no longer supported, for details see https://hive.blog/steemit/@steemitdev/additional-public-api-change"
 
 
 # Follows Queries
@@ -49,8 +31,8 @@ def _legacy_follower(follower, following, follow_type):
     return dict(follower=follower, following=following, what=[follow_type])
 
 @return_error_info
-async def get_followers(context, account: str, start: str, follow_type: str = None,
-                        limit: int = None, **kwargs):
+async def get_followers(context, account: str, start: str = '', follow_type: str = None,
+                        limit: int = 1000, **kwargs):
     """Get all accounts following `account`. (EOL)"""
     # `type` reserved word workaround
     if not follow_type and 'type' in kwargs:
@@ -62,12 +44,12 @@ async def get_followers(context, account: str, start: str, follow_type: str = No
         valid_account(account),
         valid_account(start, allow_empty=True),
         valid_follow_type(follow_type),
-        valid_limit(limit, 1000))
+        valid_limit(limit, 1000, 1000))
     return [_legacy_follower(name, account, follow_type) for name in followers]
 
 @return_error_info
-async def get_following(context, account: str, start: str, follow_type: str = None,
-                        limit: int = None, **kwargs):
+async def get_following(context, account: str, start: str = '', follow_type: str = None,
+                        limit: int = 1000, **kwargs):
     """Get all accounts `account` follows. (EOL)"""
     # `type` reserved word workaround
     if not follow_type and 'type' in kwargs:
@@ -79,18 +61,19 @@ async def get_following(context, account: str, start: str, follow_type: str = No
         valid_account(account),
         valid_account(start, allow_empty=True),
         valid_follow_type(follow_type),
-        valid_limit(limit, 1000))
+        valid_limit(limit, 1000, 1000))
     return [_legacy_follower(account, name, follow_type) for name in following]
 
 @return_error_info
 async def get_follow_count(context, account: str):
     """Get follow count stats. (EOL)"""
-    count = await cursor.get_follow_counts(
-        context['db'],
-        valid_account(account))
+    db = context['db']
+    account = valid_account(account)
+    sql = "SELECT * FROM condenser_get_follow_count( (:account)::VARCHAR )"
+    counters = await db.query_row(sql, account=account)
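+    # counters is a single row from the SQL function, e.g. (12, 34) (made-up values),
+    # yielding following_count=12 and follower_count=34 in the dict built below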
     return dict(account=account,
-                following_count=count['following'],
-                follower_count=count['followers'])
+                following_count=counters[0],
+                follower_count=counters[1])
 
 @return_error_info
 async def get_reblogged_by(context, author: str, permlink: str):
@@ -101,61 +84,69 @@ async def get_reblogged_by(context, author: str, permlink: str):
         valid_permlink(permlink))
 
 @return_error_info
-async def get_account_reputations(context, account_lower_bound: str = None, limit: int = None):
-    """List account reputations"""
-    return {'reputations': await cursor.get_account_reputations(
-        context['db'],
-        account_lower_bound,
-        valid_limit(limit, 1000))}
-
+async def get_account_reputations(context, account_lower_bound: str = '', limit: int = 1000):
+    db = context['db']
+    return await _get_account_reputations_impl(db, True, account_lower_bound, limit)
+
+async def _get_account_reputations_impl(db, fat_node_style, account_lower_bound, limit):
+    """Enumerate account reputations."""
+    if not account_lower_bound:
+      account_lower_bound = ''
+    assert isinstance(account_lower_bound, str), "invalid account_lower_bound type"
+    limit = valid_limit(limit, 1000, 1000)
+
+    sql = "SELECT * FROM condenser_get_account_reputations( (:start)::VARCHAR, :limit )"
+    rows = await db.query_all(sql, start=account_lower_bound, limit=limit)
+    if fat_node_style:
+        return [dict(account=r[0], reputation=r[1]) for r in rows]
+    else:
+        return {'reputations': [dict(name=r[0], reputation=r[1]) for r in rows]}
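+
+# Illustrative output shapes (names and values are made up):
+#   fat_node_style=True  -> [{'account': 'alice', 'reputation': 78}, ...]
+#   fat_node_style=False -> {'reputations': [{'name': 'alice', 'reputation': 78}, ...]}
+# (the latter form presumably serves the follow_api flavour of this call)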
 
 # Content Primitives
 
 @return_error_info
 async def get_content(context, author: str, permlink: str, observer=None):
-    """Get a single post object."""
     db = context['db']
+    return await _get_content_impl(db, True, author, permlink, observer)
+
+@return_error_info
+async def _get_content_impl(db, fat_node_style, author: str, permlink: str, observer=None):
+    """Get a single post object."""
     valid_account(author)
     valid_permlink(permlink)
 
-    sql = """ ---get_content\n""" + SELECT_FRAGMENT + """
-              WHERE hive_posts_cache.author = :author AND hive_posts_cache.permlink = :permlink AND NOT hive_posts.is_deleted
-          """
+    sql = "SELECT * FROM condenser_get_content(:author, :permlink)"
+
+    post = None
     result = await db.query_all(sql, author=author, permlink=permlink)
-    result = dict(result[0])
-    post = _condenser_post_object(result, 0)
-    if not observer:
-        post['active_votes'] = _mute_votes(post['active_votes'], Mutes.all())
-    else:
-        blacklists_for_user = await Mutes.get_blacklists_for_observer(observer, context)
-        post['active_votes'] = _mute_votes(post['active_votes'], blacklists_for_user.keys())
+    if result:
+        result = dict(result[0])
+        post = _condenser_post_object(result, 0, fat_node_style)
+        post['active_votes'] = await find_votes_impl(db, author, permlink, VotesPresentation.ActiveVotes if fat_node_style else VotesPresentation.CondenserApi)
 
-    assert post, 'post was not found in cache'
     return post
 
 @return_error_info
 async def get_content_replies(context, author: str, permlink: str):
-    """Get a list of post objects based on parent."""
     db = context['db']
+    return await _get_content_replies_impl(db, True, author, permlink)
+
+@return_error_info
+async def _get_content_replies_impl(db, fat_node_style, author: str, permlink: str):
+    """Get a list of post objects based on parent."""
     valid_account(author)
     valid_permlink(permlink)
 
-    sql = """SELECT post_id, author, permlink, title, body, category, depth,
-             promoted, payout, payout_at, is_paidout, children, votes,
-             created_at, updated_at, rshares, raw_json, json
-             FROM hive_posts_cache WHERE post_id IN (
-             SELECT hp2.id FROM hive_posts hp2
-             WHERE hp2.is_deleted = '0' AND
-             hp2.parent_id = (SELECT id FROM hive_posts
-             WHERE author = :author
-             AND permlink = :permlink AND is_deleted = '0')
-             LIMIT :limit
-             )
-             ORDER BY post_id"""
-
-    result=await db.query_all(sql, author=author, permlink = permlink, limit=5000)
-
-    posts = await resultset_to_posts(db=db, resultset=result, truncate_body=0)
+    sql = "SELECT * FROM condenser_get_content_replies(:author, :permlink)"
+    result = await db.query_all(sql, author=author, permlink=permlink)
+
+    posts = []
+    for row in result:
+        row = dict(row)
+        post = _condenser_post_object(row, get_content_additions=fat_node_style)
+        post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.ActiveVotes if fat_node_style else VotesPresentation.CondenserApi)
+        posts.append(post)
+
     return posts
 
 # Discussion Queries
@@ -180,258 +171,227 @@ def nested_query_compat(function):
         return function(*args, **kwargs)
     return wrapper
 
-@return_error_info
-@nested_query_compat
-async def get_discussions_by(discussion_type, context, start_author: str = '',
-                             start_permlink: str = '', limit: int = 20,
-                             tag: str = None, truncate_body: int = 0,
-                             filter_tags: list = None):
-    """ Common implementation for get_discussions_by calls  """
-    assert not filter_tags, 'filter tags not supported'
-    assert discussion_type in ['trending', 'hot', 'created', 'promoted',
-                               'payout', 'payout_comments'], 'invalid discussion type'
-    valid_account(start_author, allow_empty=True)
-    valid_permlink(start_permlink, allow_empty=True)
-    valid_limit(limit, 100)
-    valid_tag(tag, allow_empty=True)
+async def get_posts_by_given_sort(context, sort: str, start_author: str = '', start_permlink: str = '',
+                                     limit: int = 20, tag: str = None,
+                                     truncate_body: int = 0, filter_tags: list = None, observer:str=None):
+    """Query posts, sorted by creation date."""
+    assert not filter_tags, 'filter_tags not supported'
+
     db = context['db']
 
-    sql = "---get_discussions_by_" + discussion_type + "\r\n" + SELECT_FRAGMENT
-    
-    sql = sql + """ WHERE NOT hive_posts.is_deleted """
-    
-    if discussion_type == 'trending':
-        sql = sql + """ AND NOT hive_posts_cache.is_paidout %s ORDER BY sc_trend DESC LIMIT :limit """
-    elif discussion_type == 'hot':
-        sql = sql + """ AND NOT hive_posts_cache.is_paidout %s ORDER BY sc_hot DESC LIMIT :limit """
-    elif discussion_type == 'created':
-        sql = sql + """ AND hive_posts.depth = 0 %s ORDER BY hive_posts_cache.created_at DESC LIMIT :limit """
-    elif discussion_type == 'promoted':
-        sql = sql + """ AND NOT hive_posts_cache.is_paidout AND hive_posts.promoted > 0
-                        %s ORDER BY hive_posts_cache.promoted DESC LIMIT :limit """
-    elif discussion_type == 'payout':
-        sql = sql + """ AND NOT hive_posts_cache.is_paidout AND hive_posts_cache.depth = 0
-                        %s ORDER BY hive_posts_cache.payout DESC LIMIT :limit """
-    elif discussion_type == 'payout_comments':
-        sql = sql + """ AND NOT hive_posts_cache.is_paidout AND hive_posts_cache.depth > 0
-                        %s ORDER BY hive_posts_cache.payout DESC LIMIT :limit """
-    
-    if tag and tag != 'all':
-        if tag[:5] == 'hive-':
-            sql = sql % """ %s AND hive_posts_cache.category = :tag """
-        else:
-            sql = sql % """ %s AND hive_posts_cache.post_id IN (SELECT post_id FROM hive_post_tags WHERE tag = :tag) """
-
-    if start_author and start_permlink:
-        if discussion_type == 'trending':
-            sql = sql % """ AND hive_posts_cache.sc_trend <= (SELECT sc_trend FROM hive_posts_cache WHERE permlink = :permlink AND author = :author)
-                            AND hive_posts_cache.post_id != (SELECT post_id FROM hive_posts_cache WHERE permlink = :permlink AND author = :author) """
-        elif discussion_type == 'hot':
-            sql = sql % """ AND hive_posts_cache.sc_hot <= (SELECT sc_hot FROM hive_posts_cache WHERE permlink = :permlink AND author = :author)
-                            AND hive_posts_cache.post_id != (SELECT post_id FROM hive_posts_cache WHERE permlink = :permlink AND author = :author) """
-        elif discussion_type == 'created':
-            sql = sql % """ AND hive_posts_cache.post_id < (SELECT post_id FROM hive_posts_cache WHERE permlink = :permlink AND author = :author) """
-        elif discussion_type == 'promoted':
-            sql = sql % """ AND hive_posts_cache.promoted <= (SELECT promoted FROM hive_posts_cache WHERE permlink = :permlink AND author = :author)
-                            AND hive_posts_cache.post_id != (SELECT post_id FROM hive_posts_cache WHERE permlink = :permlink AND author = :author) """
-        else:
-            sql = sql % """ AND hive_posts_cache.payout <= (SELECT payout FROM hive_posts_cache where permlink = :permlink AND author = :author)
-                            AND hive_posts_cache.post_id != (SELECT post_id FROM hive_posts_cache WHERE permlink = :permlink AND author = :author) """
-    else:
-        sql = sql % """ """
+    start_author    = valid_account(start_author, allow_empty=True)
+    start_permlink  = valid_permlink(start_permlink, allow_empty=True)
+    limit           = valid_limit(limit, 100, 20)
+    tag             = valid_tag(tag, allow_empty=True)
+    observer        = valid_account(observer, allow_empty=True)
+    truncate_body   = valid_truncate(truncate_body)
 
-    result = await db.query_all(sql, tag=tag, limit=limit, author=start_author, permlink=start_permlink)
     posts = []
-    for row in result:
+    is_community = tag[:5] == 'hive-'
+   
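+    # Illustrative dispatch (tag values are hypothetical): the sort/tag pair selects one SQL function, e.g.
+    #   sort='trending', tag='hive-135485' -> bridge_get_ranked_post_by_trends_for_community
+    #   sort='trending', tag=''            -> bridge_get_ranked_post_by_trends
+    #   sort='created',  tag='photography' -> bridge_get_ranked_post_by_created_for_tag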
+    if sort == 'created':
+      if is_community:
+        sql = "SELECT * FROM bridge_get_ranked_post_by_created_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
+      elif tag == '':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_created( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+      else:
+        sql = "SELECT * FROM bridge_get_ranked_post_by_created_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+    elif sort == 'trending':
+      if is_community:
+        sql = "SELECT * FROM bridge_get_ranked_post_by_trends_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
+      elif tag == '':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_trends( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+      else:
+        sql = "SELECT * FROM bridge_get_ranked_post_by_trends_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+    elif sort == 'hot':
+      if is_community:
+        sql = "SELECT * FROM bridge_get_ranked_post_by_hot_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+      elif tag == '':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_hot( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+      else:
+        sql = "SELECT * FROM bridge_get_ranked_post_by_hot_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+    elif sort == 'promoted':
+      if is_community:
+        sql = "SELECT * FROM bridge_get_ranked_post_by_promoted_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+      elif tag == '':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_promoted( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+      else:
+        sql = "SELECT * FROM bridge_get_ranked_post_by_promoted_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+    elif sort == 'post_by_payout':
+      if tag == '':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
+      else:
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout_for_category( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
+    elif sort == 'comment_by_payout':
+      if tag == '':
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout_comments( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+      else:
+        sql = "SELECT * FROM bridge_get_ranked_post_by_payout_comments_for_category( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
+    else:
+      return posts
+
+    sql_result = await db.query_all(sql, tag=tag, author=start_author, permlink=start_permlink, limit=limit, observer=observer )
+
+    for row in sql_result:
         post = _condenser_post_object(row, truncate_body)
-        post['active_votes'] = _mute_votes(post['active_votes'], Mutes.all())
+        post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.CondenserApi)
         posts.append(post)
-    #posts = await resultset_to_posts(db=db, resultset=result, truncate_body=truncate_body)
     return posts
 
+@return_error_info
+@nested_query_compat
+async def get_discussions_by_created(context, start_author: str = '', start_permlink: str = '',
+                                     limit: int = 20, tag: str = None,
+                                     truncate_body: int = 0, filter_tags: list = None, observer:str=None):
+  return await get_posts_by_given_sort(context, 'created', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer)
+
 @return_error_info
 @nested_query_compat
 async def get_discussions_by_trending(context, start_author: str = '', start_permlink: str = '',
                                       limit: int = 20, tag: str = None,
-                                      truncate_body: int = 0, filter_tags: list = None):
-    """Query posts, sorted by trending score."""
-    assert not filter_tags, 'filter_tags not supported'
-    ids = await cursor.pids_by_query(
-        context['db'],
-        'trending',
-        valid_account(start_author, allow_empty=True),
-        valid_permlink(start_permlink, allow_empty=True),
-        valid_limit(limit, 100),
-        valid_tag(tag, allow_empty=True))
-    return await load_posts(context['db'], ids, truncate_body=truncate_body)
-
+                                      truncate_body: int = 0, filter_tags: list = None, observer:str=None):
+  return await get_posts_by_given_sort(context, 'trending', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer)
 
 @return_error_info
 @nested_query_compat
 async def get_discussions_by_hot(context, start_author: str = '', start_permlink: str = '',
                                  limit: int = 20, tag: str = None,
-                                 truncate_body: int = 0, filter_tags: list = None):
-    """Query posts, sorted by hot score."""
-    assert not filter_tags, 'filter_tags not supported'
-    ids = await cursor.pids_by_query(
-        context['db'],
-        'hot',
-        valid_account(start_author, allow_empty=True),
-        valid_permlink(start_permlink, allow_empty=True),
-        valid_limit(limit, 100),
-        valid_tag(tag, allow_empty=True))
-    return await load_posts(context['db'], ids, truncate_body=truncate_body)
-
+                                 truncate_body: int = 0, filter_tags: list = None, observer:str=None):
+  return await get_posts_by_given_sort(context, 'hot', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer)
 
 @return_error_info
 @nested_query_compat
 async def get_discussions_by_promoted(context, start_author: str = '', start_permlink: str = '',
                                       limit: int = 20, tag: str = None,
-                                      truncate_body: int = 0, filter_tags: list = None):
-    """Query posts, sorted by promoted amount."""
-    assert not filter_tags, 'filter_tags not supported'
-    ids = await cursor.pids_by_query(
-        context['db'],
-        'promoted',
-        valid_account(start_author, allow_empty=True),
-        valid_permlink(start_permlink, allow_empty=True),
-        valid_limit(limit, 100),
-        valid_tag(tag, allow_empty=True))
-    return await load_posts(context['db'], ids, truncate_body=truncate_body)
-
+                                      truncate_body: int = 0, filter_tags: list = None, observer:str=None):
+  return await get_posts_by_given_sort(context, 'promoted', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer)
 
 @return_error_info
 @nested_query_compat
-async def get_discussions_by_created(context, start_author: str = '', start_permlink: str = '',
-                                     limit: int = 20, tag: str = None,
-                                     truncate_body: int = 0, filter_tags: list = None):
-    """Query posts, sorted by creation date."""
-    assert not filter_tags, 'filter_tags not supported'
-    ids = await cursor.pids_by_query(
-        context['db'],
-        'created',
-        valid_account(start_author, allow_empty=True),
-        valid_permlink(start_permlink, allow_empty=True),
-        valid_limit(limit, 100),
-        valid_tag(tag, allow_empty=True))
-    return await load_posts(context['db'], ids, truncate_body=truncate_body)
+async def get_post_discussions_by_payout(context, start_author: str = '', start_permlink: str = '',
+                                         limit: int = 20, tag: str = None,
+                                         truncate_body: int = 0, observer:str=None):
+  return await get_posts_by_given_sort(context, 'post_by_payout', start_author, start_permlink, limit, tag, truncate_body, [], observer)
 
+@return_error_info
+@nested_query_compat
+async def get_comment_discussions_by_payout(context, start_author: str = '', start_permlink: str = '',
+                                            limit: int = 20, tag: str = None,
+                                            truncate_body: int = 0, observer:str=None):
+  return await get_posts_by_given_sort(context, 'comment_by_payout', start_author, start_permlink, limit, tag, truncate_body, [], observer)
 
 @return_error_info
 @nested_query_compat
-async def get_discussions_by_blog(context, tag: str = None, start_author: str = '',
+async def get_discussions_by_blog(context, tag: str, start_author: str = '',
                                   start_permlink: str = '', limit: int = 20,
                                   truncate_body: int = 0, filter_tags: list = None):
     """Retrieve account's blog posts, including reblogs."""
-    assert tag, '`tag` cannot be blank'
     assert not filter_tags, 'filter_tags not supported'
-    valid_account(tag)
-    valid_account(start_author, allow_empty=True)
-    valid_permlink(start_permlink, allow_empty=True)
-    valid_limit(limit, 100)
-
-    sql = """ ---get_discussions_by_blog """ + SELECT_FRAGMENT + """
-            WHERE NOT hive_posts.is_deleted AND hive_posts_cache.post_id IN
-                (SELECT post_id FROM hive_feed_cache JOIN hive_accounts ON (hive_feed_cache.account_id = hive_accounts.id) WHERE hive_accounts.name = :author)
-          """
-    if start_author and start_permlink != '':
-        sql += """
-         AND hive_posts_cache.created_at <= (SELECT created_at from hive_posts_cache where author = :start_author AND permlink = :start_permlink)
-        """
-
-    sql += """
-        ORDER BY hive_posts_cache.created_at DESC
-        LIMIT :limit
-    """
+    tag = valid_account(tag)
+    start_author = valid_account(start_author, allow_empty=True)
+    start_permlink = valid_permlink(start_permlink, allow_empty=True)
+    limit = valid_limit(limit, 100, 20)
+    truncate_body = valid_truncate(truncate_body)
+
+    sql = "SELECT * FROM bridge_get_account_posts_by_blog( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::INTEGER, False )"
 
     db = context['db']
-    result = await db.query_all(sql, author=tag, start_author=start_author, start_permlink=start_permlink, limit=limit)
+    result = await db.query_all(sql, account=tag, author=start_author, permlink=start_permlink, limit=limit)
     posts_by_id = []
 
     for row in result:
         row = dict(row)
         post = _condenser_post_object(row, truncate_body=truncate_body)
-        post['active_votes'] = _mute_votes(post['active_votes'], Mutes.all())
-        #posts_by_id[row['post_id']] = post
+        post['active_votes'] = await find_votes_impl(db, post['author'], post['permlink'], VotesPresentation.CondenserApi)
         posts_by_id.append(post)
 
     return posts_by_id
 
+async def get_discussions_by_feed_impl(db, account: str, start_author: str = '',
+                                   start_permlink: str = '', limit: int = 20, truncate_body: int = 0, observer:str=None):
+    """Get a list of posts for an account's feed."""
+    sql = "SELECT * FROM bridge_get_by_feed_with_reblog((:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::INTEGER)"
+    result = await db.query_all(sql, account=account, author=start_author, permlink=start_permlink, limit=limit, observer=observer)
+
+    posts = []
+    for row in result:
+        row = dict(row)
+        post = _condenser_post_object(row, truncate_body=truncate_body)
+        reblogged_by = set(row['reblogged_by'])
+        reblogged_by.discard(row['author']) # Eliminate original author of reblogged post
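+        # e.g. reblogged_by = {'alice', 'bob', 'carol'} with row['author'] == 'alice' (made-up names)
+        # leaves {'bob', 'carol'}, so only genuine rebloggers end up in post['reblogged_by']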
+        if reblogged_by:
+            reblogged_by_list = list(reblogged_by)
+            reblogged_by_list.sort()
+            post['reblogged_by'] = reblogged_by_list
+
+        post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.CondenserApi)
+        posts.append(post)
+
+    return posts
+
 @return_error_info
 @nested_query_compat
-async def get_discussions_by_feed(context, tag: str = None, start_author: str = '',
+async def get_discussions_by_feed(context, tag: str, start_author: str = '',
                                   start_permlink: str = '', limit: int = 20,
-                                  truncate_body: int = 0, filter_tags: list = None):
+                                  truncate_body: int = 0, filter_tags: list = None, observer:str=None):
     """Retrieve account's personalized feed."""
-    assert tag, '`tag` cannot be blank'
     assert not filter_tags, 'filter_tags not supported'
-    res = await cursor.pids_by_feed_with_reblog(
+    return await get_discussions_by_feed_impl(
         context['db'],
         valid_account(tag),
         valid_account(start_author, allow_empty=True),
         valid_permlink(start_permlink, allow_empty=True),
-        valid_limit(limit, 100))
-    return await load_posts_reblogs(context['db'], res, truncate_body=truncate_body)
-
+        valid_limit(limit, 100, 20),
+        valid_truncate(truncate_body), observer)
 
 @return_error_info
 @nested_query_compat
-async def get_discussions_by_comments(context, start_author: str = None, start_permlink: str = '',
+async def get_discussions_by_comments(context, start_author: str, start_permlink: str = '',
                                       limit: int = 20, truncate_body: int = 0,
                                       filter_tags: list = None):
     """Get comments by made by author."""
-    assert start_author, '`start_author` cannot be blank'
     assert not filter_tags, 'filter_tags not supported'
-    valid_account(start_author)
-    valid_permlink(start_permlink, allow_empty=True)
-    valid_limit(limit, 100)
-
-    sql = """ ---get_discussions_by_comments """ + SELECT_FRAGMENT + """
-            WHERE hive_posts_cache.author = :start_author AND hive_posts_cache.depth > 0
-            AND NOT hive_posts.is_deleted
-    """
-
-    if start_permlink:
-        sql += """
-            AND hive_posts_cache.post_id <= (SELECT hive_posts_cache.post_id FROM 
-            hive_posts_cache WHERE permlink = :start_permlink AND author=:start_author)
-        """
-
-    sql += """
-        ORDER BY hive_posts_cache.post_id DESC, depth LIMIT :limit
-    """
+    start_author = valid_account(start_author)
+    start_permlink = valid_permlink(start_permlink, allow_empty=True)
+    limit = valid_limit(limit, 100, 20)
+    truncate_body = valid_truncate(truncate_body)
 
     posts = []
     db = context['db']
-    result = await db.query_all(sql, start_author=start_author, start_permlink=start_permlink, limit=limit)
+
+    sql = "SELECT * FROM bridge_get_account_posts_by_comments( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
+    result = await db.query_all(sql, account=start_author, author=start_author if start_permlink else '', permlink=start_permlink, limit=limit)
 
     for row in result:
         row = dict(row)
         post = _condenser_post_object(row, truncate_body=truncate_body)
-        post['active_votes'] = _mute_votes(post['active_votes'], Mutes.all())
+        post['active_votes'] = await find_votes_impl(db, post['author'], post['permlink'], VotesPresentation.CondenserApi)
         posts.append(post)
 
     return posts
 
-
 @return_error_info
 @nested_query_compat
-async def get_replies_by_last_update(context, start_author: str = None, start_permlink: str = '',
+async def get_replies_by_last_update(context, start_author: str, start_permlink: str = '',
                                      limit: int = 20, truncate_body: int = 0):
     """Get all replies made to any of author's posts."""
-    assert start_author, '`start_author` cannot be blank'
-    ids = await cursor.pids_by_replies_to_account(
+    # Despite the name, the time of last edit is not used; posts are ranked by creation time (that is, by their id).
+    # Note that start_author has a dual meaning in this call:
+    # - when only start_author is given, it names the account whose posts we are looking for replies to
+    # - when start_permlink is also given, the pair points to one of the replies (the last post of the
+    #   previous page), and the account described above is taken from the author of that reply's parent post
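+    # For example (account and permlink values are hypothetical):
+    #   get_replies_by_last_update(ctx, 'alice', '', 20) - first page of replies to alice's posts
+    #   get_replies_by_last_update(ctx, 'bob', 'last-reply-of-previous-page', 20) - next page, where
+    #     'bob/last-reply-of-previous-page' is the last reply returned on the previous page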
+    return await cursor.get_by_replies_to_account(
         context['db'],
         valid_account(start_author),
         valid_permlink(start_permlink, allow_empty=True),
-        valid_limit(limit, 100))
-    return await load_posts(context['db'], ids, truncate_body=truncate_body)
-
+        valid_limit(limit, 100, 20),
+        valid_truncate(truncate_body))
 
 @return_error_info
 @nested_query_compat
-async def get_discussions_by_author_before_date(context, author: str = None, start_permlink: str = '',
-                                                before_date: str = '', limit: int = 10):
+async def get_discussions_by_author_before_date(context, author: str, start_permlink: str = '',
+                                                before_date: str = '', limit: int = 10, truncate_body: int = 0):
     """Retrieve account's blog posts, without reblogs.
 
     NOTE: before_date is completely ignored, and it appears to be broken and/or
@@ -439,47 +399,12 @@ async def get_discussions_by_author_before_date(context, author: str = None, sta
     get_discussions_by_blog but does NOT serve reblogs.
     """
     # pylint: disable=invalid-name,unused-argument
-    assert author, '`author` cannot be blank'
-    ids = await cursor.pids_by_blog_without_reblog(
+    return await cursor.get_by_blog_without_reblog(
         context['db'],
         valid_account(author),
         valid_permlink(start_permlink, allow_empty=True),
-        valid_limit(limit, 100))
-    return await load_posts(context['db'], ids)
-
-
-@return_error_info
-@nested_query_compat
-async def get_post_discussions_by_payout(context, start_author: str = '', start_permlink: str = '',
-                                         limit: int = 20, tag: str = None,
-                                         truncate_body: int = 0):
-    """Query top-level posts, sorted by payout."""
-    ids = await cursor.pids_by_query(
-        context['db'],
-        'payout',
-        valid_account(start_author, allow_empty=True),
-        valid_permlink(start_permlink, allow_empty=True),
-        valid_limit(limit, 100),
-        valid_tag(tag, allow_empty=True))
-    return await load_posts(context['db'], ids, truncate_body=truncate_body)
-
-
-@return_error_info
-@nested_query_compat
-async def get_comment_discussions_by_payout(context, start_author: str = '', start_permlink: str = '',
-                                            limit: int = 20, tag: str = None,
-                                            truncate_body: int = 0):
-    """Query comments, sorted by payout."""
-    # pylint: disable=invalid-name
-    ids = await cursor.pids_by_query(
-        context['db'],
-        'payout_comments',
-        valid_account(start_author, allow_empty=True),
-        valid_permlink(start_permlink, allow_empty=True),
-        valid_limit(limit, 100),
-        valid_tag(tag, allow_empty=True))
-    return await load_posts(context['db'], ids, truncate_body=truncate_body)
-
+        valid_limit(limit, 100, 10),
+        valid_truncate(truncate_body))
 
 @return_error_info
 @nested_query_compat
@@ -487,8 +412,40 @@ async def get_blog(context, account: str, start_entry_id: int = 0, limit: int =
     """Get posts for an author's blog (w/ reblogs), paged by index/limit.
 
     Equivalent to get_discussions_by_blog, but uses offset-based pagination.
+
+    Examples: (ABW: the old description and examples were misleading, as in many cases the code behaved differently; also, more cases now work that previously produced an error)
+    (acct, -1, limit) for limit 1..500 - returns the latest (at most) limit posts
+    (acct, 0) - returns the latest single post (ABW: this is a bug, but it is kept because it was probably actively used - it should return the oldest post)
+    (acct, 0, limit) for limit 1..500 - same as (acct, -1, limit) - see above
+    (acct, last_idx) for positive last_idx - returns the last_idx oldest posts, or posts in range [last_idx..last_idx-500) when last_idx >= 500
+    (acct, last_idx, limit) for positive last_idx and limit 1..500 - returns posts in range [last_idx..last_idx-limit)
     """
-    return await _get_blog(context['db'], account, start_entry_id, limit)
+    db = context['db']
+
+    account = valid_account(account)
+    if not start_entry_id:
+        start_entry_id = -1
+    start_entry_id = valid_offset(start_entry_id)
+    if not limit:
+        limit = max(start_entry_id + 1, 1)
+        limit = min(limit, 500)
+    limit = valid_limit(limit, 500, None)
+
+    sql = "SELECT * FROM condenser_get_blog(:account, :last, :limit)"
+    result = await db.query_all(sql, account=account, last=start_entry_id, limit=limit)
+
+    out = []
+    for row in result:
+        row = dict(row)
+        post = _condenser_post_object(row)
+
+        post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.CondenserApi)
+        out.append({"blog": account,
+                    "entry_id": row['entry_id'],
+                    "comment": post,
+                    "reblogged_on": json_date(row['reblogged_at'])})
+
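+    # Illustrative shape of one entry in the returned list (values are made up):
+    #   {"blog": "alice", "entry_id": 41, "comment": {...condenser-style post object...},
+    #    "reblogged_on": "1970-01-01T00:00:00"}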
+    return list(reversed(out))
 
 @return_error_info
 @nested_query_compat
@@ -497,49 +454,36 @@ async def get_blog_entries(context, account: str, start_entry_id: int = 0, limit
 
     Interface identical to get_blog, but returns minimalistic post references.
     """
+    db = context['db']
 
-    entries = await _get_blog(context['db'], account, start_entry_id, limit)
-    for entry in entries:
-        # replace the comment body with just author/permlink
-        post = entry.pop('comment')
-        entry['author'] = post['author']
-        entry['permlink'] = post['permlink']
-
-    return entries
-
-async def _get_blog(db, account: str, start_index: int, limit: int = None):
-    """Get posts for an author's blog (w/ reblogs), paged by index/limit.
-
-    Examples:
-    (acct, 2) = returns blog entries 0 up to 2 (3 oldest)
-    (acct, 0) = returns all blog entries (limit 0 means return all?)
-    (acct, 2, 1) = returns 1 post starting at idx 2
-    (acct, 2, 3) = returns 3 posts: idxs (2,1,0)
-    (acct, -1, 10) = returns latest 10 posts
-    """
-
-    if start_index is None:
-        start_index = 0
-
+    account = valid_account(account)
+    if not start_entry_id:
+        start_entry_id = -1
+    start_entry_id = valid_offset(start_entry_id)
     if not limit:
-        limit = start_index + 1
+        limit = max(start_entry_id + 1, 1)
+        limit = min(limit, 500)
+    limit = valid_limit(limit, 500, None)
 
-    start_index, ids = await cursor.pids_by_blog_by_index(
-        db,
-        valid_account(account),
-        valid_offset(start_index),
-        valid_limit(limit, 500))
+    sql = "SELECT * FROM condenser_get_blog_entries(:account, :last, :limit)"
+    result = await db.query_all(sql, account=account, last=start_entry_id, limit=limit)
 
     out = []
-
-    idx = int(start_index)
-    for post in await load_posts(db, ids):
-        reblog = post['author'] != account
-        reblog_on = post['created'] if reblog else "1970-01-01T00:00:00"
+    for row in result:
+        row = dict(row)
         out.append({"blog": account,
-                    "entry_id": idx,
-                    "comment": post,
-                    "reblogged_on": reblog_on})
-        idx -= 1
+                    "entry_id": row['entry_id'],
+                    "author": row['author'],
+                    "permlink": row['permlink'],
+                    "reblogged_on": json_date(row['reblogged_at'])})
+
+    return list(reversed(out))
+
+@return_error_info
+async def get_active_votes(context, author: str, permlink: str):
+    """ Returns all votes for the given post. """
+    valid_account(author)
+    valid_permlink(permlink)
+    db = context['db']
 
-    return out
+    return await find_votes_impl(db, author, permlink, VotesPresentation.ActiveVotes)
diff --git a/hive/server/condenser_api/objects.py b/hive/server/condenser_api/objects.py
index 725d804bbafae4f4eec459beccd3a9e0511de3c6..5ec38b8883b8fe1efcd5717336fc6bf757764801 100644
--- a/hive/server/condenser_api/objects.py
+++ b/hive/server/condenser_api/objects.py
@@ -3,144 +3,56 @@
 import logging
 import ujson as json
 
-from hive.utils.normalize import sbd_amount, rep_to_raw
-from hive.server.common.mutes import Mutes
-from hive.server.common.helpers import json_date
+from hive.utils.normalize import sbd_amount
+from hive.server.common.helpers import json_date, get_hive_accounts_info_view_query_string
+from hive.server.database_api.methods import find_votes_impl, VotesPresentation
+from hive.utils.account import safe_db_profile_metadata
 
 log = logging.getLogger(__name__)
 
 # Building of legacy account objects
 
-async def load_accounts(db, names):
+async def load_accounts(db, names, lite=False):
     """`get_accounts`-style lookup for `get_state` compat layer."""
-    sql = """SELECT id, name, display_name, about, reputation, vote_weight,
-                    created_at, post_count, profile_image, location, website,
-                    cover_image
-               FROM hive_accounts WHERE name IN :names"""
+    sql = get_hive_accounts_info_view_query_string( names, lite )
     rows = await db.query_all(sql, names=tuple(names))
     return [_condenser_account_object(row) for row in rows]
 
-async def load_posts_reblogs(db, ids_with_reblogs, truncate_body=0):
-    """Given a list of (id, reblogged_by) tuples, return posts w/ reblog key."""
-    post_ids = [r[0] for r in ids_with_reblogs]
-    reblog_by = dict(ids_with_reblogs)
-    posts = await load_posts(db, post_ids, truncate_body=truncate_body)
-
-    # Merge reblogged_by data into result set
-    for post in posts:
-        rby = set(reblog_by[post['post_id']].split(','))
-        rby.discard(post['author'])
-        if rby:
-            post['reblogged_by'] = list(rby)
-
-    return posts
-
-async def load_posts_keyed(db, ids, truncate_body=0):
-    """Given an array of post ids, returns full posts objects keyed by id."""
-    assert ids, 'no ids passed to load_posts_keyed'
-
-    # fetch posts and associated author reps
-    sql = """SELECT post_id, author, permlink, title, body, category, depth,
-                    promoted, payout, payout_at, is_paidout, children, votes,
-                    created_at, updated_at, rshares, raw_json, json
-               FROM hive_posts_cache WHERE post_id IN :ids"""
-    result = await db.query_all(sql, ids=tuple(ids))
-    author_reps = await _query_author_rep_map(db, result)
-
-    muted_accounts = Mutes.all()
-    posts_by_id = {}
-    for row in result:
-        row = dict(row)
-        row['author_rep'] = author_reps[row['author']]
-        post = _condenser_post_object(row, truncate_body=truncate_body)
-        post['active_votes'] = _mute_votes(post['active_votes'], muted_accounts)
-        posts_by_id[row['post_id']] = post
-
-    return posts_by_id
-
-def _mute_votes(votes, muted_accounts):
-    if not muted_accounts:
-        return votes
-    return [v for v in votes if v['voter'] not in muted_accounts]
-
-async def load_posts(db, ids, truncate_body=0):
-    """Given an array of post ids, returns full objects in the same order."""
-    if not ids:
-        return []
-
-    # posts are keyed by id so we can return output sorted by input order
-    posts_by_id = await load_posts_keyed(db, ids, truncate_body=truncate_body)
-
-    # in rare cases of cache inconsistency, recover and warn
-    missed = set(ids) - posts_by_id.keys()
-    if missed:
-        log.info("get_posts do not exist in cache: %s", repr(missed))
-        for _id in missed:
-            ids.remove(_id)
-            sql = ("SELECT id, author, permlink, depth, created_at, is_deleted "
-                   "FROM hive_posts WHERE id = :id")
-            post = await db.query_row(sql, id=_id)
-            if not post['is_deleted']:
-                # TODO: This should never happen. See #173 for analysis
-                log.error("missing post -- %s", dict(post))
-            else:
-                log.info("requested deleted post: %s", dict(post))
-
-    return [posts_by_id[_id] for _id in ids]
-
-async def resultset_to_posts(db, resultset, truncate_body=0):
-    author_reps = await _query_author_rep_map(db, resultset)
-    muted_accounts = Mutes.all()
-
-    posts = []
-    for row in resultset:
-        row = dict(row)
-        row['author_rep'] = author_reps[row['author']]
-        post = _condenser_post_object(row, truncate_body=truncate_body)
-        post['active_votes'] = _mute_votes(post['active_votes'], muted_accounts)
-        posts.append(post)
-
-    return posts
-
-async def _query_author_rep_map(db, posts):
-    """Given a list of posts, returns an author->reputation map."""
-    if not posts:
-        return {}
-    names = tuple({post['author'] for post in posts})
-    sql = "SELECT name, reputation FROM hive_accounts WHERE name IN :names"
-    return {r['name']: r['reputation'] for r in await db.query_all(sql, names=names)}
-
 def _condenser_account_object(row):
     """Convert an internal account record into legacy-steemd style."""
+    # The member `vote_weight` was removed from `hive_accounts`, so the member `net_vesting_shares` is currently equal to zero.
+
+    profile = safe_db_profile_metadata(row['posting_json_metadata'], row['json_metadata'])
+
     return {
         'name': row['name'],
         'created': str(row['created_at']),
         'post_count': row['post_count'],
-        'reputation': rep_to_raw(row['reputation']),
-        'net_vesting_shares': row['vote_weight'],
+        'reputation': row['reputation'],
+        'net_vesting_shares': 0,
         'transfer_history': [],
         'json_metadata': json.dumps({
-            'profile': {'name': row['display_name'],
-                        'about': row['about'],
-                        'website': row['website'],
-                        'location': row['location'],
-                        'cover_image': row['cover_image'],
-                        'profile_image': row['profile_image'],
+            'profile': {'name': profile['name'],
+                        'about': profile['about'],
+                        'website': profile['website'],
+                        'location': profile['location'],
+                        'cover_image': profile['cover_image'],
+                        'profile_image': profile['profile_image'],
                        }})}
 
-def _condenser_post_object(row, truncate_body=0):
-    """Given a hive_posts_cache row, create a legacy-style post object."""
+def _condenser_post_object(row, truncate_body=0, get_content_additions=False):
+    """Given a hive_posts row, create a legacy-style post object."""
     paid = row['is_paidout']
 
-    # condenser#3424 mitigation
-    if not row['category']:
-        row['category'] = 'undefined'
-
+    full_payout = row['pending_payout'] + row['payout']
     post = {}
-    post['post_id'] = row['post_id']
     post['author'] = row['author']
     post['permlink'] = row['permlink']
-    post['category'] = row['category']
+
+    if not row['category']:
+      post['category'] = 'undefined' # condenser#3424 mitigation
+    else:
+      post['category'] = row['category']
 
     post['title'] = row['title']
     post['body'] = row['body'][0:truncate_body] if truncate_body else row['body']
@@ -150,48 +62,69 @@ def _condenser_post_object(row, truncate_body=0):
     post['last_update'] = json_date(row['updated_at'])
     post['depth'] = row['depth']
     post['children'] = row['children']
-    post['net_rshares'] = row['rshares']
 
     post['last_payout'] = json_date(row['payout_at'] if paid else None)
     post['cashout_time'] = json_date(None if paid else row['payout_at'])
+
     post['total_payout_value'] = _amount(row['payout'] if paid else 0)
     post['curator_payout_value'] = _amount(0)
-    post['pending_payout_value'] = _amount(0 if paid else row['payout'])
+
+    post['pending_payout_value'] = _amount(0 if paid else full_payout)
     post['promoted'] = _amount(row['promoted'])
 
     post['replies'] = []
     post['body_length'] = len(row['body'])
-    post['active_votes'] = _hydrate_active_votes(row['votes'])
-    post['author_reputation'] = rep_to_raw(row['author_rep'])
-
-    # import fields from legacy object
-    assert row['raw_json']
-    assert len(row['raw_json']) > 32
-    raw_json = json.loads(row['raw_json'])
-
-    if row['depth'] > 0:
-        post['parent_author'] = raw_json['parent_author']
-        post['parent_permlink'] = raw_json['parent_permlink']
-    else:
-        post['parent_author'] = ''
-        post['parent_permlink'] = row['category']
-
-    post['url'] = raw_json['url']
-    post['root_title'] = raw_json['root_title']
-    post['beneficiaries'] = raw_json['beneficiaries']
-    post['max_accepted_payout'] = raw_json['max_accepted_payout']
-    post['percent_steem_dollars'] = raw_json['percent_steem_dollars']
-
-    if paid:
-        curator_payout = sbd_amount(raw_json['curator_payout_value'])
+    post['author_reputation'] = row['author_rep']
+
+    post['parent_author'] = row['parent_author']
+    post['parent_permlink'] = row['parent_permlink_or_category']
+
+    post['url'] = row['url']
+    post['root_title'] = row['root_title']
+    post['beneficiaries'] = row['beneficiaries']
+    post['max_accepted_payout'] = row['max_accepted_payout']
+    post['percent_hbd'] = row['percent_hbd']
+
+    if get_content_additions:
+        post['id'] = row['id'] # kept for compatibility with old code until this API is fully supported
+        post['active'] = json_date(row['active'])
+        post['author_rewards'] = row['author_rewards']
+        post['max_cashout_time'] = json_date(None) # ABW: only relevant up to HF17; timestamp::max for all later posts (and also all paid ones)
+        curator_payout = sbd_amount(row['curator_payout_value'])
         post['curator_payout_value'] = _amount(curator_payout)
         post['total_payout_value'] = _amount(row['payout'] - curator_payout)
 
-    # not used by condenser, but may be useful
-    #post['net_votes'] = post['total_votes'] - row['up_votes']
-    #post['allow_replies'] = raw_json['allow_replies']
-    #post['allow_votes'] = raw_json['allow_votes']
-    #post['allow_curation_rewards'] = raw_json['allow_curation_rewards']
+        post['reward_weight'] = 10000
+
+        post['root_author'] = row['root_author']
+        post['root_permlink'] = row['root_permlink']
+
+        post['allow_replies'] = row['allow_replies']
+        post['allow_votes'] = row['allow_votes']
+        post['allow_curation_rewards'] = row['allow_curation_rewards']
+        post['reblogged_by'] = []
+        post['net_votes'] = row['net_votes']
+
+        post['children_abs_rshares'] = 0    # see: hive/server/database_api/objects.py:68
+        post['total_pending_payout_value'] = '0.000 HBD'      # no data
+
+        if paid:
+            post['total_vote_weight'] = 0
+            post['vote_rshares'] = 0
+            post['net_rshares'] = 0
+            post['abs_rshares'] = 0
+        else:
+            post['total_vote_weight'] = row['total_vote_weight']
+            post['vote_rshares'] = ( row['rshares'] + row['abs_rshares'] ) // 2
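+            # assuming rshares = upvotes - downvotes and abs_rshares = upvotes + downvotes,
+            # this recovers the upvote portion, e.g. (-10 + 30) // 2 = 10 (made-up values)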
+            post['net_rshares'] = row['rshares']
+            post['abs_rshares'] = row['abs_rshares']
+    else:
+        post['post_id'] = row['id']
+        post['net_rshares'] = row['rshares']
+        if paid:
+            curator_payout = sbd_amount(row['curator_payout_value'])
+            post['curator_payout_value'] = _amount(curator_payout)
+            post['total_payout_value'] = _amount(row['payout'] - curator_payout)
 
     return post
 
@@ -199,16 +132,3 @@ def _amount(amount, asset='HBD'):
     """Return a steem-style amount string given a (numeric, asset-str)."""
     assert asset == 'HBD', 'unhandled asset %s' % asset
     return "%.3f HBD" % amount
-
-def _hydrate_active_votes(vote_csv):
-    """Convert minimal CSV representation into steemd-style object."""
-    if not vote_csv:
-        return []
-    votes = []
-    for line in vote_csv.split("\n"):
-        voter, rshares, percent, reputation = line.split(',')
-        votes.append(dict(voter=voter,
-                          rshares=rshares,
-                          percent=percent,
-                          reputation=rep_to_raw(reputation)))
-    return votes
diff --git a/hive/server/condenser_api/tags.py b/hive/server/condenser_api/tags.py
index 9a4f5b5b3384a5dd004940dfbddcc163cbc0d5fe..d63cd62e04c77966c4fba052dd81ec800cd84d06 100644
--- a/hive/server/condenser_api/tags.py
+++ b/hive/server/condenser_api/tags.py
@@ -7,16 +7,7 @@ from hive.server.common.helpers import (return_error_info, valid_tag, valid_limi
 @cached(ttl=7200, timeout=1200)
 async def get_top_trending_tags_summary(context):
     """Get top 50 trending tags among pending posts."""
-    # Same results, more overhead:
-    #return [tag['name'] for tag in await get_trending_tags('', 50)]
-    sql = """
-        SELECT category
-          FROM hive_posts_cache
-         WHERE is_paidout = '0'
-      GROUP BY category
-      ORDER BY SUM(payout) DESC
-         LIMIT 50
-    """
+    sql = "SELECT condenser_get_top_trending_tags_summary(50)"
     return await context['db'].query_col(sql)
 
 @return_error_info
@@ -24,34 +15,13 @@ async def get_top_trending_tags_summary(context):
 async def get_trending_tags(context, start_tag: str = '', limit: int = 250):
     """Get top 250 trending tags among pending posts, with stats."""
 
-    limit = valid_limit(limit, ubound=250)
-    start_tag = valid_tag(start_tag or '', allow_empty=True)
+    limit = valid_limit(limit, 250, 250)
+    start_tag = valid_tag(start_tag, allow_empty=True)
 
-    if start_tag:
-        seek = """
-          HAVING SUM(payout) <= (
-            SELECT SUM(payout)
-              FROM hive_posts_cache
-             WHERE is_paidout = '0'
-               AND category = :start_tag)
-        """
-    else:
-        seek = ''
-
-    sql = """
-      SELECT category,
-             COUNT(*) AS total_posts,
-             SUM(CASE WHEN depth = 0 THEN 1 ELSE 0 END) AS top_posts,
-             SUM(payout) AS total_payouts
-        FROM hive_posts_cache
-       WHERE is_paidout = '0'
-    GROUP BY category %s
-    ORDER BY SUM(payout) DESC
-       LIMIT :limit
-    """ % seek
+    sql = "SELECT * FROM condenser_get_trending_tags( (:tag)::VARCHAR, :limit )"
 
     out = []
-    for row in await context['db'].query_all(sql, limit=limit, start_tag=start_tag):
+    for row in await context['db'].query_all(sql, limit=limit, tag=start_tag):
         out.append({
             'name': row['category'],
             'comments': row['total_posts'] - row['top_posts'],
diff --git a/hive/server/database_api/__init__.py b/hive/server/database_api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..609e45795e4f2fd34c029573df6487bd9d366dec
--- /dev/null
+++ b/hive/server/database_api/__init__.py
@@ -0,0 +1 @@
+"""Hive database_api methods and support."""
diff --git a/hive/server/database_api/methods.py b/hive/server/database_api/methods.py
new file mode 100644
index 0000000000000000000000000000000000000000..405da77083772d51331f15630b5be24846771d37
--- /dev/null
+++ b/hive/server/database_api/methods.py
@@ -0,0 +1,241 @@
+# pylint: disable=too-many-arguments,line-too-long,too-many-lines
+from enum import Enum
+
+from hive.server.common.helpers import return_error_info, valid_limit, valid_account, valid_permlink, valid_date
+from hive.server.database_api.objects import database_post_object
+from hive.server.common.helpers import json_date
+from hive.utils.normalize import escape_characters
+
+@return_error_info
+async def list_comments(context, start: list, limit: int = 1000, order: str = None):
+    """Returns all comments, starting with the specified options."""
+
+    supported_order_list = ['by_cashout_time', 'by_permlink', 'by_root', 'by_parent', 'by_last_update', 'by_author_last_update']
+    assert order is not None, "missing a required argument: 'order'"
+    assert order in supported_order_list, "Unsupported order, valid orders: {}".format(", ".join(supported_order_list))
+    limit = valid_limit(limit, 1000, 1000)
+    db = context['db']
+
+    result = []
+    if order == 'by_cashout_time':
+        assert len(start) == 3, "Expecting three arguments in 'start' array: cashout time, optional page start author and permlink"
+        cashout_time = start[0]
+        valid_date(cashout_time)
+        if cashout_time[0:4] == '1969':
+            cashout_time = "infinity"
+        author = start[1]
+        valid_account(author, allow_empty=True)
+        permlink = start[2]
+        valid_permlink(permlink, allow_empty=True)
+        sql = "SELECT * FROM list_comments_by_cashout_time(:cashout_time, :author, :permlink, :limit)"
+        result = await db.query_all(sql, cashout_time=cashout_time, author=author, permlink=permlink, limit=limit)
+    elif order == 'by_permlink':
+        assert len(start) == 2, "Expecting two arguments in 'start' array: author and permlink"
+        author = start[0]
+        assert isinstance(author, str), "invalid account name type"
+        permlink = start[1]
+        assert isinstance(permlink, str), "permlink must be string"
+        sql = "SELECT * FROM list_comments_by_permlink(:author, :permlink, :limit)"
+        result = await db.query_all(sql, author=author, permlink=permlink, limit=limit)
+    elif order == 'by_root':
+        assert len(start) == 4, "Expecting 4 arguments in 'start' array: discussion root author and permlink, optional page start author and permlink"
+        root_author = start[0]
+        valid_account(root_author)
+        root_permlink = start[1]
+        valid_permlink(root_permlink)
+        start_post_author = start[2]
+        valid_account(start_post_author, allow_empty=True)
+        start_post_permlink = start[3]
+        valid_permlink(start_post_permlink, allow_empty=True)
+        sql = "SELECT * FROM list_comments_by_root(:root_author, :root_permlink, :start_post_author, :start_post_permlink, :limit)"
+        result = await db.query_all(sql, root_author=root_author, root_permlink=root_permlink, start_post_author=start_post_author, start_post_permlink=start_post_permlink, limit=limit)
+    elif order == 'by_parent':
+        assert len(start) == 4, "Expecting 4 arguments in 'start' array: parent post author and permlink, optional page start author and permlink"
+        parent_author = start[0]
+        valid_account(parent_author)
+        parent_permlink = start[1]
+        valid_permlink(parent_permlink)
+        start_post_author = start[2]
+        valid_account(start_post_author, allow_empty=True)
+        start_post_permlink = start[3]
+        valid_permlink(start_post_permlink, allow_empty=True)
+        sql = "SELECT * FROM list_comments_by_parent(:parent_author, :parent_permlink, :start_post_author, :start_post_permlink, :limit)"
+        result = await db.query_all(sql, parent_author=parent_author, parent_permlink=parent_permlink, start_post_author=start_post_author, start_post_permlink=start_post_permlink, limit=limit)
+    elif order == 'by_last_update':
+        assert len(start) == 4, "Expecting 4 arguments in 'start' array: parent author, update time, optional page start author and permlink"
+        parent_author = start[0]
+        valid_account(parent_author)
+        updated_at = start[1]
+        valid_date(updated_at)
+        start_post_author = start[2]
+        valid_account(start_post_author, allow_empty=True)
+        start_post_permlink = start[3]
+        valid_permlink(start_post_permlink, allow_empty=True)
+        sql = "SELECT * FROM list_comments_by_last_update(:parent_author, :updated_at, :start_post_author, :start_post_permlink, :limit)"
+        result = await db.query_all(sql, parent_author=parent_author, updated_at=updated_at, start_post_author=start_post_author, start_post_permlink=start_post_permlink, limit=limit)
+    elif order == 'by_author_last_update':
+        assert len(start) == 4, "Expecting 4 arguments in 'start' array: author, update time, optional page start author and permlink"
+        author = start[0]
+        valid_account(author)
+        updated_at = start[1]
+        valid_date(updated_at)
+        start_post_author = start[2]
+        valid_account(start_post_author, allow_empty=True)
+        start_post_permlink = start[3]
+        valid_permlink(start_post_permlink, allow_empty=True)
+        sql = "SELECT * FROM list_comments_by_author_last_update(:author, :updated_at, :start_post_author, :start_post_permlink, :limit)"
+        result = await db.query_all(sql, author=author, updated_at=updated_at, start_post_author=start_post_author, start_post_permlink=start_post_permlink, limit=limit)
+
+    return { "comments": [database_post_object(dict(row)) for row in result] }
+
+@return_error_info
+async def find_comments(context, comments: list):
+    """ Search for comments: limit and order is ignored in hive code """
+    result = []
+
+    assert isinstance(comments, list), "Expected array of author+permlink pairs"
+    assert len(comments) <= 1000, "Parameter count is greater than max allowed (1000)"
+    db = context['db']
+
+    SQL_TEMPLATE = """
+        SELECT
+            hp.id,
+            hp.community_id,
+            hp.author,
+            hp.permlink,
+            hp.title,
+            hp.body,
+            hp.category,
+            hp.depth,
+            hp.promoted,
+            hp.payout,
+            hp.last_payout_at,
+            hp.cashout_time,
+            hp.is_paidout,
+            hp.children,
+            hp.votes,
+            hp.created_at,
+            hp.updated_at,
+            hp.rshares,
+            hp.json,
+            hp.is_hidden,
+            hp.is_grayed,
+            hp.total_votes,
+            hp.net_votes,
+            hp.total_vote_weight,
+            hp.parent_author,
+            hp.parent_permlink_or_category,
+            hp.curator_payout_value,
+            hp.root_author,
+            hp.root_permlink,
+            hp.max_accepted_payout,
+            hp.percent_hbd,
+            hp.allow_replies,
+            hp.allow_votes,
+            hp.allow_curation_rewards,
+            hp.beneficiaries,
+            hp.url,
+            hp.root_title,
+            hp.abs_rshares,
+            hp.active,
+            hp.author_rewards
+        FROM
+            hive_posts_view hp
+        JOIN (VALUES {}) AS t (author, permlink, number) ON hp.author = t.author AND hp.permlink = t.permlink
+        WHERE
+            NOT hp.is_muted
+        ORDER BY t.number
+    """
+
+    idx = 0
+    values = ""
+    for arg in comments:
+        if not isinstance(arg, list) or len(arg) < 2:
+            continue
+        author = arg[0]
+        permlink = arg[1]
+        if not isinstance(author, str) or not isinstance(permlink, str):
+            continue
+        if idx > 0:
+            values += ","
+        values += "({},{},{})".format(escape_characters(author), escape_characters(permlink), idx)
+        idx += 1
+    sql = SQL_TEMPLATE.format(values)
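+    # For comments == [["alice", "a-post"], ["bob", "b-post"]] (hypothetical input) the generated
+    # clause is roughly: VALUES ('alice','a-post',0),('bob','b-post',1),
+    # assuming escape_characters() returns a quoted SQL string literal.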
+
+    if idx > 0:
+        rows = await db.query_all(sql)
+        for row in rows:
+            cpo = database_post_object(dict(row))
+            result.append(cpo)
+
+    return { "comments": result }
+
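+# Selects the shape of vote objects returned to the different API flavors (see api_vote_info below).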
+class VotesPresentation(Enum):
+    ActiveVotes = 1
+    DatabaseApi = 2
+    CondenserApi = 3
+    BridgeApi = 4
+
+def api_vote_info(rows, votes_presentation):
+    """Render vote rows in the shape expected by the requested API."""
+    ret = []
+    for row in rows:
+        if votes_presentation == VotesPresentation.DatabaseApi:
+            ret.append(dict(id = row.id, voter = row.voter, author = row.author, permlink = row.permlink,
+                            weight = row.weight, rshares = row.rshares, vote_percent = row.percent,
+                            last_update = json_date(row.last_update), num_changes = row.num_changes))
+        elif votes_presentation == VotesPresentation.CondenserApi:
+            ret.append(dict(percent = str(row.percent), reputation = row.reputation,
+                            rshares = row.rshares, voter = row.voter))
+        elif votes_presentation == VotesPresentation.BridgeApi:
+            ret.append(dict(rshares = row.rshares, voter = row.voter))
+        else:
+            ret.append(dict(percent = row.percent, reputation = row.reputation,
+                            rshares = row.rshares, time = json_date(row.last_update),
+                            voter = row.voter, weight = row.weight))
+    return ret
+
+@return_error_info
+async def find_votes_impl(db, author: str, permlink: str, votes_presentation, limit: int = 1000):
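+    """Shared helper: fetch votes for author/permlink and render them for the given presentation."""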
+    sql = "SELECT * FROM find_votes(:author,:permlink,:limit)"
+    rows = await db.query_all(sql, author=author, permlink=permlink, limit=limit)
+    return api_vote_info(rows, votes_presentation)
+
+@return_error_info
+async def find_votes(context, author: str, permlink: str):
+    """ Returns all votes for the given post """
+    valid_account(author)
+    valid_permlink(permlink)
+    return { 'votes': await find_votes_impl(context['db'], author, permlink, VotesPresentation.DatabaseApi) }
+
+@return_error_info
+async def list_votes(context, start: list, limit: int = 1000, order: str = None):
+    """ Returns all votes, starting with the specified voter and/or author and permlink. """
+    supported_order_list = ["by_comment_voter", "by_voter_comment"]
+    assert order is not None, "missing a required argument: 'order'"
+    assert order in supported_order_list, "Unsupported order, valid orders: {}".format(", ".join(supported_order_list))
+    limit = valid_limit(limit, 1000, 1000)
+    db = context['db']
+
+    if order == "by_voter_comment":
+        assert len(start) == 3, "Expecting 3 arguments in 'start' array: voter, optional page start author and permlink"
+        voter = start[0]
+        valid_account(voter)
+        start_post_author = start[1]
+        valid_account(start_post_author, allow_empty=True)
+        start_post_permlink = start[2]
+        valid_permlink(start_post_permlink, allow_empty=True)
+        sql = "SELECT * FROM list_votes_by_voter_comment(:voter,:author,:permlink,:limit)"
+        rows = await db.query_all(sql, voter=voter, author=start_post_author, permlink=start_post_permlink, limit=limit)
+    else:
+        assert len(start) == 3, "Expecting 3 arguments in 'start' array: post author and permlink, optional page start voter"
+        author = start[0]
+        valid_account(author)
+        permlink = start[1]
+        valid_permlink(permlink)
+        start_voter = start[2]
+        valid_account(start_voter, allow_empty=True)
+        sql = "SELECT * FROM list_votes_by_comment_voter(:voter,:author,:permlink,:limit)"
+        rows = await db.query_all(sql, voter=start_voter, author=author, permlink=permlink, limit=limit)
+    return { 'votes': api_vote_info(rows, VotesPresentation.DatabaseApi) }
+
diff --git a/hive/server/database_api/objects.py b/hive/server/database_api/objects.py
new file mode 100644
index 0000000000000000000000000000000000000000..810093e726731917dec02fcfe04348ba7e6dcb97
--- /dev/null
+++ b/hive/server/database_api/objects.py
@@ -0,0 +1,69 @@
+from hive.server.common.helpers import json_date
+from hive.utils.normalize import sbd_amount, to_nai
+
+def _amount(amount, asset='HBD'):
+    """Return a steem-style amount string given a (numeric, asset-str)."""
+    assert asset == 'HBD', 'unhandled asset %s' % asset
+    return "%.3f HBD" % amount
+
+def database_post_object(row, truncate_body=0):
+    """Given a hive_posts row, create a legacy-style post object."""
+
+    paid = row['is_paidout']
+
+    post = {}
+    post['active'] = json_date(row['active'])
+    post['author_rewards'] = row['author_rewards']
+    post['id'] = row['id']
+    post['author'] = row['author']
+    post['permlink'] = row['permlink']
+    post['category'] = row['category'] if 'category' in row else 'undefined'
+
+    post['title'] = row['title']
+    post['body'] = row['body'][0:truncate_body] if truncate_body else row['body']
+    post['json_metadata'] = row['json']
+
+    post['created'] = json_date(row['created_at'])
+    post['last_update'] = json_date(row['updated_at'])
+    post['depth'] = row['depth']
+    post['children'] = row['children']
+
+    post['last_payout'] = json_date(row['last_payout_at'])
+    post['cashout_time'] = json_date(row['cashout_time'])
+    post['max_cashout_time'] = json_date(None) # ABW: only relevant up to HF17, timestamp::max for all posts later (and also all paid)
+
+    curator_payout = sbd_amount(row['curator_payout_value'])
+    post['curator_payout_value'] = to_nai(_amount(curator_payout))
+    post['total_payout_value'] = to_nai(_amount(row['payout'] - curator_payout))
+
+    post['reward_weight'] = 10000 # ABW: only relevant between HF12 and HF17 and we don't have access to correct value
+
+    post['root_author'] = row['root_author']
+    post['root_permlink'] = row['root_permlink']
+
+    post['allow_replies'] = row['allow_replies']
+    post['allow_votes'] = row['allow_votes']
+    post['allow_curation_rewards'] = row['allow_curation_rewards']
+
+    post['parent_author'] = row['parent_author']
+    post['parent_permlink'] = row['parent_permlink_or_category']
+
+    post['beneficiaries'] = row['beneficiaries']
+    post['max_accepted_payout'] = to_nai(row['max_accepted_payout'])
+    post['percent_hbd'] = row['percent_hbd']
+    post['net_votes'] = row['net_votes']
+
+    if paid:
+        post['total_vote_weight'] = 0
+        post['vote_rshares'] = 0
+        post['net_rshares'] = 0 # if row['rshares'] > 0 else row['rshares'] ABW: used to be like this but after HF19 cashouts disappear and all give 0
+        post['abs_rshares'] = 0
+        post['children_abs_rshares'] = 0
+    else:
+        post['total_vote_weight'] = row['total_vote_weight']
+        post['vote_rshares'] = ( row['rshares'] + row['abs_rshares'] ) // 2 # effectively sum of all positive rshares
+        post['net_rshares'] = row['rshares']
+        post['abs_rshares'] = row['abs_rshares']
+        post['children_abs_rshares'] = 0 # TODO - ABW: I'm not sure about that, it is costly and useless (used to be part of mechanism to determine cashout time)
+
+    return post
diff --git a/hive/server/db.py b/hive/server/db.py
index e7608b4b6b12fb96ec02eeeeff4b67f81ebfd4f3..60ce9391274d8e01a251d820e0cf90410c81bef5 100644
--- a/hive/server/db.py
+++ b/hive/server/db.py
@@ -38,13 +38,20 @@ class Db:
     async def init(self, url):
         """Initialize the aiopg.sa engine."""
         conf = make_url(url)
-        self.db = await create_engine(user=conf.username,
-                                      database=conf.database,
-                                      password=conf.password,
-                                      host=conf.host,
-                                      port=conf.port,
-                                      maxsize=20,
-                                      **conf.query)
+        dsn = {}
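+        # Pass only the components actually present in the URL, e.g. (illustrative)
+        # postgresql://hive:pass@localhost:5432/hive -> user, password, host, port, database.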
+        if conf.username:
+            dsn['user'] = conf.username
+        if conf.database:
+            dsn['database'] = conf.database
+        if conf.password:
+            dsn['password'] = conf.password
+        if conf.host:
+            dsn['host'] = conf.host
+        if conf.port:
+            dsn['port'] = conf.port
+        if 'application_name' not in conf.query:
+            dsn['application_name'] = 'hive_server'
+        self.db = await create_engine(**dsn, maxsize=20, **conf.query)
 
     def close(self):
         """Close pool."""
diff --git a/hive/server/follow_api/__init__.py b/hive/server/follow_api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hive/server/follow_api/methods.py b/hive/server/follow_api/methods.py
new file mode 100644
index 0000000000000000000000000000000000000000..f75c82c29bdfd0241e1932f4c0e7da70f01fbb95
--- /dev/null
+++ b/hive/server/follow_api/methods.py
@@ -0,0 +1,7 @@
+from hive.server.condenser_api.methods import _get_account_reputations_impl
+from hive.server.common.helpers import return_error_info
+
+@return_error_info
+async def get_account_reputations(context, account_lower_bound: str = '', limit: int = 1000):
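+    """follow_api alias: return account reputations via the condenser_api implementation."""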
+    db = context['db']
+    return await _get_account_reputations_impl(db, False, account_lower_bound, limit)
diff --git a/hive/server/hive_api/common.py b/hive/server/hive_api/common.py
index 17da19005c18222f45273537606a31a6a93b9c0a..fe39d4c82e296e9ac2ae57010f56fee6e483fefa 100644
--- a/hive/server/hive_api/common.py
+++ b/hive/server/hive_api/common.py
@@ -14,26 +14,11 @@ def __used_refs():
 
 async def get_community_id(db, name):
     """Get community id from db."""
-    return await db.query_one("SELECT id FROM hive_communities WHERE name = :name",
-                              name=name)
-
-async def url_to_id(db, url):
-    """Get post_id based on post url."""
-    return await get_post_id(db, *split_url(url))
-
-async def get_post_id(db, author, permlink):
-    """Get post_id based on author/permlink."""
-    sql = "SELECT id FROM hive_posts WHERE author = :a AND permlink = :p"
-    _id = await db.query_one(sql, a=author, p=permlink)
-    assert _id, 'post id not found'
-    return _id
+    return await db.query_one("SELECT find_community_id( (:name)::VARCHAR, True )", name=name)
 
 async def get_account_id(db, name):
     """Get account id from account name."""
-    assert name, 'no account name specified'
-    _id = await db.query_one("SELECT id FROM hive_accounts WHERE name = :n", n=name)
-    assert _id, "account not found: `%s`" % name
-    return _id
+    return await db.query_one("SELECT find_account_id( (:name)::VARCHAR, True )", name=name)
 
 def estimated_sp(vests):
     """Convert VESTS to SP units for display."""
diff --git a/hive/server/hive_api/community.py b/hive/server/hive_api/community.py
index 47963d1ce30661bd7e23a315713acfbed5aa497b..1faf7793d2a4cb6e091a1b01ad41d7c6e649ee9b 100644
--- a/hive/server/hive_api/community.py
+++ b/hive/server/hive_api/community.py
@@ -4,8 +4,8 @@ from datetime import datetime
 from dateutil.relativedelta import relativedelta
 import ujson as json
 
-from hive.server.hive_api.common import (get_account_id, get_community_id, valid_limit)
-from hive.server.common.helpers import return_error_info
+from hive.server.hive_api.common import (get_account_id, get_community_id, valid_account, valid_limit)
+from hive.server.common.helpers import return_error_info, last_month
 
 def days_ago(days):
     """Get the date `n` days ago."""
@@ -35,10 +35,10 @@ async def get_community(context, name, observer=None):
     """
     db = context['db']
     cid = await get_community_id(db, name)
-    assert cid, 'community not found'
     communities = await load_communities(db, [cid], lite=False)
 
     if observer:
+        observer = valid_account(observer)
         observer_id = await get_account_id(db, observer)
         await _append_observer_roles(db, communities, observer_id)
         await _append_observer_subs(db, communities, observer_id)
@@ -49,8 +49,9 @@ async def get_community(context, name, observer=None):
 async def get_community_context(context, name, account):
     """For a community/account: returns role, title, subscribed state"""
     db = context['db']
+    account = valid_account(account)
     cid = await get_community_id(db, name)
-    assert cid, 'community not found'
+
     aid = await get_account_id(db, account)
     assert aid, 'account not found'
 
@@ -70,13 +71,14 @@ async def get_community_context(context, name, account):
 @return_error_info
 async def list_top_communities(context, limit=25):
     """List top communities. Returns lite community list."""
-    assert limit < 100
-    #sql = """SELECT name, title FROM hive_communities
-    #          WHERE rank > 0 ORDER BY rank LIMIT :limit"""
+    limit = valid_limit(limit, 100, 25)
     sql = """SELECT name, title FROM hive_communities
-              WHERE id = 1344247 OR rank > 0
-           ORDER BY (CASE WHEN id = 1344247 THEN 0 ELSE rank END)
-              LIMIT :limit"""
+              WHERE rank > 0 ORDER BY rank LIMIT :limit"""
+    #ABW: restored older version since hardcoded id is out of the question
+    #sql = """SELECT name, title FROM hive_communities
+    #          WHERE id = 1344247 OR rank > 0
+    #       ORDER BY (CASE WHEN id = 1344247 THEN 0 ELSE rank END)
+    #          LIMIT :limit"""
 
     out = await context['db'].query_all(sql, limit=limit)
 
@@ -84,9 +86,9 @@ async def list_top_communities(context, limit=25):
 
 
 @return_error_info
-async def list_pop_communities(context, limit=25):
+async def list_pop_communities(context, limit: int = 25):
     """List communities by new subscriber count. Returns lite community list."""
-    limit = valid_limit(limit, 25)
+    limit = valid_limit(limit, 25, 25)
     sql = """SELECT name, title
                FROM hive_communities
                JOIN (
@@ -98,7 +100,7 @@ async def list_pop_communities(context, limit=25):
                  ON stats.community_id = id
            ORDER BY newsubs DESC
               LIMIT :limit"""
-    out = await context['db'].query_all(sql, limit=limit)
+    out = await context['db'].query_all(sql, limit=limit, cutoff=last_month())
 
     return [(r[0], r[1]) for r in out]
 
@@ -107,6 +109,7 @@ async def list_pop_communities(context, limit=25):
 async def list_all_subscriptions(context, account):
     """Lists all communities `account` subscribes to, plus role and title in each."""
     db = context['db']
+    account = valid_account(account)
     account_id = await get_account_id(db, account)
 
     sql = """SELECT c.name, c.title, COALESCE(r.role_id, 0), COALESCE(r.title, '')
@@ -122,7 +125,6 @@ async def list_all_subscriptions(context, account):
 @return_error_info
 async def list_subscribers(context, community):
     """Lists subscribers of `community`."""
-    #limit = valid_limit(limit, 100)
     db = context['db']
     cid = await get_community_id(db, community)
 
@@ -132,7 +134,7 @@ async def list_subscribers(context, community):
                                  AND hs.community_id = hr.community_id
                JOIN hive_accounts ha ON hs.account_id = ha.id
               WHERE hs.community_id = :cid
-           ORDER BY hs.created_at DESC
+           ORDER BY hs.created_at DESC, hs.id ASC
               LIMIT 250"""
     rows = await db.query_all(sql, cid=cid)
     return [(r['name'], ROLES[r['role_id'] or 0], r['title'],
@@ -142,7 +144,7 @@ async def list_subscribers(context, community):
 async def list_communities(context, last='', limit=100, query=None, sort='rank', observer=None):
     """List all communities, paginated. Returns lite community list."""
     # pylint: disable=too-many-arguments, too-many-locals
-    limit = valid_limit(limit, 100)
+    limit = valid_limit(limit, 100, 100)
 
     db = context['db']
     assert sort in ('rank', 'new', 'subs'), 'invalid sort'
@@ -179,6 +181,7 @@ async def list_communities(context, last='', limit=100, query=None, sort='rank',
     # append observer context, leadership data
     communities = await load_communities(db, ids, lite=True)
     if observer:
+        observer = valid_account(observer)
         observer_id = await get_account_id(db, observer)
         await _append_observer_subs(db, communities, observer_id)
         await _append_observer_roles(db, communities, observer_id)
@@ -274,7 +277,7 @@ async def _community_team(db, community_id):
                JOIN hive_accounts a ON r.account_id = a.id
               WHERE r.community_id = :community_id
                 AND r.role_id BETWEEN 4 AND 8
-           ORDER BY r.role_id DESC"""
+           ORDER BY r.role_id DESC, r.account_id DESC"""
     rows = await db.query_all(sql, community_id=community_id)
     return [(r['name'], ROLES[r['role_id']], r['title']) for r in rows]
 
@@ -307,7 +310,7 @@ async def _append_admins(db, communities):
     ids = communities.keys()
     sql = """SELECT community_id, ha.name FROM hive_roles hr
                JOIN hive_accounts ha ON hr.account_id = ha.id
-              WHERE role_id = 6 AND community_id IN :ids"""
+              WHERE role_id = 6 AND community_id IN :ids ORDER BY ha.name"""
     for row in await db.query_all(sql, ids=tuple(ids)):
         _id = row[0]
         if 'admins' not in communities[_id]:
@@ -345,16 +348,25 @@ async def top_community_authors(context, community):
 async def top_community_muted(context, community):
     """Get top authors (by SP) who are muted in a community."""
     db = context['db']
+    cid = await get_community_id(db, community)
     sql = """SELECT a.name, a.voting_weight, r.title FROM hive_accounts a
                JOIN hive_roles r ON a.id = r.account_id
               WHERE r.community_id = :community_id AND r.role_id < 0
            ORDER BY voting_weight DESC LIMIT 5"""
-    return await db.query(sql, community_id=await get_community_id(db, community))
+    return await db.query(sql, community_id=cid)
 
 async def _top_community_posts(db, community, limit=50):
     # TODO: muted equivalent
-    sql = """SELECT author, votes, payout FROM hive_posts_cache
-              WHERE category = :community AND is_paidout = '0'
-                AND post_id IN (SELECT id FROM hive_posts WHERE is_muted = '0')
-           ORDER BY payout DESC LIMIT :limit"""
+    sql = """
+    SELECT ha_a.name as author,
+        0 as votes,
+        ( hp.payout + hp.pending_payout ) as payout
+    FROM hive_posts hp
+    INNER JOIN hive_accounts ha_a ON ha_a.id = hp.author_id
+    LEFT JOIN hive_post_data hpd ON hpd.id = hp.id
+    LEFT JOIN hive_category_data hcd ON hcd.id = hp.category_id
+    WHERE hcd.category = :community AND hp.counter_deleted = 0 AND NOT hp.is_paidout
+        AND hp.id IN (SELECT id FROM hive_posts WHERE is_muted = '0')
+    ORDER BY ( hp.payout + hp.pending_payout ) DESC LIMIT :limit"""
+
     return await db.query_all(sql, community=community, limit=limit)
diff --git a/hive/server/hive_api/notify.py b/hive/server/hive_api/notify.py
index 3b1a19e1ac18c5a5d678fb1de7f0d488bfc2b177..9a9e227effe2469774e8fb92cec1719e743a5288 100644
--- a/hive/server/hive_api/notify.py
+++ b/hive/server/hive_api/notify.py
@@ -1,9 +1,9 @@
 """Hive API: Notifications"""
 import logging
 
-from hive.server.common.helpers import return_error_info, json_date
+from hive.server.common.helpers import return_error_info, valid_account, valid_permlink, valid_number, valid_limit, valid_score, json_date
 from hive.indexer.notify import NotifyType
-from hive.server.hive_api.common import get_account_id, valid_limit, get_post_id
+from hive.server.common.mutes import Mutes
 
 log = logging.getLogger(__name__)
 
@@ -22,7 +22,7 @@ STRINGS = {
 
     # personal
     NotifyType.error:          'error: <payload>',
-    NotifyType.reblog:         '<src> resteemed your post',
+    NotifyType.reblog:         '<src> reblogged your post',
     NotifyType.follow:         '<src> followed you',
     NotifyType.reply:          '<src> replied to your post',
     NotifyType.reply_comment:  '<src> replied to your comment',
@@ -43,54 +43,49 @@ STRINGS = {
 async def unread_notifications(context, account, min_score=25):
     """Load notification status for a named account."""
     db = context['db']
-    account_id = await get_account_id(db, account)
-
-    sql = """SELECT lastread_at,
-                    (SELECT COUNT(*) FROM hive_notifs
-                      WHERE dst_id = ha.id
-                        AND score >= :min_score
-                        AND created_at > lastread_at) unread
-               FROM hive_accounts ha
-              WHERE id = :account_id"""
-    row = await db.query_row(sql, account_id=account_id, min_score=min_score)
+    valid_account(account)
+    min_score = valid_score(min_score, 100, 25)
+
+    sql = """SELECT * FROM get_number_of_unread_notifications( :account, (:min_score)::SMALLINT)"""
+    row = await db.query_row(sql, account=account, min_score=min_score)
     return dict(lastread=str(row['lastread_at']), unread=row['unread'])
 
 @return_error_info
 async def account_notifications(context, account, min_score=25, last_id=None, limit=100):
     """Load notifications for named account."""
     db = context['db']
-    limit = valid_limit(limit, 100)
-    account_id = await get_account_id(db, account)
-
-    if account[:5] == 'hive-': min_score = 0
+    valid_account(account)
+    min_score = valid_score(min_score, 100, 25)
+    last_id = valid_number(last_id, 0, "last_id")
+    limit = valid_limit(limit, 100, 100)
 
-    seek = ' AND hn.id < :last_id' if last_id else ''
-    col = 'hn.community_id' if account[:5] == 'hive-' else 'dst_id'
-    sql = _notifs_sql(col + " = :dst_id" + seek)
+    sql_query = "SELECT * FROM account_notifications( (:account)::VARCHAR, (:min_score)::SMALLINT, (:last_id)::BIGINT, (:limit)::SMALLINT )"
 
-    rows = await db.query_all(sql, min_score=min_score, dst_id=account_id,
-                              last_id=last_id, limit=limit)
+    rows = await db.query_all(sql_query, account=account, min_score=min_score, last_id=last_id, limit=limit)
     return [_render(row) for row in rows]
 
 @return_error_info
-async def post_notifications(context, author, permlink, min_score=25, last_id=None, limit=100):
+async def post_notifications(context, author: str, permlink: str, min_score: int = 25, last_id: int = None, limit: int = 100):
     """Load notifications for a specific post."""
     # pylint: disable=too-many-arguments
     db = context['db']
-    limit = valid_limit(limit, 100)
-    post_id = await get_post_id(db, author, permlink)
+    valid_account(author)
+    valid_permlink(permlink)
+    min_score = valid_score(min_score, 100, 25)
+    last_id = valid_number(last_id, 0, "last_id")
+    limit = valid_limit(limit, 100, 100)
 
-    seek = ' AND hn.id < :last_id' if last_id else ''
-    sql = _notifs_sql("post_id = :post_id" + seek)
+    sql_query = "SELECT * FROM post_notifications( (:author)::VARCHAR, (:permlink)::VARCHAR, (:min_score)::SMALLINT, (:last_id)::BIGINT, (:limit)::SMALLINT )"
 
-    rows = await db.query_all(sql, min_score=min_score, post_id=post_id,
-                              last_id=last_id, limit=limit)
+    rows = await db.query_all(sql_query, author=author, permlink=permlink, min_score=min_score, last_id=last_id, limit=limit)
     return [_render(row) for row in rows]
 
 def _notifs_sql(where):
     sql = """SELECT hn.id, hn.type_id, hn.score, hn.created_at,
                     src.name src, dst.name dst,
-                    hp.author, hp.permlink, hc.name community,
+                    (SELECT name FROM hive_accounts WHERE id = hp.author_id) as author,
+                    (SELECT permlink FROM hive_permlink_data WHERE id = hp.permlink_id) as permlink,
+                    hc.name community,
                     hc.title community_title, payload
                FROM hive_notifs hn
           LEFT JOIN hive_accounts src ON hn.src_id = src.id
@@ -99,7 +94,7 @@ def _notifs_sql(where):
           LEFT JOIN hive_communities hc ON hn.community_id = hc.id
           WHERE %s
             AND score >= :min_score
-            AND COALESCE(hp.is_deleted, False) = False
+            AND COALESCE(hp.counter_deleted, 0) = 0
        ORDER BY hn.id DESC
           LIMIT :limit"""
     return sql % where
@@ -124,10 +119,7 @@ def _render_msg(row):
     msg = STRINGS[row['type_id']]
     payload = row['payload']
     if row['type_id'] == NotifyType.vote and payload:
-        amt = float(payload[1:])
-        if amt >= 0.01:
-            msg += ' (<payload>)'
-            payload = "$%.2f" % amt
+        msg += ' <payload>'
 
     if '<dst>' in msg: msg = msg.replace('<dst>', '@' + row['dst'])
     if '<src>' in msg: msg = msg.replace('<src>', '@' + row['src'])
diff --git a/hive/server/hive_api/objects.py b/hive/server/hive_api/objects.py
index c8c74a37ed8f28951d1f9243b31d60e90b893a2f..560269c8780d9574f34f42a4eb7ef603ad3c370f 100644
--- a/hive/server/hive_api/objects.py
+++ b/hive/server/hive_api/objects.py
@@ -1,6 +1,7 @@
 """Hive API: account, post, and comment object retrieval"""
 import logging
-from hive.server.hive_api.common import get_account_id, estimated_sp
+from hive.server.hive_api.common import get_account_id, valid_account
+from hive.utils.account import safe_db_profile_metadata
 log = logging.getLogger(__name__)
 
 # Account objects
@@ -9,33 +10,33 @@ log = logging.getLogger(__name__)
 async def accounts_by_name(db, names, observer=None, lite=True):
     """Find and return accounts by `name`."""
 
-    sql = """SELECT id, name, display_name, about, created_at,
-                    vote_weight, rank, followers, following %s
+    sql = """SELECT id, name, created_at,
+                    rank, followers, following, posting_json_metadata, json_metadata
                FROM hive_accounts WHERE name IN :names"""
-    fields = '' if lite else ', location, website, profile_image, cover_image'
-    rows = await db.query_all(sql % fields, names=tuple(names))
+    rows = await db.query_all(sql, names=tuple(names))
 
     accounts = {}
     for row in rows:
+        profile = safe_db_profile_metadata(row['posting_json_metadata'], row['json_metadata'])
         account = {
             'id': row['id'],
             'name': row['name'],
             'created': str(row['created_at']).split(' ')[0],
-            'sp': int(estimated_sp(row['vote_weight'])),
             'rank': row['rank'],
             'followers': row['followers'],
             'following': row['following'],
-            'display_name': row['display_name'],
-            'about': row['about'],
+            'display_name': profile['name'],
+            'about': profile['about'],
         }
         if not lite:
-            account['location'] = row['location']
-            account['website'] = row['website']
-            account['profile_image'] = row['profile_image']
-            account['cover_image'] = row['cover_image']
+            account['location'] = profile['location']
+            account['website'] = profile['website']
+            account['profile_image'] = profile['profile_image']
+            account['cover_image'] = profile['cover_image']
         accounts[account['id']] = account
 
     if observer:
+        observer = valid_account(observer)
         await _follow_contexts(db, accounts,
                                observer_id=await get_account_id(db, observer),
                                include_mute=not lite)
@@ -68,10 +69,24 @@ async def comments_by_id(db, ids, observer=None):
     """Given an array of post ids, returns comment objects keyed by id."""
     assert ids, 'no ids passed to comments_by_id'
 
-    sql = """SELECT post_id, author, permlink, body, depth,
-                    payout, payout_at, is_paidout, created_at, updated_at,
-                    rshares, is_hidden, is_grayed, votes
-               FROM hive_posts_cache WHERE post_id IN :ids""" #votes
+    sql = """
+      SELECT
+        hp.id,
+        hp.author,
+        hp.permlink,
+        hp.body,
+        hp.depth,
+        hp.payout,
+        hp.payout_at,
+        hp.is_paidout,
+        hp.created_at,
+        hp.updated_at,
+        hp.rshares,
+        hp.is_hidden,
+        hp.is_grayed,
+        hp.votes
+      FROM hive_posts_view hp
+      WHERE hp.id IN :ids""" #votes
     result = await db.query_all(sql, ids=tuple(ids))
 
     authors = set()
@@ -79,7 +94,7 @@ async def comments_by_id(db, ids, observer=None):
     for row in result:
         top_votes, observer_vote = _top_votes(row, 5, observer)
         post = {
-            'id': row['post_id'],
+            'id': row['id'],
             'author': row['author'],
             'url': row['author'] + '/' + row['permlink'],
             'depth': row['depth'],
@@ -108,11 +123,26 @@ async def posts_by_id(db, ids, observer=None, lite=True):
     """Given a list of post ids, returns lite post objects in the same order."""
 
     # pylint: disable=too-many-locals
-    sql = """SELECT post_id, author, permlink, title, img_url, payout, promoted,
-                    created_at, payout_at, is_nsfw, rshares, votes,
-                    is_muted, is_invalid, %s
-               FROM hive_posts_cache WHERE post_id IN :ids"""
-    fields = ['preview'] if lite else ['body', 'updated_at', 'json']
+    sql = """
+        SELECT 
+          hp.id,
+          hp.author,
+          hp.permlink,
+          hp.title, 
+          hp.img_url,
+          hp.payout,
+          hp.promoted,
+          hp.created_at,
+          hp.payout_at,
+          hp.is_nsfw,
+          hp.rshares,
+          hp.votes,
+          hp.is_muted,
+          hp.is_valid,
+          %s
+        FROM hive_posts_view hp 
+        WHERE id IN :ids"""
+    fields = ['hp.preview'] if lite else ['hp.body', 'hp.updated_at', 'hp.json']
     sql = sql % (', '.join(fields))
 
     reblogged_ids = await _reblogged_ids(db, observer, ids) if observer else []
@@ -124,8 +154,8 @@ async def posts_by_id(db, ids, observer=None, lite=True):
     by_id = {}
     for row in await db.query_all(sql, ids=tuple(ids)):
         assert not row['is_muted']
-        assert not row['is_invalid']
-        pid = row['post_id']
+        assert row['is_valid']
+        pid = row['id']
         top_votes, observer_vote = _top_votes(row, 5, observer)
 
         obj = {
@@ -158,7 +188,7 @@ async def posts_by_id(db, ids, observer=None, lite=True):
             }
 
         authors.add(obj['author'])
-        by_id[row['post_id']] = obj
+        by_id[row['id']] = obj
 
     # in rare cases of cache inconsistency, recover and warn
     missed = set(ids) - by_id.keys()
@@ -172,8 +202,12 @@ async def posts_by_id(db, ids, observer=None, lite=True):
             'accounts': await accounts_by_name(db, authors, observer, lite=True)}
 
 async def _append_flags(db, posts):
-    sql = """SELECT id, parent_id, community_id, category, is_muted, is_valid
-               FROM hive_posts WHERE id IN :ids"""
+    sql = """
+        SELECT
+            hp.id, hp.parent_id, hp.community_id, hcd.category as category, hp.is_muted, hp.is_valid
+        FROM hive_posts hp
+        LEFT JOIN hive_category_data hcd ON hcd.id = hp.category_id
+        WHERE hp.id IN :ids"""
     for row in await db.query_all(sql, ids=tuple(posts.keys())):
         post = posts[row['id']]
         post['parent_id'] = row['parent_id']
@@ -184,9 +218,16 @@ async def _append_flags(db, posts):
     return posts
 
 async def _reblogged_ids(db, observer, post_ids):
-    sql = """SELECT post_id FROM hive_reblogs
-              WHERE account = :observer
-                AND post_id IN :ids"""
+    sql = """
+        SELECT
+            hr.post_id
+        FROM 
+            hive_reblogs hr
+        INNER JOIN hive_accounts ha ON ha.id = hr.blogger_id
+        WHERE
+            ha.name = :observer
+            AND hr.post_id IN :ids
+    """
     return  await db.query_col(sql, observer=observer, ids=tuple(post_ids))
 
 def _top_votes(obj, limit, observer):
diff --git a/hive/server/hive_api/public.py b/hive/server/hive_api/public.py
index 290cf777910345cbc2488d7cd7c75d7264f38548..0009453caec02d25700e01625884f8c2a65590ea 100644
--- a/hive/server/hive_api/public.py
+++ b/hive/server/hive_api/public.py
@@ -7,9 +7,8 @@ from hive.server.hive_api.common import (
     get_account_id, split_url,
     valid_account, valid_permlink, valid_limit)
 from hive.server.condenser_api.cursor import get_followers, get_following
-from hive.server.bridge_api.cursor import (
-    pids_by_blog, pids_by_comments, pids_by_feed_with_reblog)
 
+from hive.db.schema import DB_VERSION as SCHEMA_DB_VERSION
 
 log = logging.getLogger(__name__)
 
@@ -36,75 +35,48 @@ async def get_accounts(context, names, observer=None):
 
 # Follows/mute
 
-async def list_followers(context, account, start='', limit=50, observer=None):
+async def list_followers(context, account: str, start: str = '', limit: int = 50, observer: str = None):
     """Get a list of all accounts following `account`."""
     followers = await get_followers(
         context['db'],
         valid_account(account),
         valid_account(start, allow_empty=True),
-        'blog', valid_limit(limit, 100))
+        1, # blog
+        valid_limit(limit, 100, 50))
     return await accounts_by_name(context['db'], followers, observer, lite=True)
 
-async def list_following(context, account, start='', limit=50, observer=None):
+async def list_following(context, account: str, start: str = '', limit: int = 50, observer: str = None):
     """Get a list of all accounts `account` follows."""
     following = await get_following(
         context['db'],
         valid_account(account),
         valid_account(start, allow_empty=True),
-        'blog', valid_limit(limit, 100))
+        1, # blog
+        valid_limit(limit, 100, 50))
     return await accounts_by_name(context['db'], following, observer, lite=True)
 
 async def list_all_muted(context, account):
     """Get a list of all account names muted by `account`."""
     db = context['db']
+    account = valid_account(account)
     sql = """SELECT a.name FROM hive_follows f
                JOIN hive_accounts a ON f.following_id = a.id
               WHERE follower = :follower AND state = 2"""
     return await db.query_col(sql, follower=get_account_id(db, account))
 
-
-# Account post lists
-
-async def list_account_blog(context, account, limit=10, observer=None, last_post=None):
-    """Get a blog feed (posts and reblogs from the specified account)"""
-    db = context['db']
-
-    post_ids = await pids_by_blog(
-        db,
-        valid_account(account),
-        *split_url(last_post, allow_empty=True),
-        valid_limit(limit, 50))
-    return await posts_by_id(db, post_ids, observer)
-
-async def list_account_posts(context, account, limit=10, observer=None, last_post=None):
-    """Get an account's posts and comments"""
+async def get_info(context):
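+    """Return hivemind version, git revision, database schema version and head block number."""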
     db = context['db']
-    start_author, start_permlink = split_url(last_post, allow_empty=True)
-    assert not start_author or (start_author == account)
-    post_ids = await pids_by_comments(
-        db,
-        valid_account(account),
-        valid_permlink(start_permlink),
-        valid_limit(limit, 50))
-    return await posts_by_id(db, post_ids, observer)
 
-async def list_account_feed(context, account, limit=10, observer=None, last_post=None):
-    """Get all posts (blogs and resteems) from `account`'s follows."""
-    db = context['db']
-    ids_with_reblogs = await pids_by_feed_with_reblog(
-        context['db'],
-        valid_account(account),
-        *split_url(last_post, allow_empty=True),
-        valid_limit(limit, 50))
+    sql = "SELECT num FROM hive_blocks ORDER BY num DESC LIMIT 1"
+    database_head_block = await db.query_one(sql)
 
-    reblog_by = dict(ids_with_reblogs)
-    post_ids = [r[0] for r in ids_with_reblogs]
-    posts = await posts_by_id(db, post_ids, observer)
+    from hive.version import VERSION, GIT_REVISION
 
-    # Merge reblogged_by data into result set
-    for post in posts:
-        rby = set(reblog_by[post['post_id']].split(','))
-        rby.discard(post['author'])
-        if rby: post['reblogged_by'] = list(rby)
+    ret = {
+        "hivemind_version" : VERSION,
+        "hivemind_git_rev" : GIT_REVISION,
+        "database_schema_version" : SCHEMA_DB_VERSION,
+        "database_head_block" : database_head_block
+    }
 
-    return posts
+    return ret
diff --git a/hive/server/hive_api/stats.py b/hive/server/hive_api/stats.py
index 92977a68ec02e4984d5df9bc73a3ae5ecb7ccf93..bd193ad5d8076278bb99e718916431d531031aa4 100644
--- a/hive/server/hive_api/stats.py
+++ b/hive/server/hive_api/stats.py
@@ -2,7 +2,6 @@
 import logging
 
 from hive.server.common.helpers import return_error_info
-from hive.server.common.payout_stats import PayoutStats
 from hive.server.hive_api.common import valid_limit
 
 log = logging.getLogger(__name__)
@@ -21,14 +20,11 @@ def _row(row):
 async def get_payout_stats(context, limit=250):
     """Get payout stats for building treemap."""
     db = context['db']
-    limit = valid_limit(limit, 250)
-
-    stats = PayoutStats.instance()
-    await stats.generate()
+    limit = valid_limit(limit, 250, 250)
 
     sql = """
         SELECT hc.name, hc.title, author, payout, posts, authors
-          FROM payout_stats
+          FROM payout_stats_view
      LEFT JOIN hive_communities hc ON hc.id = community_id
          WHERE (community_id IS NULL AND author IS NOT NULL)
             OR (community_id IS NOT NULL AND author IS NULL)
@@ -39,10 +35,10 @@ async def get_payout_stats(context, limit=250):
     rows = await db.query_all(sql, limit=limit)
     items = list(map(_row, rows))
 
-    sql = """SELECT SUM(payout) FROM payout_stats WHERE author IS NULL"""
+    sql = """SELECT SUM(payout) FROM payout_stats_view WHERE author IS NULL"""
     total = await db.query_one(sql)
 
-    sql = """SELECT SUM(payout) FROM payout_stats
+    sql = """SELECT SUM(payout) FROM payout_stats_view
               WHERE community_id IS NULL AND author IS NULL"""
     blog_ttl = await db.query_one(sql)
 
diff --git a/hive/server/hive_api/thread.py b/hive/server/hive_api/thread.py
deleted file mode 100644
index 27d98423528e2874e31e9c827ee013d99a155ee1..0000000000000000000000000000000000000000
--- a/hive/server/hive_api/thread.py
+++ /dev/null
@@ -1,134 +0,0 @@
-"""Hive API: Threaded discussion handling"""
-import logging
-
-from hive.server.hive_api.common import url_to_id, valid_comment_sort, valid_limit
-from hive.server.hive_api.objects import comments_by_id
-log = logging.getLogger(__name__)
-
-# pylint: disable=too-many-arguments
-
-async def fetch_tree(context, root, sort='top', limit=20, observer=None):
-    """Fetch comment tree. Includes comments and lite author data.
-
-    If community: follows/applies mod rules
-    If blog: hides comments by any muted accounts of the author's
-    Sort: new, old, hot, payout"""
-    db = context['db']
-    root_id = await url_to_id(db, root)
-    return await _fetch_children(db, root_id, None,
-                                 valid_comment_sort(sort),
-                                 valid_limit(limit, 50),
-                                 observer)
-
-async def fetch_more_children(context, root_id, last_sibling_id, sort='top',
-                              limit=20, observer=None):
-    """Fetch truncated siblings from tree."""
-    db = context['db']
-    return await _fetch_children(db, root_id, last_sibling_id,
-                                 valid_comment_sort(sort),
-                                 valid_limit(limit, 50),
-                                 observer)
-
-_SORTS = dict(hot='sc_hot', top='payout', new='post_id')
-async def _fetch_children(db, root_id, start_id, sort, limit, observer=None):
-    """Fetch truncated children from tree."""
-    mutes = set()
-    field = _SORTS[sort]
-
-    # load id skeleton
-    tree, parent = await _load_tree(db, root_id, mutes, max_depth=3)
-
-    # find most relevant ids in subset
-    seek = ''
-    if start_id:
-        seek = """AND %s < (SELECT %s FROM hive_posts_cache
-                             WHERE post_id = :start_id)""" % (field, field)
-    sql = """SELECT post_id FROM hive_posts_cache
-              WHERE post_id IN :ids %s ORDER BY %s DESC
-              LIMIT :limit""" % (seek, field)
-    relevant_ids = await db.query_col(sql, ids=tuple(parent.keys()),
-                                      start_id=start_id, limit=limit)
-
-    # fill in missing parents
-    for _id in relevant_ids:
-        if _id != root_id:
-            if parent[_id] not in relevant_ids:
-                relevant_ids.append(parent[_id])
-
-    # load objects and assemble response tree
-    comments = await comments_by_id(db, relevant_ids, observer)
-
-    return {'accounts': comments['accounts'],
-            'posts': _build_tree(tree[root_id], tree, comments['posts'], sort_ids=relevant_ids)}
-
-
-def _build_tree(root_ids, tree, comments, sort_ids):
-    # comments is sorted...
-
-    ret = []
-    for root_id in sorted(root_ids, key=sort_ids.index):
-        assert root_id in comments, 'root not loaded'
-        out = comments[root_id]
-        out['type'] = 'comment'
-
-        if root_id in tree:
-            missing = 0
-            loaded_ids = []
-            for cid in tree[root_id]:
-                if cid in comments:
-                    assert not missing, 'missing mode: not expected to find'
-                    loaded_ids.append(cid)
-                else:
-                    missing += 1
-
-            if loaded_ids:
-                out['children'] = _build_tree(loaded_ids, tree, comments, sort_ids)
-            else:
-                out['children'] = []
-            if missing:
-                last_id = loaded_ids[-1] if loaded_ids else None
-                out['children'].append({'type': 'more-children',
-                                        'root_id': root_id,
-                                        'last_id': last_id,
-                                        'count': missing})
-
-        ret.append(out)
-
-    return ret
-
-
-async def _load_tree(db, root_id, muted, max_depth):
-    """Build `ids` list and `tree` map."""
-    parent = {} # only loaded to max_depth
-    tree = {}   # loaded to max_depth + 1
-    todo = [root_id]
-    depth = 0
-    while todo:
-        depth += 1
-        rows = await _child_ids(db, todo, muted)
-        todo = []
-        for pid, cids in rows:
-            tree[pid] = cids
-            todo.extend(cids)
-            if depth <= max_depth:
-                for cid in cids:
-                    parent[cid] = pid
-        if depth > max_depth:
-            break
-
-    return (tree, parent)
-
-async def _child_ids(db, parent_ids, muted):
-    """Load child ids for multiple parent ids."""
-    filt = 'AND author NOT IN :muted' if muted else ''
-    sql = """
-             SELECT parent_id, array_agg(id)
-               FROM hive_posts
-              WHERE parent_id IN :ids
-                AND is_deleted = '0'
-                AND is_muted = '0'
-                AND is_valid = '1' %s
-           GROUP BY parent_id
-    """ % filt
-    rows = await db.query_all(sql, ids=tuple(parent_ids), muted=tuple(muted))
-    return [[row[0], row[1]] for row in rows]
diff --git a/hive/server/serve.py b/hive/server/serve.py
index c83053f749c4fd70b9bb991e3799f1f39bd26558..b8874b92aeda3ba45fc505fb4a6ad2df0854c968 100644
--- a/hive/server/serve.py
+++ b/hive/server/serve.py
@@ -11,12 +11,13 @@ from aiohttp import web
 from jsonrpcserver.methods import Methods
 from jsonrpcserver import async_dispatch as dispatch
 
+import simplejson
+
 from hive.server.condenser_api import methods as condenser_api
 from hive.server.condenser_api.tags import get_trending_tags as condenser_api_get_trending_tags
 from hive.server.condenser_api.get_state import get_state as condenser_api_get_state
 from hive.server.condenser_api.call import call as condenser_api_call
 from hive.server.common.mutes import Mutes
-from hive.server.common.payout_stats import PayoutStats
 
 from hive.server.bridge_api import methods as bridge_api
 from hive.server.bridge_api.thread import get_discussion as bridge_api_get_discussion
@@ -25,11 +26,23 @@ from hive.server.bridge_api.support import get_post_header as bridge_api_get_pos
 from hive.server.hive_api import community as hive_api_community
 from hive.server.hive_api import notify as hive_api_notify
 from hive.server.hive_api import stats as hive_api_stats
+from hive.server.hive_api.public import get_info as hive_api_get_info
+
+from hive.server.follow_api import methods as follow_api
+from hive.server.tags_api import methods as tags_api
+
+from hive.server.database_api import methods as database_api
 
 from hive.server.db import Db
 
 # pylint: disable=too-many-lines
 
+def decimal_serialize(obj):
+    return simplejson.dumps(obj=obj, use_decimal=True)
+
+def decimal_deserialize(s):
+    return simplejson.loads(s=s, use_decimal=True)
+
 async def db_head_state(context):
     """Status/health check."""
     db = context['db']
@@ -49,6 +62,8 @@ def build_methods():
         db_head_state,
     )})
 
+    methods.add(**{'hive.get_info' : hive_api_get_info})
+
     methods.add(**{'condenser_api.' + method.__name__: method for method in (
         condenser_api.get_followers,
         condenser_api.get_following,
@@ -73,6 +88,7 @@ def build_methods():
         condenser_api.get_blog_entries,
         condenser_api.get_account_reputations,
         condenser_api.get_reblogged_by,
+        condenser_api.get_active_votes
     )})
 
     # dummy methods -- serve informational error
@@ -86,16 +102,16 @@ def build_methods():
         'follow_api.get_followers': condenser_api.get_followers,
         'follow_api.get_following': condenser_api.get_following,
         'follow_api.get_follow_count': condenser_api.get_follow_count,
-        'follow_api.get_account_reputations': condenser_api.get_account_reputations,
+        'follow_api.get_account_reputations': follow_api.get_account_reputations,
         'follow_api.get_blog': condenser_api.get_blog,
         'follow_api.get_blog_entries': condenser_api.get_blog_entries,
-        'follow_api.get_reblogged_by': condenser_api.get_reblogged_by,
+        'follow_api.get_reblogged_by': condenser_api.get_reblogged_by
     })
 
     # tags_api aliases
     methods.add(**{
-        'tags_api.get_discussion': condenser_api.get_content,
-        'tags_api.get_content_replies': condenser_api.get_content_replies,
+        'tags_api.get_discussion': tags_api.get_discussion,
+        'tags_api.get_content_replies': tags_api.get_content_replies,
         'tags_api.get_discussions_by_trending': condenser_api.get_discussions_by_trending,
         'tags_api.get_discussions_by_hot': condenser_api.get_discussions_by_hot,
         'tags_api.get_discussions_by_promoted': condenser_api.get_discussions_by_promoted,
@@ -104,7 +120,7 @@ def build_methods():
         'tags_api.get_discussions_by_comments': condenser_api.get_discussions_by_comments,
         'tags_api.get_discussions_by_author_before_date': condenser_api.get_discussions_by_author_before_date,
         'tags_api.get_post_discussions_by_payout': condenser_api.get_post_discussions_by_payout,
-        'tags_api.get_comment_discussions_by_payout': condenser_api.get_comment_discussions_by_payout,
+        'tags_api.get_comment_discussions_by_payout': condenser_api.get_comment_discussions_by_payout
     })
 
     # legacy `call` style adapter
@@ -123,6 +139,8 @@ def build_methods():
         bridge_api.get_profile,
         bridge_api.get_trending_topics,
         bridge_api.get_relationship_between_accounts,
+        bridge_api.get_follow_list,
+        bridge_api.does_user_follow_any_lists,
         hive_api_notify.post_notifications,
         hive_api_notify.account_notifications,
         hive_api_notify.unread_notifications,
@@ -136,6 +154,14 @@ def build_methods():
         hive_api_community.list_all_subscriptions,
     )})
 
+    # database_api methods
+    methods.add(**{
+        'database_api.list_comments' : database_api.list_comments,
+        'database_api.find_comments' : database_api.find_comments,
+        'database_api.list_votes' : database_api.list_votes,
+        'database_api.find_votes' : database_api.find_votes
+    })
+
     return methods
 
 def truncate_response_log(logger):
@@ -168,9 +194,6 @@ def run_server(conf):
     log = logging.getLogger(__name__)
     methods = build_methods()
 
-    mutes = Mutes(conf.get('muted_accounts_url'), conf.get('blacklist_api_url'))
-    Mutes.set_shared_instance(mutes)
-
     app = web.Application()
     app['config'] = dict()
     app['config']['args'] = conf.args()
@@ -182,15 +205,26 @@ def run_server(conf):
         args = app['config']['args']
         app['db'] = await Db.create(args['database_url'])
 
-        stats = PayoutStats(app['db'])
-        stats.set_shared_instance(stats)
-
     async def close_db(app):
         """Teardown db adapter."""
         app['db'].close()
         await app['db'].wait_closed()
 
+    async def show_info(app):
+        sql = "SELECT num FROM hive_blocks ORDER BY num DESC LIMIT 1"
+        database_head_block = await app['db'].query_one(sql)
+
+        from hive.version import VERSION, GIT_REVISION
+        log.info("hivemind_version : %s", VERSION)
+        log.info("hivemind_git_rev : %s", GIT_REVISION)
+
+        from hive.db.schema import DB_VERSION as SCHEMA_DB_VERSION
+        log.info("database_schema_version : %s", SCHEMA_DB_VERSION)
+        
+        log.info("database_head_block : %s", database_head_block)
+
     app.on_startup.append(init_db)
+    app.on_startup.append(show_info)
     app.on_cleanup.append(close_db)
 
     async def head_age(request):
@@ -247,10 +281,29 @@ def run_server(conf):
         """Handles all hive jsonrpc API requests."""
         request = await request.text()
         # debug=True refs https://github.com/bcb/jsonrpcserver/issues/71
-        response = await dispatch(request, methods=methods, debug=True, context=app)
-        if response.wanted:
+        response = None
+        try:
+            response = await dispatch(request, methods=methods, debug=True, context=app, serialize=decimal_serialize, deserialize=decimal_deserialize)
+        except simplejson.errors.JSONDecodeError as ex:
+            # first log exception
+            # TODO: consider removing this log - potential log spam
+            log.exception(ex)
+
+            # create and send error response
+            error_response = {
+                "jsonrpc":"2.0",
+                "error" : {
+                    "code": -32602,
+                    "data": "Invalid JSON in request: " + str(ex),
+                    "message": "Invalid parameters"
+                },
+                "id" : -1
+            }
             headers = {'Access-Control-Allow-Origin': '*'}
-            return web.json_response(response.deserialized(), status=200, headers=headers)
+            return web.json_response(error_response, status=200, headers=headers, dumps=decimal_serialize)
+        if response is not None and response.wanted:
+            headers = {'Access-Control-Allow-Origin': '*'}
+            return web.json_response(response.deserialized(), status=200, headers=headers, dumps=decimal_serialize)
         return web.Response()
 
     if conf.get('sync_to_s3'):
@@ -258,5 +311,36 @@ def run_server(conf):
     app.router.add_get('/.well-known/healthcheck.json', health)
     app.router.add_get('/health', health)
     app.router.add_post('/', jsonrpc_handler)
-
-    web.run_app(app, port=app['config']['args']['http_server_port'])
+    if 'auto_http_server_port' in app['config']['args'] and app['config']['args']['auto_http_server_port'] is not None:
+        log.debug("auto-http-server-port detected in program arguments, http_server_port will be overridden with a port from the given range")
+        port_range = app['config']['args']['auto_http_server_port']
+        port_range_len = len(port_range)
+        port_from = port_range[0]
+        port_to = port_range[1] if port_range_len == 2 else 65535
+        if port_to > 65535:
+            port_to = 65535
+        if port_from < 1024:
+            port_from = 1024
+
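+        # scan the clamped port range for the first port that can be bound and
+        # record the chosen port in the hivemind.port file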
+        import socket
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        while port_from <= port_to:
+            try:
+                log.debug("Trying port: {}".format(port_from))
+                sock.bind(('', port_from))
+            except OSError as ex:
+                log.debug("Exception: {}".format(ex))
+                port_from += 1
+            except Exception as ex:
+                # log and rethrow exception
+                log.exception("Exception: {}".format(ex))
+                raise ex
+            else:
+                with open('hivemind.port', 'w') as port_file:
+                    port_file.write("{}\n".format(port_from))
+                web.run_app(app, sock=sock)
+                break
+        if port_from > port_to:
+            raise IOError('No free ports in given range')
+    else:
+        web.run_app(app, port=app['config']['args']['http_server_port'])
diff --git a/hive/server/tags_api/__init__.py b/hive/server/tags_api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e21a4b22e906d06b68cafb38173f99bad13e7d4d
--- /dev/null
+++ b/hive/server/tags_api/__init__.py
@@ -0,0 +1 @@
+""" Tags api """
\ No newline at end of file
diff --git a/hive/server/tags_api/methods.py b/hive/server/tags_api/methods.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c85e9df46cd95fc75dec156b051537fdd04fa42
--- /dev/null
+++ b/hive/server/tags_api/methods.py
@@ -0,0 +1,16 @@
+from hive.server.condenser_api.methods import _get_content_impl, _get_content_replies_impl
+from hive.server.common.helpers import (
+    return_error_info,
+    valid_account,
+    valid_permlink)
+
+@return_error_info
+async def get_discussion(context, author: str, permlink: str, observer=None):
+    db = context['db']
+    return await _get_content_impl(db, False, author, permlink, observer)
+
+@return_error_info
+async def get_content_replies(context, author: str, permlink: str):
+    db = context['db']
+    return await _get_content_replies_impl(db, False, author, permlink)
+
diff --git a/hive/steem/block/schedule.py b/hive/steem/block/schedule.py
index 1958232715702c87de916f21199bfe9e8244eac7..b7226d2743c28c9aa60b2a4fc21437d529b1f243 100644
--- a/hive/steem/block/schedule.py
+++ b/hive/steem/block/schedule.py
@@ -15,13 +15,14 @@ class BlockSchedule:
 
     BLOCK_INTERVAL = 3
 
-    def __init__(self, current_head_block):
+    def __init__(self, current_head_block, do_stale_block_check):
         self._start_block = current_head_block
         self._head_num = current_head_block
         self._next_expected = time() + self.BLOCK_INTERVAL / 2
         self._drift = self.BLOCK_INTERVAL / 2
         self._missed = 0
         self._last_date = None
+        self._do_stale_block_check = do_stale_block_check
 
     def wait_for_block(self, num):
         """Sleep until the requested block is expected to be available.
@@ -70,7 +71,7 @@ class BlockSchedule:
         It's possible a steemd node could fall behind or stop syncing;
         we can identify this case by comparing current time to latest
         received block time."""
-        if num == self._head_num:
+        if self._do_stale_block_check and num == self._head_num:
             gap = int(time() - utc_timestamp(date))
             assert gap > -60, 'system clock is %ds behind chain' % gap
             if gap > 60:
diff --git a/hive/steem/block/stream.py b/hive/steem/block/stream.py
index 02bfe24a40d9b28695442574a23ebead4c5f6656..fff42b508c85113cb4e9d84ac8e0375617c755d6 100644
--- a/hive/steem/block/stream.py
+++ b/hive/steem/block/stream.py
@@ -55,10 +55,10 @@ class BlockStream:
     """ETA-based block streamer."""
 
     @classmethod
-    def stream(cls, client, start_block, min_gap=0, max_gap=100):
+    def stream(cls, client, start_block, min_gap=0, max_gap=100, do_stale_block_check=True):
         """Instantiates a BlockStream and returns a generator."""
         streamer = BlockStream(client, min_gap, max_gap)
-        return streamer.start(start_block)
+        return streamer.start(start_block, do_stale_block_check)
 
     def __init__(self, client, min_gap=0, max_gap=100):
         assert not (min_gap < 0 or min_gap > 100)
@@ -70,7 +70,7 @@ class BlockStream:
         """Ensures gap between curr and head is within limits (max_gap)."""
         return not self._max_gap or head - curr < self._max_gap
 
-    def start(self, start_block):
+    def start(self, start_block, do_stale_block_check):
         """Stream blocks starting from `start_block`.
 
         Will run forever unless `max_gap` is specified and exceeded.
@@ -80,11 +80,16 @@ class BlockStream:
         prev = self._client.get_block(curr - 1)['block_id']
 
         queue = BlockQueue(self._min_gap, prev)
-        schedule = BlockSchedule(head)
+
+        schedule = BlockSchedule(head, do_stale_block_check)
 
         while self._gap_ok(curr, head):
             head = schedule.wait_for_block(curr)
-            block = self._client.get_block(curr, strict=False)
+            block = self._client.get_block(curr)
+
+            #block_num = int(block['block_id'][:8], base=16)
+            #log.info("stream is processing a block %d with timestamp: %s", block_num, block['timestamp'])
+
             schedule.check_block(curr, block)
 
             if not block:
diff --git a/hive/steem/blocks_provider.py b/hive/steem/blocks_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..36d9349c3ea5d1ff10e2d8fa7a74d475b6e474fe
--- /dev/null
+++ b/hive/steem/blocks_provider.py
@@ -0,0 +1,109 @@
+
+from concurrent.futures import ThreadPoolExecutor, as_completed
+import logging
+import queue
+from time import sleep
+import math
+
+from hive.indexer.mock_block_provider import MockBlockProvider
+
+log = logging.getLogger(__name__)
+
+class BlocksProvider:
+    """Starts threads which request blocks from the node and collect responses into one queue"""
+
+    def __init__(cls, http_client, number_of_threads, blocks_per_request, start_block, max_block, breaker):
+        """
+            http_client - object which will ask the node for blocks
+            number_of_threads - how many threads will be used to ask for blocks
+            start_block - block from which the processing starts
+            max_block - number of the last block to get
+            breaker - callable object which returns True as long as processing should continue
+        """
+
+        assert number_of_threads > 0
+        assert max_block > start_block
+        assert breaker
+        assert http_client
+        assert blocks_per_request >= 1
+
+        cls._responses_queues = []
+        cls._breaker = breaker
+        cls._start_block = start_block
+        cls._max_block = max_block # to include upper bound in results
+        cls._http_client = http_client
+        cls._thread_pool = ThreadPoolExecutor(number_of_threads + 1 ) #+1 for a collecting thread
+        cls._number_of_threads = number_of_threads
+        cls._blocks_per_request = blocks_per_request
+
+        # prepare queues and threads
+        for i in range( 0, number_of_threads):
+            cls._responses_queues.append( queue.Queue( maxsize = 50 ) )
+
+
+    def thread_body_get_block( cls, blocks_shift ):
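+        # each worker thread requests every N-th batch of blocks: thread `blocks_shift`
+        # starts at start_block + blocks_shift * blocks_per_request and advances by
+        # number_of_threads * blocks_per_request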
+        for block in range ( cls._start_block + blocks_shift * cls._blocks_per_request, cls._max_block, cls._number_of_threads * cls._blocks_per_request ):
+            if not cls._breaker():
+                return
+
+            results = []
+            if cls._blocks_per_request > 1:
+                query_param = [{'block_num': i} for i in range( block, min( [ block + cls._blocks_per_request, cls._max_block ] ))]
+                results = cls._http_client.exec( 'get_block', query_param, True )
+            else:
+                query_param = {'block_num': block}
+                results.append(cls._http_client.exec( 'get_block', query_param, False ))
+
+            if results:
+                while cls._breaker():
+                    try:
+                        cls._responses_queues[ blocks_shift ].put( results, True, 1 )
+                        break
+                    except queue.Full:
+                        continue
+
+    def thread_body_blocks_collector( cls, queue_for_blocks ):
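+        # drain the per-thread response queues in round-robin order so blocks are
+        # emitted sequentially; mock transactions (if any) are merged into each block
+        # before it is pushed to queue_for_blocks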
+        currently_received_block = cls._start_block - 1
+        while cls._breaker():
+            # take in order all blocks from threads queues
+            for blocks_queue in range ( 0, cls._number_of_threads ):
+                if not cls._breaker():
+                    return
+                while cls._breaker():
+                    try:
+                        blocks = cls._responses_queues[ blocks_queue ].get( True, 1)
+                        cls._responses_queues[ blocks_queue ].task_done()
+                        #split blocks range
+                        for block in blocks:
+                            block_mock = MockBlockProvider.get_block_data(currently_received_block+1, True)
+                            if block_mock is not None:
+                                if 'block' in block:
+                                    block["block"]["transactions"].extend( block_mock["transactions"] )
+                                    block["block"]["transaction_ids"].extend( block_mock["transaction_ids"] )
+                                else:
+                                    block["block"] = block_mock
+                            if 'block' not in block: # skip if the block exists neither in the node nor in the mock provider
+                                continue
+
+                            while cls._breaker():
+                                try:
+                                    queue_for_blocks.put( block['block'], True, 1 )
+                                    currently_received_block += 1
+                                    if currently_received_block >= (cls._max_block - 1):
+                                        return
+                                    break
+                                except queue.Full:
+                                    continue
+                        break
+                    except queue.Empty:
+                        continue
+
+    def start(cls, queue_for_blocks):
+        futures = []
+        for future_number in range(0, cls._number_of_threads):
+            future = cls._thread_pool.submit( cls.thread_body_get_block, future_number  )
+            futures.append( future )
+
+        future = cls._thread_pool.submit( cls.thread_body_blocks_collector, queue_for_blocks )
+        futures.append( future )
+        return futures
diff --git a/hive/steem/client.py b/hive/steem/client.py
index f8efb7b3cd03a28ce601a5307ef00d6556a0adb4..ef874ea87e440c1130dc3d74ab2b3a21cbd53548 100644
--- a/hive/steem/client.py
+++ b/hive/steem/client.py
@@ -1,5 +1,8 @@
 """Tight and reliable steem API client for hive indexer."""
 
+from hive.indexer.mock_data_provider import MockDataProviderException
+import logging
+
 from time import perf_counter as perf
 from decimal import Decimal
 
@@ -7,6 +10,12 @@ from hive.utils.stats import Stats
 from hive.utils.normalize import parse_amount, steem_amount, vests_amount
 from hive.steem.http_client import HttpClient
 from hive.steem.block.stream import BlockStream
+from hive.steem.blocks_provider import BlocksProvider
+from hive.steem.vops_provider import VopsProvider
+from hive.indexer.mock_block_provider import MockBlockProvider
+from hive.indexer.mock_vops_provider import MockVopsProvider
+
+logger = logging.getLogger(__name__)
 
 class SteemClient:
     """Handles upstream calls to jussi/steemd, with batching and retrying."""
@@ -21,10 +30,11 @@ class SteemClient:
         self._max_workers = max_workers
         self._client = dict()
         for endpoint, endpoint_url in url.items():
-            print("Endpoint {} will be routed to node {}".format(endpoint, endpoint_url))
+            logger.info("Endpoint %s will be routed to node %s", endpoint, endpoint_url)
             self._client[endpoint] = HttpClient(nodes=[endpoint_url])
 
-    def get_accounts(self, accounts):
+    def get_accounts(self, acc):
+        accounts = [v for v in acc if v != '']
         """Fetch multiple accounts by name."""
         assert accounts, "no accounts passed to get_accounts"
         assert len(accounts) <= 1000, "max 1000 accounts"
@@ -44,13 +54,9 @@ class SteemClient:
 
     def get_content_batch(self, tuples):
         """Fetch multiple comment objects."""
-        posts = self.__exec_batch('get_content', tuples)
-        # TODO: how are we ensuring sequential results? need to set and sort id.
-        for post in posts: # sanity-checking jussi responses
-            assert 'author' in post, "invalid post: %s" % post
-        return posts
+        raise NotImplementedError("get_content is not implemented in hived")
 
-    def get_block(self, num, strict=True):
+    def get_block(self, num):
         """Fetches a single block.
 
         If the result does not contain a `block` key, it's assumed
@@ -58,19 +64,70 @@ class SteemClient:
         """
         result = self.__exec('get_block', {'block_num': num})
         if 'block' in result:
-            return result['block']
-        elif strict:
-            raise Exception('block %d not available' % num)
+            ret = result['block']
+
+            #logger.info("Found real block %d with timestamp: %s", num, ret['timestamp'])
+
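+            # record the latest real block in the mock provider, then append any mock
+            # transactions registered for this block number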
+            MockBlockProvider.set_last_real_block_num_date(num, ret['timestamp'])
+            data = MockBlockProvider.get_block_data(num)
+            if data is not None:
+                ret["transactions"].extend(data["transactions"])
+                ret["transaction_ids"].extend(data["transaction_ids"])
+            return ret
         else:
-            return None
+            # if the block does not exist in hived but exists in the mock provider,
+            # return the block from the mock provider
+            mocked_block = MockBlockProvider.get_block_data(num, True)
+            #logger.info("Found real block %d with timestamp: %s", num, mocked_block['timestamp'])
+            return mocked_block
+
+    def get_blocks_provider( cls, lbound, ubound, breaker ):
+        """Creates and returns a blocks provider
+            lbound - start block
+            ubound - end block
+            breaker - callable, returns False when processing must be stopped
+        """
+        new_blocks_provider = BlocksProvider(
+              cls._client["get_block"] if "get_block" in cls._client else cls._client["default"]
+            , cls._max_workers
+            , cls._max_batch
+            , lbound
+            , ubound
+            , breaker
+        )
+        return new_blocks_provider
+
+    def get_vops_provider( cls, conf, lbound, ubound, breaker ):
+        """Creates and returns a virtual operations provider
+            conf - configuration
+            lbound - start block
+            ubound - end block
+            breaker - callable, returns False when processing must be stopped
+        """
+        new_vops_provider = VopsProvider(
+              conf
+            , cls
+            , cls._max_workers
+            , cls._max_batch
+            , lbound
+            , ubound
+            , breaker
+        )
+        return new_vops_provider
+
 
-    def stream_blocks(self, start_from, trail_blocks=0, max_gap=100):
+    def stream_blocks(self, start_from, trail_blocks=0, max_gap=100, do_stale_block_check=True):
         """Stream blocks. Returns a generator."""
-        return BlockStream.stream(self, start_from, trail_blocks, max_gap)
+        return BlockStream.stream(self, start_from, trail_blocks, max_gap, do_stale_block_check)
 
     def _gdgp(self):
         ret = self.__exec('get_dynamic_global_properties')
         assert 'time' in ret, "gdgp invalid resp: %s" % ret
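+        # when the mock provider defines blocks beyond the node's head, report the
+        # mock head block number and its timestamp instead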
+        mock_max_block_number = MockBlockProvider.get_max_block_number()
+        if mock_max_block_number > ret['head_block_number']:
+            ret['time'] = MockBlockProvider.get_block_data(mock_max_block_number)['timestamp']
+        ret['head_block_number'] = max([int(ret['head_block_number']), mock_max_block_number])
+        #ret['last_irreversible_block_num'] = max([int(ret['last_irreversible_block_num']), mock_max_block_number])
         return ret
 
     def head_time(self):
@@ -94,7 +151,8 @@ class SteemClient:
                   'confidential_sbd_supply', 'total_reward_fund_steem',
                   'total_reward_shares2']
         for key in unused:
-            del dgpo[key]
+            if key in dgpo:
+                del dgpo[key]
 
         return {
             'dgpo': dgpo,
@@ -104,7 +162,7 @@ class SteemClient:
 
     @staticmethod
     def _get_steem_per_mvest(dgpo):
-        steem = steem_amount(dgpo['total_vesting_fund_steem'])
+        steem = steem_amount(dgpo['total_vesting_fund_hive'])
         mvests = vests_amount(dgpo['total_vesting_shares']) / Decimal(1e6)
         return "%.6f" % (steem / mvests)
 
@@ -112,15 +170,20 @@ class SteemClient:
         # TODO: add latest feed price: get_feed_history.price_history[0]
         feed = self.__exec('get_feed_history')['current_median_history']
         units = dict([parse_amount(feed[k])[::-1] for k in ['base', 'quote']])
-        price = units['HBD'] / units['HIVE']
+        if 'TBD' in units and 'TESTS' in units:
+            price = units['TBD'] / units['TESTS']
+        else:
+            price = units['HBD'] / units['HIVE']
         return "%.6f" % price
 
     def _get_steem_price(self):
         orders = self.__exec('get_order_book', [1])
-        ask = Decimal(orders['asks'][0]['real_price'])
-        bid = Decimal(orders['bids'][0]['real_price'])
-        price = (ask + bid) / 2
-        return "%.6f" % price
+        if orders['asks'] and orders['bids']:
+            ask = Decimal(orders['asks'][0]['real_price'])
+            bid = Decimal(orders['bids'][0]['real_price'])
+            price = (ask + bid) / 2
+            return "%.6f" % price
+        return "0"
 
     def get_blocks_range(self, lbound, ubound):
         """Retrieves blocks in the range of [lbound, ubound)."""
@@ -128,14 +191,101 @@ class SteemClient:
         blocks = {}
 
         batch_params = [{'block_num': i} for i in block_nums]
+        idx = 0
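+        # pair each result with the block number it was requested for; when the node
+        # does not return the block, fall back to the mock provider's data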
         for result in self.__exec_batch('get_block', batch_params):
-            assert 'block' in result, "result w/o block key: %s" % result
-            block = result['block']
-            num = int(block['block_id'][:8], base=16)
-            blocks[num] = block
+            block_num = batch_params[idx]['block_num']
+            if 'block' in result:
+                block = result['block']
+                num = int(block['block_id'][:8], base=16)
+                assert block_num == num, "Reference block number and block number from result do not match"
+                blocks[num] = block
+                MockBlockProvider.set_last_real_block_num_date(num, block['timestamp'])
+                data = MockBlockProvider.get_block_data(num)
+                if data is not None:
+                    blocks[num]["transactions"].extend(data["transactions"])
+                    blocks[num]["transaction_ids"].extend(data["transaction_ids"])
+            else:
+                blocks[block_num] = MockBlockProvider.get_block_data(block_num, True)
+            idx += 1
 
         return [blocks[x] for x in block_nums]
 
+    def get_virtual_operations(self, block):
+        """ Get virtual ops from block """
+        result = self.__exec('get_ops_in_block', {"block_num":block, "only_virtual":True})
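+        # only these virtual operation types are kept from the node's response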
+        tracked_ops = ['author_reward_operation', 'comment_reward_operation', 'effective_comment_vote_operation', 'comment_payout_update_operation', 'ineffective_delete_comment_operation']
+        ret = []
+        result = result['ops'] if 'ops' in result else []
+        for vop in result:
+            if vop['op']['type'] in tracked_ops:
+                ret.append(vop['op'])
+        return ret
+
+    def enum_virtual_ops(self, conf, begin_block, end_block):
+        """ Get virtual ops for range of blocks """
+
+        ret = {}
+
+        from_block = begin_block
+
+        # According to the definition of hive::plugins::account_history::enum_vops_filter:
+
+        author_reward_operation                 = 0x000002
+        comment_reward_operation                = 0x000008
+        effective_comment_vote_operation        = 0x400000
+        comment_payout_update_operation         = 0x000800
+        ineffective_delete_comment_operation    = 0x800000
+
+        tracked_ops_filter = author_reward_operation | comment_reward_operation | effective_comment_vote_operation | comment_payout_update_operation | ineffective_delete_comment_operation
+
+        resume_on_operation = 0
+
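+        # enum_virtual_ops is paged: next_operation_begin / next_block_range_begin from
+        # each response tell where to resume until the whole block range is covered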
+        while from_block < end_block:
+            call_result = self.__exec('enum_virtual_ops', {"block_range_begin":from_block, "block_range_end":end_block
+                , "group_by_block": True, "include_reversible": True, "operation_begin": resume_on_operation, "limit": 1000, "filter": tracked_ops_filter
+            })
+
+            if conf.get('log_virtual_op_calls'):
+                call = """
+                Call enum_virtual_ops:
+                Query: {{"block_range_begin":{}, "block_range_end":{}, "group_by_block": True, "operation_begin": {}, "limit": 1000, "filter": {} }}
+                Response: {}""".format ( from_block, end_block, resume_on_operation, tracked_ops_filter, call_result )
+                logger.info( call )
+
+
+            one_block_ops = {opb["block"] : {"timestamp":opb["timestamp"], "ops":[op["op"] for op in opb["ops"]]} for opb in call_result["ops_by_block"]}
+
+            if one_block_ops:
+                first_block = list(one_block_ops.keys())[0]
+                # if we continue collecting ops from previous iteration
+                if first_block in ret:
+                    ret.update( { first_block : { "timestamp":ret[ first_block ]["timestamp"], "ops":ret[ first_block ]["ops"] + one_block_ops[ first_block ]["ops"]} } )
+                    one_block_ops.pop( first_block, None )
+            ret.update( one_block_ops )
+
+            resume_on_operation = call_result['next_operation_begin'] if 'next_operation_begin' in call_result else 0
+
+            next_block = call_result['next_block_range_begin']
+
+            if next_block == 0:
+                break
+
+            if next_block < begin_block:
+                logger.error( "Next block range begin {} returned by enum_virtual_ops is smaller than begin block {}.".format( next_block, begin_block ) )
+                break
+
+            # Move to next block only if operations from current one have been processed completely.
+            from_block = next_block
+
+        MockVopsProvider.add_mock_vops(ret, begin_block, end_block)
+
+        return ret
+
+    def get_comment_pending_payouts(self, comments):
+        """ Get comment pending payout data """
+        ret = self.__exec('get_comment_pending_payouts', {'comments':comments})
+        return ret['cashout_infos']
+
     def __exec(self, method, params=None):
         """Perform a single steemd call."""
         start = perf()
diff --git a/hive/steem/http_client.py b/hive/steem/http_client.py
index c0bb7ebf99d0bf9d021b0e8318d63ef02fcf4662..5e902b6a647e1041199d20fa7f2a641b3b9049bc 100644
--- a/hive/steem/http_client.py
+++ b/hive/steem/http_client.py
@@ -84,11 +84,12 @@ class HttpClient(object):
     METHOD_API = dict(
         lookup_accounts='condenser_api',
         get_block='block_api',
-        get_content='condenser_api',
-        get_accounts='condenser_api',
         get_order_book='condenser_api',
-        get_feed_history='condenser_api',
+        get_feed_history='database_api',
         get_dynamic_global_properties='database_api',
+        get_comment_pending_payouts='database_api',
+        get_ops_in_block='account_history_api',
+        enum_virtual_ops='account_history_api'
     )
 
     def __init__(self, nodes, **kwargs):
@@ -145,7 +146,8 @@ class HttpClient(object):
         body_data = json.dumps(body, ensure_ascii=False).encode('utf8')
 
         tries = 0
-        while tries < 100:
+        # retry at most 25 times
+        while tries < 25:
             tries += 1
             secs = -1
             info = None
diff --git a/hive/steem/massive_blocks_data_provider.py b/hive/steem/massive_blocks_data_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..14949432022fa15c2d25209981963f8fe99d0323
--- /dev/null
+++ b/hive/steem/massive_blocks_data_provider.py
@@ -0,0 +1,98 @@
+from hive.steem.blocks_provider import BlocksProvider
+from hive.steem.vops_provider import VopsProvider
+from hive.utils.stats import WaitingStatusManager as WSM
+
+import logging
+import queue
+
+log = logging.getLogger(__name__)
+
+class MassiveBlocksDataProvider:
+    def __init__(
+          cls
+        , conf
+        , node_client
+        , blocks_get_threads
+        , vops_get_threads
+        , number_of_blocks_data_in_one_batch
+        , lbound
+        , ubound
+        , breaker):
+        """
+            conf - configuration
+            node_client - SteemClient
+            blocks_get_threads - number of threads which get blocks from node
+            vops_get_threads - number of threads which get virtual operations from node
+            number_of_blocks_data_in_one_batch - number of blocks which will be asked for the node in one HTTP get
+            lbound - first block to get
+            ubound - last block to get
+            breaker - callable, returns False when processing must be stopped
+        """
+        cls.blocks_provider = BlocksProvider(
+              node_client._client["get_block"] if "get_block" in node_client._client else node_client._client["default"]
+            , blocks_get_threads
+            , number_of_blocks_data_in_one_batch
+            , lbound
+            , ubound
+            , breaker
+        )
+
+        cls.vops_provider = VopsProvider(
+              conf
+            , node_client
+            , vops_get_threads
+            , number_of_blocks_data_in_one_batch
+            , lbound
+            , ubound
+            , breaker
+        )
+
+        cls.vops_queue = queue.Queue( maxsize=10000 )
+        cls.blocks_queue = queue.Queue( maxsize=10000 )
+        cls.breaker = breaker
+
+    def _get_from_queue( cls, data_queue, number_of_elements ):
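+        # pop up to number_of_elements items, waiting in 1-second slices for as long
+        # as the breaker allows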
+        ret = []
+        for element in range( number_of_elements ):
+            if not cls.breaker():
+                break
+            while cls.breaker():
+                try:
+                    ret.append( data_queue.get(True, 1) )
+                    data_queue.task_done()
+                except queue.Empty:
+                    continue
+                break
+        return ret
+
+    def get( cls, number_of_blocks ):
+        """Returns blocks and vops data for next number_of_blocks"""
+        result = { 'vops': [], 'blocks': [] }
+
+        wait_vops_time = WSM.start()
+        if cls.vops_queue.qsize() < number_of_blocks and cls.breaker():
+            log.info("Awaiting any vops to process...")
+
+        if not cls.vops_queue.empty() or cls.breaker():
+            vops = cls._get_from_queue( cls.vops_queue, number_of_blocks )
+
+            if cls.breaker():
+                assert len( vops ) == number_of_blocks
+                result[ 'vops' ] = vops
+        WSM.wait_stat('block_consumer_vop', WSM.stop(wait_vops_time))
+
+        wait_blocks_time = WSM.start()
+        if ( cls.blocks_queue.qsize() < number_of_blocks ) and cls.breaker():
+            log.info("Awaiting any block to process...")
+
+        if not cls.blocks_queue.empty() or cls.breaker():
+            result[ 'blocks' ] = cls._get_from_queue( cls.blocks_queue, number_of_blocks )
+        WSM.wait_stat('block_consumer_block', WSM.stop(wait_blocks_time))
+
+        return result
+
+    def start(cls):
+        futures = cls.vops_provider.start( cls.vops_queue )
+        futures.extend( cls.blocks_provider.start( cls.blocks_queue ) )
+
+        return futures
diff --git a/hive/steem/vops_provider.py b/hive/steem/vops_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c841a1655eb8d9ba5fa5751b7fb3430586345ed
--- /dev/null
+++ b/hive/steem/vops_provider.py
@@ -0,0 +1,114 @@
+
+from concurrent.futures import ThreadPoolExecutor, as_completed
+import logging
+import queue
+import math
+from time import sleep
+
+from hive.indexer.mock_block_provider import MockBlockProvider
+
+log = logging.getLogger(__name__)
+
+class VopsProvider:
+    """Starts threads which request virtual operations from the node and collect responses into one queue"""
+
+    def __init__(cls, conf, client, number_of_threads, blocks_per_request, start_block, max_block, breaker):
+        """
+            conf - configuration
+            client - SteemClient object which will ask the node for virtual operations
+            number_of_threads - how many threads will be used to ask for virtual operations
+            start_block - block from which the processing starts
+            max_block - number of the last block to get
+            breaker - callable object which returns True as long as processing should continue
+        """
+
+        assert conf
+        assert number_of_threads > 0
+        assert max_block > start_block
+        assert breaker
+        assert client
+        assert blocks_per_request >= 1
+
+        cls._conf = conf
+        cls._responses_queues = []
+        cls._breaker = breaker
+        cls._start_block = start_block
+        cls._max_block = max_block # to include upper bound in results
+        cls._client = client
+        cls._thread_pool = ThreadPoolExecutor(number_of_threads + 1 ) #+1 for a collecting thread
+        cls._number_of_threads = number_of_threads
+        cls._blocks_per_request = blocks_per_request
+        cls.currently_received_block = cls._start_block - 1
+
+        # prepare queues and threads
+        for i in range( 0, number_of_threads):
+            cls._responses_queues.append( queue.Queue( maxsize = 50 ) )
+
+
+    def thread_body_get_block( cls, blocks_shift ):
+        for block in range ( cls._start_block + blocks_shift * cls._blocks_per_request, cls._max_block + cls._blocks_per_request, cls._number_of_threads * cls._blocks_per_request ):
+            if not cls._breaker():
+                return
+
+            results = cls._client.enum_virtual_ops(cls._conf, block, block + cls._blocks_per_request )
+            while cls._breaker():
+                try:
+                    cls._responses_queues[ blocks_shift ].put( results, True, 1 )
+                    break
+                except queue.Full:
+                    continue
+
+    def _fill_queue_with_no_vops(cls, queue_for_vops, number_of_no_vops):
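+        # blocks without virtual ops still get an empty entry so the consumer receives
+        # one list per block; returns True once the last requested block has been queued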
+        for vop in range( 0, number_of_no_vops):
+            while cls._breaker():
+                try:
+                    queue_for_vops.put( [], True, 1 )
+                    cls.currently_received_block += 1
+                    if cls.currently_received_block >= (cls._max_block - 1):
+                        return True
+                    break
+                except queue.Full:
+                    continue
+        return False
+
+    def thread_body_blocks_collector( cls, queue_for_vops ):
+        while cls._breaker():
+            # take in order all vops from threads queues
+            for vops_queue in range ( 0, cls._number_of_threads ):
+                if not cls._breaker():
+                    return
+                while cls._breaker():
+                    try:
+                        vops = cls._responses_queues[ vops_queue ].get( True, 1)
+                        cls._responses_queues[ vops_queue ].task_done()
+                        #split blocks range
+                        if not vops:
+                            if cls._fill_queue_with_no_vops( queue_for_vops, cls._blocks_per_request ):
+                                return
+                        else:
+                            for block in vops:
+                                if cls._fill_queue_with_no_vops( queue_for_vops, block - ( cls.currently_received_block + 1 ) ):
+                                    return
+                                vop = vops[ block ]
+                                while cls._breaker():
+                                    try:
+                                        queue_for_vops.put( vop[ 'ops' ], True, 1 )
+                                        cls.currently_received_block += 1
+                                        if cls.currently_received_block >= (cls._max_block - 1):
+                                            return
+                                        break
+                                    except queue.Full:
+                                        continue
+                        break
+                    except queue.Empty:
+                        continue
+
+    def start(cls, queue_for_vops):
+        futures = []
+        for future_number in range(0, cls._number_of_threads):
+            future = cls._thread_pool.submit( cls.thread_body_get_block, future_number  )
+            futures.append( future )
+
+        future = cls._thread_pool.submit( cls.thread_body_blocks_collector, queue_for_vops )
+        futures.append( future )
+        return futures
diff --git a/hive/utils/account.py b/hive/utils/account.py
index caf38307c378d39473ae3d997d762462679bc768..f7d9a5776cc8ffe0d408617ad88400b1efcd18f9 100644
--- a/hive/utils/account.py
+++ b/hive/utils/account.py
@@ -3,8 +3,40 @@
 import ujson as json
 from hive.utils.normalize import trunc
 
-def safe_profile_metadata(account):
-    """Given an account, return sanitized profile data."""
+def get_profile_str(account):
+    _posting_json_metadata = ""
+    _json_metadata = ""
+
+    if account is not None:
+        if 'posting_json_metadata' in account:
+            _posting_json_metadata = account['posting_json_metadata']
+        if 'json_metadata' in account:
+            _json_metadata = account['json_metadata']
+
+    return ( _posting_json_metadata, _json_metadata )
+
+def get_db_profile(posting_json_metadata, json_metadata):
+    prof = {}
+    json_metadata_is_read = False
+
+    # `posting_json_metadata` takes precedence, so it is loaded first.
+    # Loading `posting_json_metadata` can be skipped when it doesn't exist or its content is meaningless (e.g. '' or '{}').
+    try:
+        if posting_json_metadata is None or len( posting_json_metadata ) <= 2:
+            json_metadata_is_read = True
+            prof = json.loads(json_metadata)['profile']
+        else:
+            prof = json.loads(posting_json_metadata)['profile']
+    except Exception:
+        try:
+            if not json_metadata_is_read:
+                prof = json.loads(json_metadata)['profile']
+        except Exception:
+            prof = {}
+
+    return prof
+
+def get_profile(account):
     prof = {}
 
     try:
@@ -20,20 +52,31 @@ def safe_profile_metadata(account):
         except Exception:
             prof = {}
 
+    return prof
+
+def process_profile(prof):
+    """Returns sanitized and truncated profile data."""
+
     name = str(prof['name']) if 'name' in prof else None
     about = str(prof['about']) if 'about' in prof else None
     location = str(prof['location']) if 'location' in prof else None
     website = str(prof['website']) if 'website' in prof else None
     profile_image = str(prof['profile_image']) if 'profile_image' in prof else None
     cover_image = str(prof['cover_image']) if 'cover_image' in prof else None
+    blacklist_description = str(prof['blacklist_description']) if 'blacklist_description' in prof else None
+    muted_list_description = str(prof['muted_list_description']) if 'muted_list_description' in prof else None
 
     name = _char_police(name)
     about = _char_police(about)
     location = _char_police(location)
+    blacklist_description = _char_police(blacklist_description)
+    muted_list_description = _char_police(muted_list_description)
 
     name = trunc(name, 20)
     about = trunc(about, 160)
     location = trunc(location, 30)
+    blacklist_description = trunc(blacklist_description, 256)
+    muted_list_description = trunc(muted_list_description, 256)
 
     if name and name[0:1] == '@':
         name = None
@@ -58,8 +101,18 @@ def safe_profile_metadata(account):
         website=website or '',
         profile_image=profile_image or '',
         cover_image=cover_image or '',
+        blacklist_description=blacklist_description or '',
+        muted_list_description=muted_list_description or '',
     )
 
+def safe_db_profile_metadata(posting_json_metadata, json_metadata):
+    prof = get_db_profile(posting_json_metadata, json_metadata)
+    return process_profile(prof)
+
+def safe_profile_metadata(account):
+    prof = get_profile(account)
+    return process_profile(prof)
+
 def _valid_url_proto(url):
     assert url
     return url[0:7] == 'http://' or url[0:8] == 'https://'
diff --git a/hive/utils/communities_rank.py b/hive/utils/communities_rank.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7d2896fd0b6acbff87de22eadc4a47c148ee0a0
--- /dev/null
+++ b/hive/utils/communities_rank.py
@@ -0,0 +1,7 @@
+from hive.db.adapter import Db
+
+DB = Db.instance()
+
+def update_communities_posts_and_rank():
+    sql = "SELECT update_communities_posts_data_and_rank()"
+    DB.query_no_return(sql)
diff --git a/hive/utils/misc.py b/hive/utils/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..f24de47a438ee7a9ea3680e7dbcf75178f26aa66
--- /dev/null
+++ b/hive/utils/misc.py
@@ -0,0 +1,4 @@
+def chunks(lst, n):
+    """Yield successive n-sized chunks from lst."""
+    for i in range(0, len(lst), n):
+        yield lst[i:i + n]
\ No newline at end of file
diff --git a/hive/utils/normalize.py b/hive/utils/normalize.py
index 4c8158535a25522d9889b5fc5b3f2cb2217e9c98..f0a2f61421d6a3c1d09ff0e3dc1e5fe340b2398d 100644
--- a/hive/utils/normalize.py
+++ b/hive/utils/normalize.py
@@ -3,6 +3,7 @@
 import logging
 import math
 import decimal
+
 from datetime import datetime
 from pytz import utc
 import ujson as json
@@ -13,6 +14,91 @@ NAI_MAP = {
     '@@000000037': 'VESTS',
 }
 
+NAI_PRECISION = {
+    '@@000000013': 3,
+    '@@000000021': 3,
+    '@@000000037': 6,
+}
+
+UNIT_NAI = {
+    'HBD' : '@@000000013',
+    'HIVE' : '@@000000021',
+    'VESTS' : '@@000000037'
+}
+
+# convert special chars into their octal formats recognized by sql
+SPECIAL_CHARS = {
+    "\x00" : " ", # nul char cannot be stored in string column (ABW: if we ever find the need to store nul chars we'll need bytea, not text)
+    "\r" : "\\015",
+    "\n" : "\\012",
+    "\v" : "\\013",
+    "\f" : "\\014",
+    "\\" : "\\134",
+    "'" : "\\047",
+    "%" : "\\045",
+    "_" : "\\137",
+    ":" : "\\072"
+}
+
+def to_nai(value):
+    """ Convert various amount notation to nai notation """
+    ret = None
+    if isinstance(value, dict):
+        assert 'amount' in value, "amount not found in dict"
+        assert 'precision' in value, "precision not found in dict"
+        assert 'nai' in value, "nai not found in dict"
+        ret = value
+
+    elif isinstance(value, str):
+        raw_amount, unit = value.split(' ')
+        assert unit in UNIT_NAI, "Unknown unit {}".format(unit)
+        nai = UNIT_NAI[unit]
+        precision = NAI_PRECISION[nai]
+        satoshis = int(decimal.Decimal(raw_amount) * (10**precision))
+        ret = {'amount' : str(satoshis), 'nai' : nai, 'precision' : precision}
+
+    elif isinstance(value, list):
+        satoshis, precision, nai = value
+        assert nai in NAI_MAP, "Unknown NAI {}".format(nai)
+        ret = {'amount' : str(satoshis), 'nai' : nai, 'precision' : precision}
+
+    else:
+        raise Exception("Invalid input amount %s" % repr(value))
+    return ret
+
+
+def escape_characters(text):
+    """ Escape special characters """
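+    # characters in SPECIAL_CHARS map to octal escapes, printable ASCII passes through
+    # unchanged, and anything else becomes a \u or \U escape inside a PostgreSQL E'...' literal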
+    assert isinstance(text, str), "Expected string got: {}".format(type(text))
+    if len(text.strip()) == 0:
+        return "'" + text + "'"
+
+    ret = "E'"
+
+    for ch in text:
+        if ch in SPECIAL_CHARS:
+            dw = SPECIAL_CHARS[ch]
+            ret = ret + dw
+        else:
+            ordinal = ord(ch)
+            if ordinal <= 0x80 and ch.isprintable():
+                ret = ret + ch
+            else:
+                hexstr = hex(ordinal)[2:]
+                i = len(hexstr)
+                max_len = 4
+                escaped_value = '\\u'
+                if i > max_len:
+                    max_len = 8
+                    escaped_value = '\\U'
+                while i < max_len:
+                    escaped_value += '0'
+                    i += 1
+                escaped_value += hexstr
+                ret = ret + escaped_value
+
+    ret = ret + "'"
+    return ret
+
 def vests_amount(value):
     """Returns a decimal amount, asserting units are VESTS"""
     return parse_amount(value, 'VESTS')
@@ -49,7 +135,8 @@ def parse_amount(value, expected_unit=None):
         raise Exception("invalid input amount %s" % repr(value))
 
     if expected_unit:
-        assert unit == expected_unit
+# FIXME: to be uncommented when payout collection is corrected
+#        assert unit == expected_unit, "Unexpected unit: %s" % unit
         return dec_amount
 
     return (dec_amount, unit)
@@ -135,7 +222,7 @@ def rep_log10(rep):
     out = _log10(rep)
     out = max(out - 9, 0) * sign  # @ -9, $1 earned is approx magnitude 1
     out = (out * 9) + 25          # 9 points per magnitude. center at 25
-    return round(out, 2)
+    return float(round(out, 2))
 
 def rep_to_raw(rep):
     """Convert a UI-ready rep score back into its approx raw value."""
@@ -180,3 +267,21 @@ def int_log_level(str_log_level):
     if not isinstance(log_level, int):
         raise ValueError('Invalid log level: %s' % str_log_level)
     return log_level
+
+def asset_to_hbd_hive(price, asset):
+    """ Converts hive to hbd and hbd to hive based on price """
+    if asset['nai'] == price['base']['nai']:
+        result = int(asset['amount']) * int(price['quote']['amount']) / int(price['base']['amount'])
+        return {'amount' : result, 'nai' : price['quote']['nai'], 'precision' : price['quote']['precision']}
+    elif asset['nai'] == price['quote']['nai']:
+        result = int(asset['amount']) * int(price['base']['amount']) / int(price['quote']['amount'])
+        return {'amount' : result, 'nai' : price['base']['nai'], 'precision' : price['base']['precision']}
+    raise ValueError("Asset not supported")
+
+def time_string_with_t(time_iso8601):
+    """ Ensures that a time in ISO 8601 format uses 'T' as the date-time separator
+
+    Hived serializes time with 'T' as a separator. ISO 8601 allows a space as a separator
+    and SQL queries may return it.
+    """
+    return str(time_iso8601).replace(" ", "T")
diff --git a/hive/utils/post.py b/hive/utils/post.py
index 0d2b316f94e966c680e4ad78c70e414f170b3327..872d07ff97a2e115c08f8c26684ee9f0959f4b6c 100644
--- a/hive/utils/post.py
+++ b/hive/utils/post.py
@@ -2,11 +2,6 @@
 #pylint: disable=line-too-long,too-many-lines
 
 import re
-import math
-import ujson as json
-from funcy.seqs import first, distinct
-
-from hive.utils.normalize import sbd_amount, rep_log10, safe_img_url, parse_time, utc_timestamp
 
 def mentions(body):
     """Given a post body, return proper @-mentioned account names."""
@@ -21,237 +16,3 @@ def mentions(body):
         '([a-zA-Z0-9][a-zA-Z0-9\\-.]{1,14}[a-zA-Z0-9])'
         '(?![a-z])', body)
     return {grp.lower() for grp in matches}
-
-def post_to_internal(post, post_id, level='insert', promoted=None):
-    """Given a steemd post, build internal representation."""
-    # pylint: disable=bad-whitespace
-
-    #post['category'] = core['category']
-    #post['community_id'] = core['community_id']
-    #post['gray'] = core['is_muted']
-    #post['hide'] = not core['is_valid']
-
-    values = [('post_id', post_id)]
-
-    # immutable; write only once (*edge case: undeleted posts)
-    if level == 'insert':
-        values.extend([
-            ('author',   post['author']),
-            ('permlink', post['permlink']),
-            ('category', post['category']),
-            ('depth',    post['depth'])])
-
-    # always write, unless simple vote update
-    if level in ['insert', 'payout', 'update']:
-        basic = post_basic(post)
-        values.extend([
-            ('community_id',  post['community_id']), # immutable*
-            ('created_at',    post['created']),    # immutable*
-            ('updated_at',    post['last_update']),
-            ('title',         post['title']),
-            ('payout_at',     basic['payout_at']), # immutable*
-            ('preview',       basic['preview']),
-            ('body',          basic['body']),
-            ('img_url',       basic['image']),
-            ('is_nsfw',       basic['is_nsfw']),
-            ('is_declined',   basic['is_payout_declined']),
-            ('is_full_power', basic['is_full_power']),
-            ('is_paidout',    basic['is_paidout']),
-            ('json',          json.dumps(basic['json_metadata'])),
-            ('raw_json',      json.dumps(post_legacy(post))),
-        ])
-
-    # if there's a pending promoted value to write, pull it out
-    if promoted:
-        values.append(('promoted', promoted))
-
-    # update unconditionally
-    payout = post_payout(post)
-    stats = post_stats(post)
-
-    # //--
-    # if community - override fields.
-    # TODO: make conditional (date-based?)
-    assert 'community_id' in post, 'comm_id not loaded'
-    if post['community_id']:
-        stats['hide'] = post['hide']
-        stats['gray'] = post['gray']
-    # //--
-
-    values.extend([
-        ('payout',      payout['payout']),
-        ('rshares',     payout['rshares']),
-        ('votes',       payout['csvotes']),
-        ('sc_trend',    payout['sc_trend']),
-        ('sc_hot',      payout['sc_hot']),
-        ('flag_weight', stats['flag_weight']),
-        ('total_votes', stats['total_votes']),
-        ('up_votes',    stats['up_votes']),
-        ('is_hidden',   stats['hide']),
-        ('is_grayed',   stats['gray']),
-        ('author_rep',  stats['author_rep']),
-        ('children',    min(post['children'], 32767)),
-    ])
-
-    return values
-
-
-def post_basic(post):
-    """Basic post normalization: json-md, tags, and flags."""
-    md = {}
-    # At least one case where jsonMetadata was double-encoded: condenser#895
-    # jsonMetadata = JSON.parse(jsonMetadata);
-    try:
-        md = json.loads(post['json_metadata'])
-        if not isinstance(md, dict):
-            md = {}
-    except Exception:
-        pass
-
-    thumb_url = ''
-    if md and 'image' in md:
-        if md['image']:
-            if not isinstance(md['image'], list):
-                md['image'] = [md['image']]
-            md['image'] = list(filter(None, map(safe_img_url, md['image'])))
-        if md['image']:
-            thumb_url = md['image'][0]
-        else:
-            del md['image']
-
-    # clean up tags, check if nsfw
-    tags = [post['category']]
-    # if (typeof tags == 'string') tags = tags.split(' '); # legacy condenser compat
-    if md and 'tags' in md and isinstance(md['tags'], list):
-        tags = tags + md['tags']
-    tags = map(lambda tag: (str(tag) or '').strip('# ').lower()[:32], tags)
-    tags = filter(None, tags)
-    tags = list(distinct(tags))[:5]
-    is_nsfw = 'nsfw' in tags
-
-    body = post['body']
-    if body.find('\x00') > -1:
-        #url = post['author'] + '/' + post['permlink']
-        body = body.replace('\x00', '[NUL]')
-
-    # payout date is last_payout if paid, and cashout_time if pending.
-    is_paidout = (post['cashout_time'][0:4] == '1969')
-    payout_at = post['last_payout'] if is_paidout else post['cashout_time']
-
-    # payout is declined if max_payout = 0, or if 100% is burned
-    is_payout_declined = False
-    if sbd_amount(post['max_accepted_payout']) == 0:
-        is_payout_declined = True
-    elif len(post['beneficiaries']) == 1:
-        benny = first(post['beneficiaries'])
-        if benny['account'] == 'null' and int(benny['weight']) == 10000:
-            is_payout_declined = True
-
-    # payout entirely in SP
-    is_full_power = int(post['percent_steem_dollars']) == 0
-
-    return {
-        'json_metadata': md,
-        'image': thumb_url,
-        'tags': tags,
-        'is_nsfw': is_nsfw,
-        'body': body,
-        'preview': body[0:1024],
-
-        'payout_at': payout_at,
-        'is_paidout': is_paidout,
-        'is_payout_declined': is_payout_declined,
-        'is_full_power': is_full_power,
-    }
-
-def post_legacy(post):
-    """Return legacy fields which may be useful to save.
-
-    Some UI's may want to leverage these, but no point in indexing.
-    """
-    _legacy = ['id', 'url', 'root_comment', 'root_author', 'root_permlink',
-               'root_title', 'parent_author', 'parent_permlink',
-               'max_accepted_payout', 'percent_steem_dollars',
-               'curator_payout_value', 'allow_replies', 'allow_votes',
-               'allow_curation_rewards', 'beneficiaries']
-    return {k: v for k, v in post.items() if k in _legacy}
-
-def post_payout(post):
-    """Get current vote/payout data and recalculate trend/hot score."""
-    # total payout (completed and/or pending)
-    payout = sum([
-        sbd_amount(post['total_payout_value']),
-        sbd_amount(post['curator_payout_value']),
-        sbd_amount(post['pending_payout_value']),
-    ])
-
-    # `active_votes` was temporarily missing in dev -- ensure this condition
-    # is caught ASAP. if no active_votes then rshares MUST be 0. ref: steem#2568
-    assert post['active_votes'] or int(post['net_rshares']) == 0
-
-    # get total rshares, and create comma-separated vote data blob
-    rshares = sum(int(v['rshares']) for v in post['active_votes'])
-    csvotes = "\n".join(map(_vote_csv_row, post['active_votes']))
-
-    # trending scores
-    _timestamp = utc_timestamp(parse_time(post['created']))
-    sc_trend = _score(rshares, _timestamp, 240000)
-    sc_hot = _score(rshares, _timestamp, 10000)
-
-    return {
-        'payout': payout,
-        'rshares': rshares,
-        'csvotes': csvotes,
-        'sc_trend': sc_trend,
-        'sc_hot': sc_hot
-    }
-
-def _vote_csv_row(vote):
-    """Convert a vote object into minimal CSV line."""
-    rep = rep_log10(vote['reputation'])
-    return "%s,%s,%s,%s" % (vote['voter'], vote['rshares'], vote['percent'], rep)
-
-def _score(rshares, created_timestamp, timescale=480000):
-    """Calculate trending/hot score.
-
-    Source: calculate_score - https://github.com/steemit/steem/blob/8cd5f688d75092298bcffaa48a543ed9b01447a6/libraries/plugins/tags/tags_plugin.cpp#L239
-    """
-    mod_score = rshares / 10000000.0
-    order = math.log10(max((abs(mod_score), 1)))
-    sign = 1 if mod_score > 0 else -1
-    return sign * order + created_timestamp / timescale
-
-def post_stats(post):
-    """Get post statistics and derived properties.
-
-    Source: contentStats - https://github.com/steemit/condenser/blob/master/src/app/utils/StateFunctions.js#L109
-    """
-    neg_rshares = 0
-    total_votes = 0
-    up_votes = 0
-    for vote in post['active_votes']:
-        rshares = int(vote['rshares'])
-
-        if rshares == 0:
-            continue
-
-        total_votes += 1
-        if rshares > 0: up_votes += 1
-        if rshares < 0: neg_rshares += rshares
-
-    # take negative rshares, divide by 2, truncate 10 digits (plus neg sign),
-    #   and count digits. creates a cheap log10, stake-based flag weight.
-    #   result: 1 = approx $400 of downvoting stake; 2 = $4,000; etc
-    flag_weight = max((len(str(int(neg_rshares / 2))) - 11, 0))
-
-    author_rep = rep_log10(post['author_reputation'])
-    has_pending_payout = sbd_amount(post['pending_payout_value']) >= 0.02
-
-    return {
-        'hide': author_rep < 0 and not has_pending_payout,
-        'gray': author_rep < 1,
-        'author_rep': author_rep,
-        'flag_weight': flag_weight,
-        'total_votes': total_votes,
-        'up_votes': up_votes
-    }
diff --git a/hive/utils/post_active.py b/hive/utils/post_active.py
new file mode 100644
index 0000000000000000000000000000000000000000..86a272aca2d10bf5023db0230cf311ccb82c7dab
--- /dev/null
+++ b/hive/utils/post_active.py
@@ -0,0 +1,60 @@
+from hive.db.adapter import Db
+from hive.utils.timer import time_it
+
+DB = Db.instance()
+"""
+There are three cases when 'active' field in post is updated:
+1) when a descendant post comment was added (recursivly on any depth)
+2) when a descendant post comment was deleted (recursivly on any depth)
+3) when the post is updated - that one only updates that post active (not here)
+
+It means that, when the comment for posts is updated then its 'active' field
+does not propagate for its ancestors.
+"""
+
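+# The query below walks up the tree with a recursive CTE: it starts from the
+# affected leaf comments, carries the newest relevant timestamp (created_at, or
+# the previous 'active' for deleted comments) toward the root, and finally sets
+# each ancestor's 'active' to the maximum of its own value and its descendants'
+# (max_time_stamp is a SQL helper defined elsewhere in the schema).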
+update_active_sql = """
+    WITH RECURSIVE parent_posts ( parent_id, post_id, intrusive_active ) AS (
+      SELECT
+        hp1.parent_id as parent_id,
+        hp1.id as post_id,
+        CASE WHEN hp1.counter_deleted > 0 THEN hp1.active
+        ELSE hp1.created_at
+        END as intrusive_active
+      FROM hive_posts hp1
+      WHERE hp1.depth > 0 {}
+      UNION
+      SELECT
+        hp2.parent_id as parent_id,
+        hp2.id as post_id,
+        max_time_stamp(
+          CASE WHEN hp2.counter_deleted > 0 THEN hp2.active
+          ELSE hp2.created_at
+          END
+          , pp.intrusive_active
+        ) as intrusive_active
+      FROM parent_posts pp
+      JOIN hive_posts hp2 ON pp.parent_id = hp2.id
+      WHERE hp2.depth > 0
+    )
+    UPDATE
+      hive_posts
+    SET
+      active = new_active
+    FROM
+    (
+      SELECT hp.id as post_id, max_time_stamp( hp.active, MAX(pp.intrusive_active) ) as new_active
+      FROM parent_posts pp
+      JOIN hive_posts hp ON pp.parent_id = hp.id GROUP BY hp.id
+    ) as dataset
+    WHERE dataset.post_id = hive_posts.id;
+    """
+
+def update_all_posts_active():
+    DB.query_no_return(update_active_sql.format( "AND ( hp1.children = 0 )" ))
+
+@time_it
+def update_active_starting_from_posts_on_block( first_block_num, last_block_num ):
+    if first_block_num == last_block_num:
+        DB.query_no_return(update_active_sql.format( "AND hp1.block_num = {}" ).format(first_block_num) )
+        return
+    DB.query_no_return(update_active_sql.format( "AND hp1.block_num >= {} AND hp1.block_num <= {}" ).format(first_block_num, last_block_num) )
diff --git a/hive/utils/profiler.py b/hive/utils/profiler.py
index 809e9b9c7ba9e0e55340a6818ce350a9b40b36c7..22c59a4b1f8754b47fa35f027b4703e61aa795ab 100755
--- a/hive/utils/profiler.py
+++ b/hive/utils/profiler.py
@@ -1,4 +1,4 @@
-#!/usr/local/bin/python3
+#!/usr/bin/env python3
 """Hive profiling tools"""
 
 import cProfile
diff --git a/hive/utils/stats.py b/hive/utils/stats.py
index fc7d3a5543196e5d08658efeb25726a3b5a14a8a..e37daa480f4ed624f9329eec2bc29b535a327f03 100644
--- a/hive/utils/stats.py
+++ b/hive/utils/stats.py
@@ -3,11 +3,338 @@
 import atexit
 import logging
 
+from queue import Queue
 from time import perf_counter as perf
 from hive.utils.system import colorize, peak_usage_mb
+from psutil import pid_exists
+from os import getpid
 
 log = logging.getLogger(__name__)
 
+class BroadcastObject:
+    def __init__(self, category : str, value, unit):
+        self.category = category
+        self.value = value
+        self.unit = unit
+
+    def name(self):
+        ret = ""
+        for c in self.category:
+            if c.isalnum():
+                ret += c
+            else:
+                ret += "_"
+        return f"hivemind_{ret}"
+
+    def debug(self):
+        log.debug(f"{self.name()}_{self.unit}: {self.value :.2f}")
+
+    def __repr__(self):
+        return self.__str__()
+    
+    def __str__(self):
+        return str(self.__dict__)
+
+class PrometheusClient:
+
+    deamon = None
+    logs_to_broadcast = Queue()
+
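+    # work() runs in a background thread started from __init__ (only when a port
+    # is given and prometheus_client is importable); it drains logs_to_broadcast
+    # and mirrors each BroadcastObject into a Prometheus gauge, creating gauges
+    # lazily, until the owning process (pid) disappears.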
+    @staticmethod
+    def work( port, pid ):
+        try:
+            import prometheus_client as prom
+            prom.start_http_server(port)
+
+            gauges = {}
+
+            while pid_exists(pid):
+                value : BroadcastObject = PrometheusClient.logs_to_broadcast.get()
+                value.debug()
+                value_name = value.name()
+
+                if value_name not in gauges.keys():
+                    gauge = prom.Gauge(value_name, '', unit=value.unit)
+                    gauge.set(value.value)
+                    gauges[value_name] = gauge
+                else:
+                    gauges[value_name].set(value.value)
+
+        except Exception as e:
+            log.error(f"Prometheus logging failed. Exception\n {e}")
+
+    def __init__(self, port):
+        if port is None:
+            return
+        else:
+            port = int(port)
+        if PrometheusClient.deamon is None:
+            try:
+                import prometheus_client
+            except ImportError:
+                log.warning("Failed to import prometheus client. Online stats disabled")
+                return
+            from threading import Thread
+            PrometheusClient.deamon = Thread(target=PrometheusClient.work, args=[ port, getpid() ], daemon=True)
+            PrometheusClient.deamon.start()
+
+    @staticmethod
+    def broadcast(obj):
+        if isinstance(obj, list):
+            for v in obj:
+                PrometheusClient.broadcast(v)
+        elif isinstance(obj, BroadcastObject):
+            PrometheusClient.logs_to_broadcast.put(obj)
+        else:
+            raise Exception(f"Not expected type. Should be list or BroadcastObject, but {type(obj)} given")
+
+class Stat:
+    def __init__(self, time):
+        self.time = time
+
+    def update(self, other):
+        assert type(self) == type(other)
+        attributes = self.__dict__
+        oatte = other.__dict__
+        for key, val in attributes.items():
+            setattr(self, key, oatte[key] + val)
+        return self
+
+    def __repr__(self):
+        return str(self.__dict__)
+
+    def __lt__(self, other):
+        return self.time < other.time
+
+    def broadcast(self, name):
+        return BroadcastObject(name, self.time, 's')
+
+class StatusManager:
+
+    # Fully abstract class
+    def __init__(self):
+        assert False
+
+    @staticmethod
+    def start():
+        return perf()
+
+    @staticmethod
+    def stop( start : float ):
+        return perf() - start
+
+    @staticmethod
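+    # merge_dicts folds per-block stats (od2) into the running totals (od1);
+    # optionally each freshly merged value and/or the accumulated "<key>_total"
+    # is also pushed to the Prometheus exporter.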
+    def merge_dicts(od1, od2, broadcast : bool = False, total_broadcast : bool = True):
+        if od2 is not None:
+            for k, v in od2.items():
+                if k in od1:
+                    od1[k].update(v)
+                else:
+                    od1[k] = v
+                
+                if broadcast:
+                    PrometheusClient.broadcast(v.broadcast(k))
+
+                if total_broadcast:
+                    PrometheusClient.broadcast( od1[k].broadcast( f"{k}_total" ) )
+
+        return od1
+
+    @staticmethod
+    def log_dict(col : dict) -> float:
+        sorted_stats = sorted(col.items(), key=lambda kv: kv[1], reverse=True)
+        measured_time = 0.0
+        for (k, v) in sorted_stats:
+            log.info("`{}`: {}".format(k, v))
+            measured_time += v.time
+        return measured_time
+
+    @staticmethod
+    def print_row():
+        log.info("#" * 20)
+
+class OPStat(Stat):
+    def __init__(self, time, count):
+        super().__init__(time)
+        self.count = count
+
+    def __str__(self):
+        return f"Processed {self.count :.0f} times in {self.time :.5f} seconds"
+
+    def broadcast(self, name : str):
+        n = name.lower()
+        if not n.endswith('operation'):
+            n = f"{n}_operation"
+        return list([ super().broadcast(n), BroadcastObject(n + "_count", self.count, 'b') ])
+
+class OPStatusManager(StatusManager):
+    # Summary for whole sync
+    global_stats = {}
+
+    # Stats for the currently processed blocks; merged into global_stats by next_blocks()
+    cpbs = {}
+
+    @staticmethod
+    def op_stats( name, time, processed = 1 ):
+        if name in OPStatusManager.cpbs.keys():
+            OPStatusManager.cpbs[name].time += time
+            OPStatusManager.cpbs[name].count += processed
+        else:
+            OPStatusManager.cpbs[name] = OPStat(time, processed)
+
+    @staticmethod
+    def next_blocks():
+        OPStatusManager.global_stats = StatusManager.merge_dicts(
+            OPStatusManager.global_stats, 
+            OPStatusManager.cpbs,
+            True
+        )
+        OPStatusManager.cpbs.clear()
+
+    @staticmethod
+    def log_global(label : str):
+        StatusManager.print_row()
+        log.info(label)
+        tm = StatusManager.log_dict(OPStatusManager.global_stats)
+        log.info(f"Total time for processing operations time: {tm :.4f}s.")
+        return tm
+
+
+    @staticmethod
+    def log_current(label : str):
+        StatusManager.print_row()
+        log.info(label)
+        tm = StatusManager.log_dict(OPStatusManager.cpbs)
+        log.info(f"Current time for processing operations time: {tm :.4f}s.")
+        return tm
+
+class FlushStat(Stat):
+    def __init__(self, time, pushed):
+        super().__init__(time)
+        self.pushed = pushed
+
+    def __str__(self):
+        return f"Pushed {self.pushed :.0f} records in {self.time :.4f} seconds"
+
+    def broadcast(self, name : str):
+        n = f"flushing_{name.lower()}"
+        return list([ super().broadcast(n), BroadcastObject(n + "_items", self.pushed, 'b') ])
+
+class FlushStatusManager(StatusManager):
+    # Summary for whole sync
+    global_stats = {}
+
+    # Stats for the currently processed blocks; merged into global_stats by next_blocks()
+    current_flushes = {}
+
+    @staticmethod
+    def flush_stat(name, time, pushed):
+        if name in FlushStatusManager.current_flushes.keys():
+            FlushStatusManager.current_flushes[name].time += time
+            FlushStatusManager.current_flushes[name].pushed += pushed
+        else:
+            FlushStatusManager.current_flushes[name] = FlushStat(time, pushed)
+
+    @staticmethod
+    def next_blocks():
+        FlushStatusManager.global_stats = StatusManager.merge_dicts(
+            FlushStatusManager.global_stats, 
+            FlushStatusManager.current_flushes,
+            True
+        )
+        FlushStatusManager.current_flushes.clear()
+
+    @staticmethod
+    def log_global(label : str):
+        StatusManager.print_row()
+        log.info(label)
+        tm = StatusManager.log_dict(FlushStatusManager.global_stats)
+        log.info(f"Total flushing time: {tm :.4f}s.")
+        return tm
+
+    @staticmethod
+    def log_current(label : str):
+        StatusManager.print_row()
+        log.info(label)
+        tm = StatusManager.log_dict(FlushStatusManager.current_flushes)
+        log.info(f"Current flushing time: {tm :.4f}s.")
+        return tm
+
+class WaitStat(Stat):
+    def __init__(self, time):
+        super().__init__(time)
+
+    def __str__(self):
+        return f"Waited {self.time :.4f} seconds"
+
+class WaitingStatusManager(StatusManager):
+    # Summary for whole sync
+    global_stats = {}
+
+    # Stats for the currently processed blocks; merged into global_stats by next_blocks()
+    current_waits = {}
+
+    @staticmethod
+    def wait_stat(name, time):
+        if name in WaitingStatusManager.current_waits.keys():
+            WaitingStatusManager.current_waits[name].time += time
+        else:
+            WaitingStatusManager.current_waits[name] = WaitStat(time)
+
+    @staticmethod
+    def next_blocks():
+        WaitingStatusManager.global_stats = StatusManager.merge_dicts(
+            WaitingStatusManager.global_stats, 
+            WaitingStatusManager.current_waits,
+            True
+        )
+        WaitingStatusManager.current_waits.clear()
+
+    @staticmethod
+    def log_global(label : str):
+        StatusManager.print_row()
+        log.info(label)
+        tm = StatusManager.log_dict(WaitingStatusManager.global_stats)
+        log.info(f"Total waiting time: {tm :.4f}s.")
+        return tm
+
+    @staticmethod
+    def log_current(label : str):
+        StatusManager.print_row()
+        log.info(label)
+        tm = StatusManager.log_dict(WaitingStatusManager.current_waits)
+        log.info(f"Current waiting time: {tm :.4f}s.")
+        return tm
+
+def minmax(collection : dict, blocks : int, time : float, _from : int):
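+    """Track the min/max block-processing rate (blocks per second) seen so far,
+    remembering which block range produced each extreme, and broadcast the
+    current rate to Prometheus."""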
+    value = blocks / time
+    _to = _from + blocks
+    PrometheusClient.broadcast(BroadcastObject('block_processing_rate', value, 'bps'))
+    if len(collection) == 0:
+        collection['min'] = value
+        collection['min_from'] = _from
+        collection['min_to'] = _to
+
+        collection['max'] = value
+        collection['max_from'] = _from
+        collection['max_to'] = _to
+    else:
+        mn = min(collection['min'], value)
+        if mn == value:
+            collection['min'] = value
+            collection['min_from'] = _from
+            collection['min_to'] = _to
+        mx = max(collection['max'], value)
+        if mx == value:
+            collection['max'] = value
+            collection['max_from'] = _from
+            collection['max_to'] = _to
+
+    return collection
+
 def _normalize_sql(sql, maxlen=180):
     """Collapse whitespace and middle-truncate if needed."""
     out = ' '.join(sql.split())
@@ -65,8 +392,16 @@ class StatsAbstract:
 
         log.info('%7s %9s %9s %9s', '-pct-', '-ttl-', '-avg-', '-cnt-')
         for call, ms, reqs in self.table(40):
+            try:
+                avg = ms/reqs
+                millisec = ms/self._ms
+            except ZeroDivisionError:
+                avg = 0.0
+                millisec = 0.0
+            if reqs == 0:
+                reqs = 1
             log.info("% 6.1f%% % 7dms % 9.2f % 8dx -- %s",
-                     100 * ms/self._ms, ms, ms/reqs, reqs, call)
+                     100 * millisec, ms, avg, reqs, call)
         self.clear()
 
 
@@ -84,11 +419,13 @@ class SteemStats(StatsAbstract):
         'get_dynamic_global_properties': 20,
         'get_block': 50,
         'get_blocks_batch': 5,
-        'get_accounts': 3,
         'get_content': 4,
         'get_order_book': 20,
         'get_feed_history': 20,
         'lookup_accounts': 1000,
+        'get_comment_pending_payouts': 1000,
+        'get_ops_in_block': 500,
+        'enum_virtual_ops': 1000
     }
 
     def __init__(self):
@@ -110,21 +447,27 @@ class SteemStats(StatsAbstract):
 class DbStats(StatsAbstract):
     """Tracks database query timings."""
     SLOW_QUERY_MS = 250
+    LOGGING_THRESHOLD = 50
 
     def __init__(self):
         super().__init__('db')
 
     def check_timing(self, call, ms, batch_size):
         """Warn if any query is slower than defined threshold."""
-        if ms > self.SLOW_QUERY_MS:
-            out = "[SQL][%dms] %s" % (ms, call[:250])
-            log.warning(colorize(out))
 
+        if ms > self.LOGGING_THRESHOLD:
+            log.warning("[SQL][%dms] %s", ms, call)
+            if ms > self.SLOW_QUERY_MS:
+                out = "[SQL][%dms] %s" % (ms, call[:250])
+                log.warning(colorize(out))
 
 class Stats:
     """Container for steemd and db timing data."""
     PRINT_THRESH_MINS = 1
 
+    COLLECT_DB_STATS = 0
+    COLLECT_NODE_STATS = 0
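+    # While these flags stay at 0, log_db() / log_steem() below are no-ops.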
+
     _db = DbStats()
     _steemd = SteemStats()
     _secs = 0.0
@@ -134,14 +477,16 @@ class Stats:
     @classmethod
     def log_db(cls, sql, secs):
         """Log a database query. Incoming SQL is normalized."""
-        cls._db.add(_normalize_sql(sql), secs * 1000)
-        cls.add_secs(secs)
+        if cls.COLLECT_DB_STATS:
+            cls._db.add(_normalize_sql(sql), secs * 1000)
+            cls.add_secs(secs)
 
     @classmethod
     def log_steem(cls, method, secs, batch_size=1):
         """Log a steemd call."""
-        cls._steemd.add(method, secs * 1000, batch_size)
-        cls.add_secs(secs)
+        if cls.COLLECT_NODE_STATS:
+            cls._steemd.add(method, secs * 1000, batch_size)
+            cls.add_secs(secs)
 
     @classmethod
     def log_idle(cls, secs):
diff --git a/hive/utils/timer.py b/hive/utils/timer.py
index 046a906d19e44920488a53ece024cf00b5294632..3800f7cfcca0623ee265dfa4decdbbea34ddb9a7 100644
--- a/hive/utils/timer.py
+++ b/hive/utils/timer.py
@@ -2,6 +2,21 @@
 
 from time import perf_counter as perf
 from hive.utils.normalize import secs_to_str
+from functools import wraps
+
+import logging
+log = logging.getLogger(__name__)
+
+# time_it decorator: measures a method's execution time and logs it
+def time_it(method):
+    @wraps(method)
+    def time_method(*args, **kwargs):
+        start_time = perf()
+        result = method(*args, **kwargs)
+        log.info("%s executed in %.4f s", method.__name__, perf() - start_time)
+        return result
+    return time_method
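+# Example usage (illustrative; the function name is hypothetical):
+#     @time_it
+#     def flush_posts():
+#         ...   # logs "flush_posts executed in 0.1234 s" when called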
+
 
 class Timer:
     """Times long routines, printing status and ETA.
diff --git a/hive/utils/trends.py b/hive/utils/trends.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a8c245b20ff6fe3ee0ff3a07ed9f12974077fad
--- /dev/null
+++ b/hive/utils/trends.py
@@ -0,0 +1,40 @@
+from hive.db.adapter import Db
+from hive.utils.timer import time_it
+
+DB = Db.instance()
+
+def update_all_hot_and_tranding():
+    """Calculate and set hot and trending values of all posts"""
+    update_hot_and_tranding_for_block_range()
+
+NO_CONSTRAINT = -1
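+# Sentinel meaning "no block_num bound on that side of the range".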
+
+@time_it
+def update_hot_and_tranding_for_block_range( first_block = NO_CONSTRAINT, last_block = NO_CONSTRAINT):
+    """Calculate and set hot and trending values of all posts"""
+    hot_and_trend_sql = """
+        UPDATE hive_posts ihp
+            set sc_hot = calculate_hot(ds.rshares_sum, ihp.created_at),
+            sc_trend = calculate_tranding(ds.rshares_sum, ihp.created_at)
+        FROM
+        (
+            SELECT hv.post_id as id, CAST(sum(hv.rshares) AS BIGINT) as rshares_sum
+            FROM hive_votes hv
+            {}
+            GROUP BY hv.post_id
+        ) as ds
+        WHERE ihp.id = ds.id AND ihp.is_paidout = False
+    """
+
+    sql = ""
+    if first_block == NO_CONSTRAINT and last_block == NO_CONSTRAINT:
+        sql = hot_and_trend_sql.format( "" )
+    elif last_block == NO_CONSTRAINT:
+        sql = hot_and_trend_sql.format( "WHERE block_num >= {}".format( first_block ) )
+    elif first_block == NO_CONSTRAINT:
+        sql = hot_and_trend_sql.format( "WHERE block_num <= {}".format( last_block ) )
+    elif first_block == last_block:
+        sql = hot_and_trend_sql.format( "WHERE block_num = {}".format( last_block ) )
+    else:
+        sql = hot_and_trend_sql.format( "WHERE block_num >= {} AND block_num <= {}".format( first_block, last_block ) )
+    DB.query_no_return(sql)
diff --git a/mock_data/block_data/community_op/flow.txt b/mock_data/block_data/community_op/flow.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dd8778f8d9064a79f93089c243bca8a6958c47e4
--- /dev/null
+++ b/mock_data/block_data/community_op/flow.txt
@@ -0,0 +1,116 @@
+***block 4999999***
+account_create_operation( `hive-171487` )
+account_create_operation( `hive-171488` )
+account_create_operation( `hive-135485` )
+account_create_operation( `hive-117600` )
+account_create_operation( `hive-165317` )
+account_create_operation( `hive-186669` )
+account_create_operation( `hive-103459` )
+account_create_operation( `hive-188204` )
+account_create_operation( `hive-149232` )
+account_create_operation( `hive-104647` )
+comment_operation( `hive-135485`, `test-safari`, `secrets1`)
+comment_operation( `hive-135485`, `test-safari`, `secrets2`)
+transfer_operation( `test-safari`, `null`, `0.010666 HBD`, `@test-safari/secrets2` ) - post promotion (deliberately bad amount precision to verify that rounding occurs)
+comment_operation( `hive-117600`, `test-safari`, `secrets3`)
+comment_operation( `hive-117600`, `test-safari`, `secrets4`)
+comment_operation( `hive-117600`, `test-safari`, `secrets5`)
+comment_operation( `hive-117600`, `test-safari`, `secrets6`)
+custom_json_operation("[\"setRole\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"role\":\"admin\"}]")
+***block 5000000***
+custom_json_operation("[\"updateProps\",{\"community\":\"hive-135485\",\"props\":{\"title\":\"World News\",\"about\":\"A place for major news from around the world.\",\"is_nsfw\":true,\"description\":\"\",\"flag_text\":\"\"}}]")
+custom_json_operation("[\"setRole\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"role\":\"mod\"}]")
+custom_json_operation("[\"setUserTitle\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"title\":\"Bill Gates\"}]")
+custom_json_operation("[\"subscribe\",{\"community\":\"hive-135485\"}]")
+custom_json_operation("[\"unsubscribe\",{\"community\":\"hive-135485\"}]")
+custom_json_operation("[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\",\"notes\":\"spamming\"}]")
+custom_json_operation("[\"unmutePost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\",\"notes\":\"testibgn\"}]")
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\"}]")
+custom_json_operation("[\"unpinPost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\"}]")
+custom_json_operation("[\"flagPost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\",\"notes\":\"This is not a kitten..\"}]")
+***block 5000001***
+custom_json_operation("[\"setRole\",{\"community\":\"hive-171488\",\"account\":\"test-safari\",\"role\":\"admin\"}]")
+custom_json_operation("[\"setRole\",{\"community\":\"hive-171487\",\"account\":\"test-safari\",\"role\":\"admin\"}]")
+custom_json_operation("[\"updateProps\",{\"community\":\"hive-171488\",\"props\":{\"title\":\"Hello\",\"about\":\"Nothing.\",\"is_nsfw\":true,\"description\":\"Nothing\",\"flag_text\":\"Lol\"}}]")
+custom_json_operation("[\"updateProps\",{\"community\":\"hive-171487\",\"props\":{\"title\":\"Banana\",\"about\":\"Banana-nothing.\",\"is_nsfw\":true,\"description\":\"Cherry\",\"flag_text\":\"Lemon\"}}]")
+custom_json_operation("[\"setRole\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"role\":\"admin\"}]")
+custom_json_operation("[\"setRole\",{\"community\":\"hive-117600\",\"account\":\"roadscape\",\"role\":\"admin\"}]")
+custom_json_operation("[\"setRole\",{\"community\":\"hive-117600\",\"account\":\"gtg\",\"role\":\"member\"}]")
+***block 5000002***
+custom_json_operation("[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\",\"notes\":\"secrets1 are a spam\"}]")
+custom_json_operation("[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets2\",\"notes\":\"secrets2 are a spam\"}]")
+custom_json_operation("[\"mutePost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets5\",\"notes\":\"secret5 are a spam\"}]")
+custom_json_operation("[\"unmutePost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets2\",\"notes\":\"secrets1 are a spam, but I love them\"}]")
+***block 5000003***
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets3\"}]")
+custom_json_operation("[\"unpinPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets3\"}]")
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets4\"}]")
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets5\"}]")
+***block 5000004***
+custom_json_operation("[\"flagPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets5\",\"notes\":\"secrets5 are boring\"}]")
+custom_json_operation("[\"flagPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets6\",\"notes\":\"secrets5 are stupid\"}]")
+***block 5000005***
+custom_json_operation("test-safari" -> "[\"subscribe\",{\"community\":\"hive-171487\"}]")
+custom_json_operation("test-safari" -> "[\"subscribe\",{\"community\":\"hive-171488\"}]")
+custom_json_operation("test-safari" -> "[\"subscribe\",{\"community\":\"hive-135485\"}]")
+custom_json_operation("test-safari" -> "[\"subscribe\",{\"community\":\"hive-117600\"}]")
+custom_json_operation("test-safari" -> "[\"subscribe\",{\"community\":\"hive-165317\"}]")
+custom_json_operation("gtg" -> "[\"subscribe\",{\"community\":\"hive-171487\"}]")
+custom_json_operation("gtg" -> "[\"subscribe\",{\"community\":\"hive-171488\"}]")
+custom_json_operation("gtg" -> "[\"subscribe\",{\"community\":\"hive-135485\"}]")
+custom_json_operation("gtg" -> "[\"subscribe\",{\"community\":\"hive-186669\"}]")
+custom_json_operation("gtg" -> "[\"subscribe\",{\"community\":\"hive-103459\"}]")
+custom_json_operation("roadscape" -> "[\"subscribe\",{\"community\":\"hive-171487\"}]")
+custom_json_operation("roadscape" -> "[\"subscribe\",{\"community\":\"hive-171488\"}]")
+custom_json_operation("roadscape" -> "[\"subscribe\",{\"community\":\"hive-135485\"}]")
+custom_json_operation("roadscape" -> "[\"subscribe\",{\"community\":\"hive-186669\"}]")
+custom_json_operation("roadscape" -> "[\"subscribe\",{\"community\":\"hive-104647\"}]")
+***block 5000006***
+custom_json_operation("good-karma" -> "[\"subscribe\",{\"community\":\"hive-171487\"}]")
+custom_json_operation("good-karma" -> "[\"subscribe\",{\"community\":\"hive-171488\"}]")
+custom_json_operation("good-karma" -> "[\"subscribe\",{\"community\":\"hive-135485\"}]")
+custom_json_operation("good-karma" -> "[\"subscribe\",{\"community\":\"hive-117600\"}]")
+custom_json_operation("good-karma" -> "[\"subscribe\",{\"community\":\"hive-165317\"}]")
+custom_json_operation("good-karma" -> "[\"subscribe\",{\"community\":\"hive-186669\"}]")
+custom_json_operation("good-karma" -> "[\"subscribe\",{\"community\":\"hive-103459\"}]")
+custom_json_operation("good-karma" -> "[\"subscribe\",{\"community\":\"hive-188204\"}]")
+custom_json_operation("good-karma" -> "[\"subscribe\",{\"community\":\"hive-149232\"}]")
+custom_json_operation("good-karma" -> "[\"subscribe\",{\"community\":\"hive-104647\"}]")
+***block 5000007***
+custom_json_operation("[\"setRole\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"role\":\"admin\"}]")
+custom_json_operation("[\"setRole\",{\"community\":\"hive-117600\",\"account\":\"abit\",\"role\":\"admin\"}]")
+comment_operation( `hive-117600`, `abit`, `anaconda01`)
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"abit\",\"permlink\":\"anaconda01\"}]")
+comment_operation( `hive-117600`, `abit`, `anaconda02`)
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"abit\",\"permlink\":\"anaconda02\"}]")
+***block 5000008***
+comment_operation( `hive-117600`, `good-karma`, `spider01`)
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"permlink\":\"spider01\"}]")
+comment_operation( `hive-117600`, `good-karma`, `spider02`)
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"permlink\":\"spider02\"}]")
+custom_json_operation("[\"mutePost\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"permlink\":\"spider02\",\"notes\":\"I hate spiders 02\"}]")
+custom_json_operation("[\"mutePost\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"permlink\":\"spider01\",\"notes\":\"I hate spiders 01\"}]")
+custom_json_operation("[\"unmutePost\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"permlink\":\"spider01\",\"notes\":\"I hate spiders 02, but they are funny\"}]")
+***block 5000009***
+custom_json_operation("[\"setRole\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"role\":\"admin\"}]")
+comment_operation( `hive-135485`, `blocktrades`, `crocodile01`)
+comment_operation( `hive-135485`, `blocktrades`, `crocodile02`)
+comment_operation( `hive-135485`, `blocktrades`, `crocodile03`)
+custom_json_operation("[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile01\",\"notes\":\"I hate crocodiles 01\"}]")
+custom_json_operation("[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile02\",\"notes\":\"I hate crocodiles 02\"}]")
+custom_json_operation("[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile03\",\"notes\":\"I hate crocodiles 03\"}]")
+custom_json_operation("[\"unmutePost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile03\",\"notes\":\"I hate crocodiles 03, but they are cool\"}]")
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile02\"}]")
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile03\"}]")
+comment_operation( `hive-135485`, `blocktrades`, `elephant01`)
+***block 5000010***
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets6\"}]")
+delete_comment_operation(`test-safari`, `secrets3`)
+custom_json_operation("[\"mutePost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets6\",\"notes\":\"I don't like it\"}]")
+delete_comment_operation(`test-safari`, `secrets6`)
+custom_json_operation("[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"elephant01\",\"notes\":\"I don't like elephants\"}]")
+custom_json_operation("[\"pinPost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"elephant01\"}]")
+***block 5000011***
+custom_json_operation("gtg" -> "[\"unsubscribe\",{\"community\":\"hive-103459\"}]")
+custom_json_operation("good-karma" -> "[\"unsubscribe\",{\"community\":\"hive-103459\"}]")
+delete_comment_operation(`blocktrades`, `elephant01`)
\ No newline at end of file
diff --git a/mock_data/block_data/community_op/mock_block_data_community.json b/mock_data/block_data/community_op/mock_block_data_community.json
new file mode 100644
index 0000000000000000000000000000000000000000..4804a706a4de68d678944f21154239f8f399a52c
--- /dev/null
+++ b/mock_data/block_data/community_op/mock_block_data_community.json
@@ -0,0 +1,1770 @@
+{
+  "4999999": {
+    "previous": "004c4b3e03ea2eac2494790786bfb9e41a8669d9",
+    "timestamp": "2016-09-15T19:47:18",
+    "witness": "",
+    "transaction_merkle_root": "",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100000,
+        "ref_block_prefix": 0,
+        "expiration": "2020-03-23T12:08:00",
+        "operations": [
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "hive-171487",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "hive-171488",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "hive-135485",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "hive-117600",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "hive-165317",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "hive-186669",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "hive-103459",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "hive-188204",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "hive-149232",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "hive-104647",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-135485",
+              "author": "test-safari",
+              "permlink": "secrets1",
+              "title": "I love secrets 01",
+              "body": "tell me secret 01",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-135485",
+              "author": "test-safari",
+              "permlink": "secrets2",
+              "title": "I love secrets 02",
+              "body": "tell me secret 02",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "transfer_operation",
+            "value": {
+              "from": "test-safari",
+              "to": "null",
+              "amount": "0.010666 HBD",
+              "memo": "@test-safari/secrets2"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-117600",
+              "author": "test-safari",
+              "permlink": "secrets3",
+              "title": "I love secrets 03",
+              "body": "tell me secret 03",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-117600",
+              "author": "test-safari",
+              "permlink": "secrets4",
+              "title": "I love secrets 04",
+              "body": "tell me secret 04",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-117600",
+              "author": "test-safari",
+              "permlink": "secrets5",
+              "title": "I love secrets 05",
+              "body": "tell me secret 05",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-117600",
+              "author": "test-safari",
+              "permlink": "secrets6",
+              "title": "I love secrets 06",
+              "body": "tell me secret 06",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "hive-135485"
+              ],
+              "id": "community",
+              "json": "[\"setRole\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"role\":\"admin\"}]"
+            }
+          }
+        ],
+        "extensions": [],
+        "signatures": [
+          ""
+        ]
+      }
+    ],
+    "block_id": "004c4b3fc6a8735b4ab5433d59f4526e4a042644",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000000": {
+    "previous": "004c4b3fc6a8735b4ab5433d59f4526e4a042644",
+    "timestamp": "2016-09-15T19:47:21",
+    "witness": "initminer",
+    "transaction_merkle_root": "",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"updateProps\",{\"community\":\"hive-135485\",\"props\":{\"title\":\"World News\",\"about\":\"A place for major news from around the world.\",\"is_nsfw\":true,\"description\":\"\",\"flag_text\":\"\"}}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"setRole\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"role\":\"mod\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"setUserTitle\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"title\":\"Bill Gates\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-135485\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"unsubscribe\",{\"community\":\"hive-135485\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\",\"notes\":\"spamming\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"unmutePost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\",\"notes\":\"testibgn\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"unpinPost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"flagPost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\",\"notes\":\"This is not a kitten..\"}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b40245ffb07380a393fb2b3d841b76cdaec",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000001": {
+    "previous": "004c4b40245ffb07380a393fb2b3d841b76cdaec",
+    "timestamp": "2016-09-15T19:47:24",
+    "witness": "initminer",
+    "transaction_merkle_root": "",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "hive-171488"
+              ],
+              "id": "community",
+              "json": "[\"setRole\",{\"community\":\"hive-171488\",\"account\":\"test-safari\",\"role\":\"admin\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "hive-171487"
+              ],
+              "id": "community",
+              "json": "[\"setRole\",{\"community\":\"hive-171487\",\"account\":\"test-safari\",\"role\":\"admin\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"updateProps\",{\"community\":\"hive-171488\",\"props\":{\"title\":\"Hello\",\"about\":\"Nothing.\",\"is_nsfw\":true,\"description\":\"Nothing\",\"flag_text\":\"Lol\"}}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"updateProps\",{\"community\":\"hive-171487\",\"props\":{\"title\":\"Banana\",\"about\":\"Banana-nothing.\",\"is_nsfw\":true,\"description\":\"Cherry\",\"flag_text\":\"Lemon\"}}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "hive-117600"
+              ],
+              "id": "community",
+              "json": "[\"setRole\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"role\":\"admin\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"setRole\",{\"community\":\"hive-117600\",\"account\":\"roadscape\",\"role\":\"mod\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"setRole\",{\"community\":\"hive-117600\",\"account\":\"gtg\",\"role\":\"member\"}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4100000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000002": {
+    "previous": "004c4b4100000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:27",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets1\",\"notes\":\"secrets1 are a spam\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets2\",\"notes\":\"secrets2 are a spam\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"mutePost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets5\",\"notes\":\"secret5 are a spam\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"unmutePost\",{\"community\":\"hive-135485\",\"account\":\"test-safari\",\"permlink\":\"secrets2\",\"notes\":\"secrets1 are a spam, but I love them\"}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4200000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000003": {
+    "previous": "004c4b4200000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:30",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets3\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"unpinPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets3\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets4\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets5\"}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4300000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000004": {
+    "previous": "004c4b4300000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:33",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"flagPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets5\",\"notes\":\"secrets5 are boring\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"flagPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets6\",\"notes\":\"secrets5 are stupid\"}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4400000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000005": {
+    "previous": "004c4b4400000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:36",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-171487\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-171488\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-135485\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-117600\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-165317\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "gtg"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-171487\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "gtg"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-171488\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "gtg"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-135485\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "gtg"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-186669\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "gtg"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-103459\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "roadscape"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-171487\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "roadscape"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-171488\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "roadscape"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-135485\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "roadscape"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-186669\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "roadscape"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-104647\"}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4500000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000006": {
+    "previous": "004c4b4500000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:39",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "good-karma"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-171487\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "good-karma"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-171488\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "good-karma"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-135485\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "good-karma"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-117600\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "good-karma"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-165317\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "good-karma"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-186669\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "good-karma"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-103459\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "good-karma"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-188204\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "good-karma"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-149232\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "good-karma"
+              ],
+              "id": "community",
+              "json": "[\"subscribe\",{\"community\":\"hive-104647\"}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4600000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000007": {
+    "previous": "004c4b4600000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:42",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "hive-117600"
+              ],
+              "id": "community",
+              "json": "[\"setRole\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"role\":\"admin\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "hive-117600"
+              ],
+              "id": "community",
+              "json": "[\"setRole\",{\"community\":\"hive-117600\",\"account\":\"abit\",\"role\":\"admin\"}]"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-117600",
+              "author": "abit",
+              "permlink": "anaconda01",
+              "title": "powerful snake 01",
+              "body": "only snakes 01",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "abit"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"abit\",\"permlink\":\"anaconda01\"}]"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-117600",
+              "author": "abit",
+              "permlink": "anaconda02",
+              "title": "powerful snake 02",
+              "body": "only snakes 02",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "abit"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"abit\",\"permlink\":\"anaconda02\"}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4700000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000008": {
+    "previous": "004c4b4700000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:45",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-117600",
+              "author": "good-karma",
+              "permlink": "spider01",
+              "title": "powerful spider 01",
+              "body": "only spiders 01",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "abit"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"permlink\":\"spider01\"}]"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-117600",
+              "author": "good-karma",
+              "permlink": "spider02",
+              "title": "powerful spider 02",
+              "body": "only spiders 02",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "abit"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"permlink\":\"spider02\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "abit"
+              ],
+              "id": "community",
+              "json": "[\"mutePost\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"permlink\":\"spider02\",\"notes\":\"I hate spiders 02\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "abit"
+              ],
+              "id": "community",
+              "json": "[\"mutePost\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"permlink\":\"spider01\",\"notes\":\"I hate spiders 01\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "abit"
+              ],
+              "id": "community",
+              "json": "[\"unmutePost\",{\"community\":\"hive-117600\",\"account\":\"good-karma\",\"permlink\":\"spider01\",\"notes\":\"I hate spiders 02, but they are funny\"}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4800000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000009": {
+    "previous": "004c4b4800000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:48",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "hive-135485"
+              ],
+              "id": "community",
+              "json": "[\"setRole\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"role\":\"admin\"}]"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-135485",
+              "author": "blocktrades",
+              "permlink": "crocodile01",
+              "title": "powerful crocodile 01",
+              "body": "only crocodiles 01",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-135485",
+              "author": "blocktrades",
+              "permlink": "crocodile02",
+              "title": "powerful crocodile 02",
+              "body": "only crocodiles 02",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-135485",
+              "author": "blocktrades",
+              "permlink": "crocodile03",
+              "title": "powerful crocodile 03",
+              "body": "only crocodiles 03",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "blocktrades"
+              ],
+              "id": "community",
+              "json": "[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile01\",\"notes\":\"I hate crocodiles 01\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "blocktrades"
+              ],
+              "id": "community",
+              "json": "[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile02\",\"notes\":\"I hate crocodiles 02\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "blocktrades"
+              ],
+              "id": "community",
+              "json": "[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile03\",\"notes\":\"I hate crocodiles 03\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "blocktrades"
+              ],
+              "id": "community",
+              "json": "[\"unmutePost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile03\",\"notes\":\"I hate crocodiles 03, but they are cool\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "blocktrades"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile02\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "blocktrades"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"crocodile03\"}]"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "hive-135485",
+              "author": "blocktrades",
+              "permlink": "elephant01",
+              "title": "powerful elephant 01",
+              "body": "only elephants 01",
+              "json_metadata": "{}"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4900000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+        ]
+      }
+
+    ]
+  },
+  "5000010": {
+    "previous": "004c4b4900000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:51",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets6\"}]"
+            }
+          },
+          {
+            "type": "delete_comment_operation",
+            "value": {
+              "author": "test-safari",
+              "permlink": "secrets3"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-safari"
+              ],
+              "id": "community",
+              "json": "[\"mutePost\",{\"community\":\"hive-117600\",\"account\":\"test-safari\",\"permlink\":\"secrets6\",\"notes\":\"I dont like it\"}]"
+            }
+          },
+          {
+            "type": "delete_comment_operation",
+            "value": {
+              "author": "test-safari",
+              "permlink": "secrets6"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "blocktrades"
+              ],
+              "id": "community",
+              "json": "[\"mutePost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"elephant01\",\"notes\":\"I don't like elephants\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "blocktrades"
+              ],
+              "id": "community",
+              "json": "[\"pinPost\",{\"community\":\"hive-135485\",\"account\":\"blocktrades\",\"permlink\":\"elephant01\"}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4a00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000011": {
+    "previous": "004c4b4a00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:54",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "gtg"
+              ],
+              "id": "community",
+              "json": "[\"unsubscribe\",{\"community\":\"hive-103459\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "good-karma"
+              ],
+              "id": "community",
+              "json": "[\"unsubscribe\",{\"community\":\"hive-103459\"}]"
+            }
+          },
+          {
+            "type": "delete_comment_operation",
+            "value": {
+              "author": "blocktrades",
+              "permlink": "elephant01"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4b00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000012": {
+    "previous": "004c4b4b00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:57",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4c00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000013": {
+    "previous": "004c4b4c00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:00",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4d00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000014": {
+    "previous": "004c4b4d00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:03",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4e00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000015": {
+    "previous": "004c4b4e00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:06",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4f00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000016": {
+    "previous": "004c4b4f00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:09",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b5000000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000017": {
+    "previous": "004c4b5000000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:12",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b5100000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  }
+}
diff --git a/mock_data/block_data/follow_op/mock_block_data_follow.json b/mock_data/block_data/follow_op/mock_block_data_follow.json
new file mode 100644
index 0000000000000000000000000000000000000000..37bebb093cb916bf88c1bf8f34e5a22494b6ad03
--- /dev/null
+++ b/mock_data/block_data/follow_op/mock_block_data_follow.json
@@ -0,0 +1,1211 @@
+{
+  "4999999": {
+    "previous": "004c4b3e03ea2eac2494790786bfb9e41a8669d9",
+    "timestamp": "2016-09-15T19:47:18",
+    "witness": "",
+    "transaction_merkle_root": "",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100000,
+        "ref_block_prefix": 0,
+        "expiration": "2020-03-23T12:08:00",
+        "operations": [
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "tester1",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "tester2",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "tester3",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "tester4",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "tester5",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "spaminator",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "hivewatchers",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "buildawhale",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "redeemer",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "ignoreall",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "{\"profile\":{\"blacklist_description\":\"People I don't like\",\"muted_list_description\":\"People that need to STFU\"}}",
+              "extensions": []
+            }
+          }
+        ],
+        "extensions": [],
+        "signatures": [
+          ""
+        ]
+      }
+    ],
+    "block_id": "004c4b3fc6a8735b4ab5433d59f4526e4a042644",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000000": {
+    "previous": "004c4b3fc6a8735b4ab5433d59f4526e4a042644",
+    "timestamp": "2016-09-15T19:47:21",
+    "witness": "initminer",
+    "transaction_merkle_root": "",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":[\"tester3\", \"tester4\"],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":[\"t'es'ter3\", \"<html><body><p>PPPPP</p></body></html>\"],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":[\"tester7\", \"<script>alert('hello world');</script>\"],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":[\"tester3\", \"tester4\"],\"what\":[\"blogo-doggo\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "te'%@ter1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"te'%@ter1\",\"following\":[\"tester3\", \"tester4\"],\"what\":[\"blog\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b40245ffb07380a393fb2b3d841b76cdaec",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000001": {
+    "previous": "004c4b40245ffb07380a393fb2b3d841b76cdaec",
+    "timestamp": "2016-09-15T19:47:24",
+    "witness": "initminer",
+    "transaction_merkle_root": "",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":[\"tester3\", \"tester4\"],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":[\"t'es'ter3\", \"<html><body><p>PPPPP</p></body></html>\"],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":[\"tester3\", \"gtg\"],\"what\":[\"blogo-doggo\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "te'%@ter1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"te'%@ter1\",\"following\":[\"gtg\", \"tester4\"],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "{\"tester1\":\"tester1\"}"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":{\"tester1\":\"tester1\"},\"following\":{\"gtg\":\"gtg\"},\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":{\"gtg\":\"gtg\"},\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":[\"tester3\", [\"gtg\"]],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":[\"tester1\"],\"following\":[\"tester3\", [\"gtg\"]],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "[\"tester1\"]"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":[\"tester1\"],\"following\":[\"tester3\", {\"gtg\":\"gtg\"}],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":[\"tester3\", {\"gtg\":\"gtg\"}],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":[\"tester7\", \"<script>alert('hello world');</script>\"],\"what\":[\"blog\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4100000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000002": {
+    "previous": "004c4b4100000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:27",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"ignore\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "spaminator"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"spaminator\",\"following\":[\"lyubovbar\",\"zaitsevalesyaa\",\"kingscrown\",\"trevonjb\",\"craig-grant\",\"ned\",\"mindhunter\"],\"what\":[\"blacklist\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "hivewatchers"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"hivewatchers\",\"following\":[\"lyubovbar\",\"rkpl\",\"blendplayz\",\"renzoarg\",\"kingscrown\",\"forevergala\",\"machinelearning\",\"ola1\",\"steembeast\",\"ekremi12\",\"steem4lyf\",\"caitlinm\",\"bruno1122\"],\"what\":[\"blacklist\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "buildawhale"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"buildawhale\",\"following\":[\"zaitsevalesyaa\",\"trevonjb\",\"earnest\",\"wildchild\",\"craig-grant\"],\"what\":[\"blacklist\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "redeemer"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"redeemer\",\"following\":[\"zaitsevalesyaa\",\"trevonjb\",\"craig-grant\"],\"what\":[\"blacklist\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "ignoreall"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"ignoreall\",\"following\":[\"gtg\",\"alice\",\"davr86\"],\"what\":[\"blacklist\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "ignoreall"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"ignoreall\",\"following\":[\"gtg\",\"alice\",\"davr86\",\"fyrstikken\",\"gavvet\",\"ooak\",\"kental\",\"r4fken\",\"roland.haynes\",\"agartha\",\"feline1991\"],\"what\":[\"ignore\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4200000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000003": {
+    "previous": "004c4b4200000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:30",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"blacklist\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4300000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000004": {
+    "previous": "004c4b4300000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:33",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"follow_blacklist\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "alice"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"alice\",\"following\":[\"spaminator\",\"hivewatchers\",\"buildawhale\",\"redeemer\"],\"what\":[\"follow_blacklist\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "alice"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"alice\",\"following\":\"ignoreall\",\"what\":[\"follow_muted\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4400000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000005": {
+    "previous": "004c4b4400000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:36",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"unblacklist\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4500000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000006": {
+    "previous": "004c4b4500000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:39",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"unfollow_blacklist\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4600000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000007": {
+    "previous": "004c4b4600000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:42",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"follow_muted\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4700000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000008": {
+    "previous": "004c4b4700000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:45",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"unfollow_muted\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4800000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000009": {
+    "previous": "004c4b4800000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:48",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"reset_blacklist\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4900000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000010": {
+    "previous": "004c4b4900000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:51",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"reset_following_list\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4a00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000011": {
+    "previous": "004c4b4a00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:54",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"reset_follow_blacklist\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4b00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000012": {
+    "previous": "004c4b4b00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:57",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"reset_follow_muted_list\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4c00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000013": {
+    "previous": "004c4b4c00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:00",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"reset_all_lists\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4d00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000014": {
+    "previous": "004c4b4d00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:03",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester2"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester2\",\"following\":[\"tester3\", \"tester4\"],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester3"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester3\",\"following\":[\"tester4\"],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester4"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester4\",\"following\":[\"tester5\", \"tester1\"],\"what\":[\"blog\"]}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester5"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester5\",\"following\":[\"tester1\", \"tester2\"],\"what\":[\"blog\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4e00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000017": {
+    "previous": "004c4b5000000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:12",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b5100000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  }
+}
\ No newline at end of file
diff --git a/mock_data/block_data/reblog_op/flow.txt b/mock_data/block_data/reblog_op/flow.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9e6c0d69cc811521665e99546882c280c3f0f26f
--- /dev/null
+++ b/mock_data/block_data/reblog_op/flow.txt
@@ -0,0 +1,80 @@
+***block 4999999***
+custom_json_operation("[\"reblog\",{\"account\":\"funny\",\"author\":\"steemit\",\"permlink\":\"firstpost\"}]") - very old post
+custom_json_operation("[\"reblog\",{\"account\":\"funny\",\"author\":\"steak\",\"permlink\":\"streak-test\"}]") - deleted post (should not be reblogged)
+custom_json_operation("[\"reblog\",{\"account\":\"funny\",\"author\":\"dollarvigilante\",\"permlink\":\"another-billionaire-warns-of-catastrophic-depths-not-seen-in-5-000-years-and-emphasizes-gold\"}]") - fresh post
+account_create_operation( `test-reblog-01` )
+account_create_operation( `test-reblog-02` )
+account_create_operation( `test-reblog-03` )
+account_create_operation( `test-reblog-04` )
+account_create_operation( `test-reblog-05` )
+account_create_operation( `test-reblog-06` )
+comment_operation( `test-reblog-01`, `parrot-01`)
+comment_operation( `test-reblog-02`, `parrot-02`)
+comment_operation( `test-reblog-03`, `parrot-03`)
+comment_operation( `test-reblog-04`, `parrot-04`)
+comment_operation( `test-reblog-05`, `parrot-05`)
+comment_operation( `test-reblog-06`, `parrot-06`)
+***block 5000000***
+custom_json_operation("{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-02\",\"permlink\":\"parrot-02\"}")
+custom_json_operation("{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-03\",\"permlink\":\"parrot-03\"}")
+custom_json_operation("{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}")
+custom_json_operation("{\"delete\":\"delete\",\"account\":\"test-reblog-01\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}")
+custom_json_operation("{\"delete\":\"delete\",\"account\":\"test-reblog-01\",\"author\":\"test-reblog-03\",\"permlink\":\"parrot-03\"}")
+custom_json_operation("{\"delete\":\"delete\",\"account\":\"test-reblog-01\",\"author\":\"test-reblog-02\",\"permlink\":\"parrot-02\"}")
+
+***block 5000001***
+custom_json_operation("{\"account\":\"test-reblog-02\",\"author\":\"test-reblog-01\",\"permlink\":\"parrot-01\"}")
+custom_json_operation("{\"account\":\"test-reblog-02\",\"author\":\"test-reblog-02\",\"permlink\":\"parrot-02\"}")
+custom_json_operation("{\"account\":\"test-reblog-02\",\"author\":\"test-reblog-03\",\"permlink\":\"parrot-03\"}")
+custom_json_operation("{\"account\":\"test-reblog-03\",\"author\":\"test-reblog-02\",\"permlink\":\"parrot-02\"}")
+custom_json_operation("{\"account\":\"test-reblog-03\",\"author\":\"test-reblog-03\",\"permlink\":\"parrot-03\"}")
+custom_json_operation("{\"account\":\"test-reblog-03\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}")
+***block 5000002***
+custom_json_operation("{\"account\":\"test-reblog-04\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}")
+custom_json_operation("{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-05\",\"permlink\":\"parrot-05\"}")
+custom_json_operation("{\"account\":\"test-reblog-06\",\"author\":\"test-reblog-06\",\"permlink\":\"parrot-06\"}")
+custom_json_operation("{\"account\":\"test-reblog-06\",\"author\":\"test-reblog-05\",\"permlink\":\"parrot-05\"}")
+custom_json_operation("{\"delete\":\"delete\",\"account\":\"test-reblog-04\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}")
+custom_json_operation("{\"delete\":\"delete\",\"account\":\"test-reblog-06\",\"author\":\"test-reblog-06\",\"permlink\":\"parrot-06\"}")
+***block 5000003***
+custom_json_operation("{\"delete\":\"delete\",\"account\":\"test-reblog-05\",\"author\":\"test-reblog-05\",\"permlink\":\"parrot-05\"}")
+custom_json_operation("{\"delete\":\"delete\",\"account\":\"test-reblog-06\",\"author\":\"test-reblog-05\",\"permlink\":\"parrot-05\"}")
+***block 5000004***
+custom_json_operation("{\"account\":\"test-reblog-04\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}")
+custom_json_operation("{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-05\",\"permlink\":\"parrot-05\"}")
+custom_json_operation("{\"account\":\"test-reblog-06\",\"author\":\"test-reblog-06\",\"permlink\":\"parrot-06\"}")
+custom_json_operation("{\"account\":\"test-reblog-06\",\"author\":\"test-reblog-05\",\"permlink\":\"parrot-05\"}")
+delete_comment_operation(`test-reblog-04`, `parrot-04`)
+delete_comment_operation(`test-reblog-05`, `parrot-05`)
+***block 5000005***
+delete_comment_operation(`test-reblog-06`, `parrot-06`)
+comment_operation( `test-reblog-01`, `monkey-01`)
+comment_operation( `test-reblog-01`, `monkey-02`)
+comment_operation( `test-reblog-01`, `monkey-03`)
+comment_operation( `test-reblog-01`, `monkey-04`)
+comment_operation( `test-reblog-01`, `monkey-05`)
+custom_json_operation("{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-01\"}")
+delete_comment_operation(`test-reblog-01`, `monkey-01`)
+***block 5000006***
+custom_json_operation("{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-02\"}")
+custom_json_operation("{\"account\":\"test-reblog-03\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-02\"}")
+custom_json_operation("{\"account\":\"test-reblog-04\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-02\"}")
+***block 5000007***
+delete_comment_operation(`test-reblog-01`, `monkey-02`)
+custom_json_operation("{\"delete\":\"delete\",\"account\":\"test-reblog-03\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}")
+custom_json_operation("{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-03\"}")
+custom_json_operation("{\"account\":\"test-reblog-02\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-03\"}")
+custom_json_operation("{\"account\":\"test-reblog-03\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-03\"}")
+custom_json_operation("{\"account\":\"test-reblog-04\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-03\"}")
+custom_json_operation("{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-03\"}")
+custom_json_operation("{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-04\"}")
+custom_json_operation("{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-05\"}")
+***block 5000008***
+custom_json_operation("{\"delete\":\"delete\",\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-04\"}")
+custom_json_operation("{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-04\"}")
+custom_json_operation("{\"delete\":\"delete\",\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-04\"}")
+custom_json_operation("{\"delete\":\"delete\",\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-04\"}")
+***block 5000009***
+delete_comment_operation(`test-reblog-01`, `monkey-04`)
+***block 5000010***
+***block 5000011***
diff --git a/mock_data/block_data/reblog_op/mock_block_data_reblog.json b/mock_data/block_data/reblog_op/mock_block_data_reblog.json
new file mode 100644
index 0000000000000000000000000000000000000000..24f07cc5b701c94829e79456576f6bf8281355f4
--- /dev/null
+++ b/mock_data/block_data/reblog_op/mock_block_data_reblog.json
@@ -0,0 +1,1245 @@
+{
+  "4999999": {
+    "previous": "004c4b3e03ea2eac2494790786bfb9e41a8669d9",
+    "timestamp": "2016-09-15T19:47:18",
+    "witness": "",
+    "transaction_merkle_root": "",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100000,
+        "ref_block_prefix": 0,
+        "expiration": "2020-03-23T12:08:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "funny"
+              ],
+              "id": "follow",
+              "json": "[\"reblog\",{\"account\":\"funny\",\"author\":\"steemit\",\"permlink\":\"fi\nrst'%@post\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "funny"
+              ],
+              "id": "follow",
+              "json": "[\"reblog\",{\"account\":\"funny\",\"author\":\"steemit\",\"permlink\":\"firstpost\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "funny"
+              ],
+              "id": "follow",
+              "json": "[\"reblog\",{\"account\":\"funny\",\"author\":\"steak\",\"permlink\":\"streak-test\"}]"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "funny"
+              ],
+              "id": "follow",
+              "json": "[\"reblog\",{\"account\":\"funny\",\"author\":\"dollarvigilante\",\"permlink\":\"another-billionaire-warns-of-catastrophic-depths-not-seen-in-5-000-years-and-emphasizes-gold\"}]"
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "test-reblog-01",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "test-reblog-02",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "test-reblog-03",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "test-reblog-04",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "test-reblog-05",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "account_create_operation",
+            "value": {
+              "creator": "test-safari",
+              "new_account_name": "test-reblog-06",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "",
+              "author": "test-reblog-01",
+              "permlink": "parrot-01",
+              "title": "I love parrots 01",
+              "body": "tell me about parrots 01",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "",
+              "author": "test-reblog-02",
+              "permlink": "parrot-02",
+              "title": "I love parrots 02",
+              "body": "tell me about parrots 02",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "",
+              "author": "test-reblog-03",
+              "permlink": "parrot-03",
+              "title": "I love parrots 03",
+              "body": "tell me about parrots 03",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "",
+              "author": "test-reblog-04",
+              "permlink": "parrot-04",
+              "title": "I love parrots 04",
+              "body": "tell me about parrot 04",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "",
+              "author": "test-reblog-05",
+              "permlink": "parrot-05",
+              "title": "I love parrots 05",
+              "body": "tell me about parrot 05",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "",
+              "author": "test-reblog-06",
+              "permlink": "parrot-06",
+              "title": "I love parrots 06",
+              "body": "tell me about parrot 06",
+              "json_metadata": "{}"
+            }
+          }
+        ],
+        "extensions": [],
+        "signatures": [
+          ""
+        ]
+      }
+    ],
+    "block_id": "004c4b3fc6a8735b4ab5433d59f4526e4a042644",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000000": {
+    "previous": "004c4b3fc6a8735b4ab5433d59f4526e4a042644",
+    "timestamp": "2016-09-15T19:47:21",
+    "witness": "initminer",
+    "transaction_merkle_root": "",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-01"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-02\",\"permlink\":\"parrot-02\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-01"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-03\",\"permlink\":\"parrot-03\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-01"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-01"
+              ],
+              "id": "reblog",
+              "json": "{\"delete\":\"delete\",\"account\":\"test-reblog-01\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-01"
+              ],
+              "id": "reblog",
+              "json": "{\"delete\":\"delete\",\"account\":\"test-reblog-01\",\"author\":\"test-reblog-03\",\"permlink\":\"parrot-03\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-01"
+              ],
+              "id": "reblog",
+              "json": "{\"delete\":\"delete\",\"account\":\"test-reblog-01\",\"author\":\"test-reblog-02\",\"permlink\":\"parrot-02\"}"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b40245ffb07380a393fb2b3d841b76cdaec",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000001": {
+    "previous": "004c4b40245ffb07380a393fb2b3d841b76cdaec",
+    "timestamp": "2016-09-15T19:47:24",
+    "witness": "initminer",
+    "transaction_merkle_root": "",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-02"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-02\",\"author\":\"test-reblog-01\",\"permlink\":\"parrot-01\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-02"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-02\",\"author\":\"test-reblog-02\",\"permlink\":\"parrot-02\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-02"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-02\",\"author\":\"test-reblog-03\",\"permlink\":\"parrot-03\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-03"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-03\",\"author\":\"test-reblog-02\",\"permlink\":\"parrot-02\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-03"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-03\",\"author\":\"test-reblog-03\",\"permlink\":\"parrot-03\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-03"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-03\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4100000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000002": {
+    "previous": "004c4b4100000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:27",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-04"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-04\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-05"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-05\",\"permlink\":\"parrot-05\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-06"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-06\",\"author\":\"test-reblog-06\",\"permlink\":\"parrot-06\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-06"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-06\",\"author\":\"test-reblog-06\",\"permlink\":\"parrot-06\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-04"
+              ],
+              "id": "reblog",
+              "json": "{\"delete\":\"delete\",\"account\":\"test-reblog-04\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-06"
+              ],
+              "id": "reblog",
+              "json": "{\"delete\":\"delete\",\"account\":\"test-reblog-06\",\"author\":\"test-reblog-06\",\"permlink\":\"parrot-06\"}"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4200000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000003": {
+    "previous": "004c4b4200000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:30",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-05"
+              ],
+              "id": "reblog",
+              "json": "{\"delete\":\"delete\",\"account\":\"test-reblog-05\",\"author\":\"test-reblog-05\",\"permlink\":\"parrot-05\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-06"
+              ],
+              "id": "reblog",
+              "json": "{\"delete\":\"delete\",\"account\":\"test-reblog-06\",\"author\":\"test-reblog-05\",\"permlink\":\"parrot-05\"}"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4300000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000004": {
+    "previous": "004c4b4300000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:33",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-04"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-04\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-05"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-05\",\"permlink\":\"parrot-05\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-06"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-06\",\"author\":\"test-reblog-06\",\"permlink\":\"parrot-06\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-06"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-06\",\"author\":\"test-reblog-05\",\"permlink\":\"parrot-05\"}"
+            }
+          },
+          {
+            "type": "delete_comment_operation",
+            "value": {
+              "author": "test-reblog-04",
+              "permlink": "parrot-04"
+            }
+          },
+          {
+            "type": "delete_comment_operation",
+            "value": {
+              "author": "test-reblog-05",
+              "permlink": "parrot-05"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4400000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000005": {
+    "previous": "004c4b4400000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:36",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "delete_comment_operation",
+            "value": {
+              "author": "test-reblog-06",
+              "permlink": "parrot-06"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "",
+              "author": "test-reblog-01",
+              "permlink": "monkey-01",
+              "title": "I love monkeys 01",
+              "body": "tell me about monkeys 01",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "",
+              "author": "test-reblog-01",
+              "permlink": "monkey-02",
+              "title": "I love monkeys 02",
+              "body": "tell me about monkeys 02",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "",
+              "author": "test-reblog-01",
+              "permlink": "monkey-03",
+              "title": "I love monkeys 03",
+              "body": "tell me about monkeys 03",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "",
+              "author": "test-reblog-01",
+              "permlink": "monkey-04",
+              "title": "I love monkeys 04",
+              "body": "tell me about monkeys 04",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "comment_operation",
+            "value": {
+              "parent_author": "",
+              "parent_permlink": "",
+              "author": "test-reblog-01",
+              "permlink": "monkey-05",
+              "title": "I love monkeys 05",
+              "body": "tell me about monkeys 05",
+              "json_metadata": "{}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-01"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-01\"}"
+            }
+          },
+          {
+            "type": "delete_comment_operation",
+            "value": {
+              "author": "test-reblog-01",
+              "permlink": "monkey-01"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4500000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000006": {
+    "previous": "004c4b4500000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:39",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-01"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-02\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-03"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-03\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-02\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-04"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-04\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-02\"}"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4600000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000007": {
+    "previous": "004c4b4600000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:42",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "delete_comment_operation",
+            "value": {
+              "author": "test-reblog-01",
+              "permlink": "monkey-02"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-03"
+              ],
+              "id": "reblog",
+              "json": "{\"delete\":\"delete\",\"account\":\"test-reblog-03\",\"author\":\"test-reblog-04\",\"permlink\":\"parrot-04\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-01"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-01\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-03\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-02"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-02\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-03\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-03"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-03\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-03\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-04"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-04\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-03\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-05"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-03\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-05"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-04\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-05"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-05\"}"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4700000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000008": {
+    "previous": "004c4b4700000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:45",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-05"
+              ],
+              "id": "reblog",
+              "json": "{\"delete\":\"delete\",\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-04\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-05"
+              ],
+              "id": "reblog",
+              "json": "{\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-04\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-05"
+              ],
+              "id": "reblog",
+              "json": "{\"delete\":\"delete\",\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-04\"}"
+            }
+          },
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "test-reblog-05"
+              ],
+              "id": "reblog",
+              "json": "{\"delete\":\"delete\",\"account\":\"test-reblog-05\",\"author\":\"test-reblog-01\",\"permlink\":\"monkey-04\"}"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4800000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000009": {
+    "previous": "004c4b4800000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:48",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "delete_comment_operation",
+            "value": {
+              "author": "test-reblog-01",
+              "permlink": "monkey-04"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4900000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+        ]
+      }
+
+    ]
+  },
+  "5000010": {
+    "previous": "004c4b4900000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:51",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+        ]
+      }
+    ],
+    "block_id": "004c4b4a00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000011": {
+    "previous": "004c4b4a00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:54",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+        ]
+      }
+    ],
+    "block_id": "004c4b4b00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000012": {
+    "previous": "004c4b4b00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:57",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4c00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000013": {
+    "previous": "004c4b4c00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:00",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4d00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000014": {
+    "previous": "004c4b4d00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:03",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4e00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000015": {
+    "previous": "004c4b4e00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:06",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4f00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000016": {
+    "previous": "004c4b4f00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:09",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b5000000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000017": {
+    "previous": "004c4b5000000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:12",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b5100000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  }
+}
diff --git a/mock_data/block_data/vops_tests/mock_block_data_vops_prepare.json b/mock_data/block_data/vops_tests/mock_block_data_vops_prepare.json
new file mode 100644
index 0000000000000000000000000000000000000000..8d87c746735bcfc556f504d0df6527a523eea304
--- /dev/null
+++ b/mock_data/block_data/vops_tests/mock_block_data_vops_prepare.json
@@ -0,0 +1,34 @@
+{
+  "5000014": {
+    "previous": "004c4b4d00000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:48:03",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 100001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type" : "comment_operation",
+            "value" : {
+              "author": "tester1",
+              "body": "Tester1 post is here. This post was added by mock block provider. Enjoy!",
+              "json_metadata": "{\"app\":\"mock-block-provider\"}", 
+              "parent_author": "",
+              "parent_permlink": "",
+              "permlink": "tester1-post",
+              "title": "tester1-post"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4e00000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  }
+}
\ No newline at end of file
diff --git a/mock_data/examples/block_data/mock_block_data_example_001.json b/mock_data/examples/block_data/mock_block_data_example_001.json
new file mode 100644
index 0000000000000000000000000000000000000000..df8372db9378047c4401b92d69aca9b5c8a95e0a
--- /dev/null
+++ b/mock_data/examples/block_data/mock_block_data_example_001.json
@@ -0,0 +1,185 @@
+{
+  "5000001": {
+    "previous": "004c4b40245ffb07380a393fb2b3d841b76cdaec",
+    "timestamp": "2016-09-15T19:47:24",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 5000000,
+        "ref_block_prefix": 0,
+        "expiration": "2020-03-23T12:08:00",
+        "operations": [
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "tester1",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          },
+          {
+            "type": "create_claimed_account_operation",
+            "value": {
+              "creator": "esteemapp",
+              "new_account_name": "tester2",
+              "owner": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "active": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "posting": {
+                "weight_threshold": 1,
+                "account_auths": [],
+                "key_auths": [
+                  [
+                    "",
+                    1
+                  ]
+                ]
+              },
+              "memo_key": "",
+              "json_metadata": "",
+              "extensions": []
+            }
+          }
+        ],
+        "extensions": [],
+        "signatures": [
+          ""
+        ]
+      }
+    ],
+    "block_id": "004c4b4100000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000002": {
+    "previous": "004c4b4100000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:27",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [
+      {
+        "ref_block_num": 5000001,
+        "ref_block_prefix": 1,
+        "expiration": "2020-03-23T12:17:00",
+        "operations": [
+          {
+            "type": "custom_json_operation",
+            "value": {
+              "required_auths": [],
+              "required_posting_auths": [
+                "tester1"
+              ],
+              "id": "follow",
+              "json": "[\"follow\",{\"follower\":\"tester1\",\"following\":\"tester2\",\"what\":[\"blog\"]}]"
+            }
+          }
+        ]
+      }
+    ],
+    "block_id": "004c4b4200000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000003": {
+    "previous": "004c4b4200000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:30",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4300000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000004": {
+    "previous": "004c4b4300000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:33",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4400000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000005": {
+    "previous": "004c4b4400000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:36",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4500000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000006": {
+    "previous": "004c4b4500000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:39",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4600000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  }
+}
\ No newline at end of file
diff --git a/mock_data/examples/block_data/mock_block_data_example_002.json b/mock_data/examples/block_data/mock_block_data_example_002.json
new file mode 100644
index 0000000000000000000000000000000000000000..ce5d98e1d24308115478498acb16feb6ee4d03ef
--- /dev/null
+++ b/mock_data/examples/block_data/mock_block_data_example_002.json
@@ -0,0 +1,26 @@
+{
+  "5000007": {
+    "previous": "004c4b4600000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:39",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4700000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  },
+  "5000008": {
+    "previous": "004c4b4700000000000000000000000000000000",
+    "timestamp": "2016-09-15T19:47:39",
+    "witness": "initminer",
+    "transaction_merkle_root": "0000000000000000000000000000000000000000",
+    "extensions": [],
+    "witness_signature": "",
+    "transactions": [],
+    "block_id": "004c4b4800000000000000000000000000000000",
+    "signing_key": "",
+    "transaction_ids": []
+  }
+}
\ No newline at end of file
diff --git a/mock_data/examples/vops_data/mock_vops_data_example_001.json b/mock_data/examples/vops_data/mock_vops_data_example_001.json
new file mode 100644
index 0000000000000000000000000000000000000000..adeaedc4291093468bb9aa3ca213e04148ae68ab
--- /dev/null
+++ b/mock_data/examples/vops_data/mock_vops_data_example_001.json
@@ -0,0 +1,27 @@
+{
+  "ops": [
+    {
+      "trx_id": "0000000000000000000000000000000000000000",
+      "block": 300,
+      "trx_in_block": 4294967295,
+      "op_in_trx": 0,
+      "virtual_op": 1,
+      "timestamp": "2016-03-24T16:20:30",
+      "op": {
+        "type": "producer_reward_operation",
+        "value": {
+          "producer": "tester1",
+          "vesting_shares": {
+            "amount": "1000000",
+            "precision": 6,
+            "nai": "@@000000037"
+          }
+        }
+      },
+      "operation_id": "9223372039063639274"
+    }
+  ],
+  "ops_by_block": [],
+  "next_block_range_begin": 10977,
+  "next_operation_begin": 0
+}
\ No newline at end of file
diff --git a/mock_data/examples/vops_data/mock_vops_data_example_002.json b/mock_data/examples/vops_data/mock_vops_data_example_002.json
new file mode 100644
index 0000000000000000000000000000000000000000..040e8e044c7b8d244392a2d55bc58b2e5f312347
--- /dev/null
+++ b/mock_data/examples/vops_data/mock_vops_data_example_002.json
@@ -0,0 +1,27 @@
+{
+  "ops": [
+    {
+      "trx_id": "0000000000000000000000000000000000000000",
+      "block": 301,
+      "trx_in_block": 4294967295,
+      "op_in_trx": 0,
+      "virtual_op": 1,
+      "timestamp": "2016-03-24T16:20:33",
+      "op": {
+        "type": "producer_reward_operation",
+        "value": {
+          "producer": "tester1",
+          "vesting_shares": {
+            "amount": "1000000",
+            "precision": 6,
+            "nai": "@@000000037"
+          }
+        }
+      },
+      "operation_id": "9223372039063639274"
+    }
+  ],
+  "ops_by_block": [],
+  "next_block_range_begin": 10977,
+  "next_operation_begin": 0
+}
\ No newline at end of file
diff --git a/mock_data/examples/vops_data/mock_vops_data_example_003.json b/mock_data/examples/vops_data/mock_vops_data_example_003.json
new file mode 100644
index 0000000000000000000000000000000000000000..639201f9f283169e91199ca0fb7a99bd67ba118d
--- /dev/null
+++ b/mock_data/examples/vops_data/mock_vops_data_example_003.json
@@ -0,0 +1,34 @@
+{
+  "ops": [],
+  "ops_by_block": [
+    {
+      "block": 10532,
+      "irreversible": true,
+      "timestamp": "2016-03-25T01:01:24",
+      "ops": [
+        {
+          "trx_id": "0000000000000000000000000000000000000000",
+          "block": 10532,
+          "trx_in_block": 4294967295,
+          "op_in_trx": 0,
+          "virtual_op": 1,
+          "timestamp": "2016-03-25T01:01:24",
+          "op": {
+            "type": "producer_reward_operation",
+            "value": {
+              "producer": "analisa",
+              "vesting_shares": {
+                "amount": "1000000",
+                "precision": 6,
+                "nai": "@@000000037"
+              }
+            }
+          },
+          "operation_id": "9223372036854776508"
+        }
+      ]
+    }
+  ],
+  "next_block_range_begin": 10977,
+  "next_operation_begin": 0
+}
\ No newline at end of file
diff --git a/mock_data/vops_data/vops_tests/mock_vops_data_vops_tests.json b/mock_data/vops_data/vops_tests/mock_vops_data_vops_tests.json
new file mode 100644
index 0000000000000000000000000000000000000000..5216effde6c4138cd0ce339f3cef5352b82f681c
--- /dev/null
+++ b/mock_data/vops_data/vops_tests/mock_vops_data_vops_tests.json
@@ -0,0 +1,68 @@
+{
+  "ops": [],
+  "ops_by_block": [
+    {
+      "block": 5000015,
+      "irreversible": true,
+      "timestamp": "2016-09-15T19:48:06",
+      "ops": [
+        {
+          "trx_id": "0000000000000000000000000000000000000000",
+          "block": 5000015,
+          "trx_in_block": 4294967295,
+          "op_in_trx": 0,
+          "virtual_op": 1,
+          "timestamp": "2016-09-15T19:48:06",
+          "op": {
+            "type": "author_reward_operation",
+            "value": {
+              "author": "tester1",
+              "permlink": "tester1-post",
+
+              "hbd_payout": {
+                "amount": "123321",
+                "nai": "@@000000013",
+                "precision": 3
+              },
+              "hive_payout": {
+                "amount": "123321",
+                "nai": "@@000000021",
+                "precision": 3
+              },
+              "vesting_payout": {
+                "amount": "123321",
+                "nai": "@@000000037",
+                "precision": 6
+              }
+            }
+          },
+          "operation_id": "9223372036854776508"
+        },
+        {
+          "trx_id": "0000000000000000000000000000000000000000",
+          "block": 5000015,
+          "trx_in_block": 4294967295,
+          "op_in_trx": 0,
+          "virtual_op": 1,
+          "timestamp": "2016-09-15T19:48:06",
+          "op": {
+            "type": "comment_reward_operation",
+            "value": {
+              "author": "tester1",
+              "permlink": "tester1-post",
+
+              "payout": "123321",
+              "author_rewards": "123321",
+              "total_payout_value": "123321 HBD",
+              "curator_payout_value": "123321 HBD",
+              "beneficiary_payout_value": "123321"
+            }
+          },
+          "operation_id": "9223372036854776508"
+        }
+      ]
+    }
+  ],
+  "next_block_range_begin": 1,
+  "next_operation_begin": 0
+}
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..d984fdce760d5e180e8579533b0e34dee92ea540
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools >= 40.6.0", "wheel"]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index a0c013dbdfa672b2be95af0d7ad8644b61fa1e14..9794f2c41246508b74142a830678a3c03a7f7cd5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,7 +14,8 @@ funcy==1.14
 -e git+git@gitlab.syncad.com:hive/hivemind.git@ba54fc07c08c2fd08b37475c9a15d61c64e380c9#egg=hivemind
 humanize==2.4.0
 idna==2.9
-jsonrpcserver==4.0.1
+-e git+https://github.com/bcb/jsonrpcserver.git@8f3437a19b6d1a8f600ee2c9b112116c85f17827#egg=jsonrpcserver-4.1.3+8f3437a
+simplejson
 jsonschema==2.6.0
 Mako==1.1.2
 Markdown==2.4.1
@@ -38,3 +39,5 @@ tzlocal==2.0.0
 ujson==2.0.3
 urllib3==1.25.9
 yarl==1.4.2
+diff-match-patch==20200713
+tox==3.19.0
diff --git a/run_server_sync.sh b/run_server_sync.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b7fe66ea06744e5ab5c74c65861f29c9275eabdd
--- /dev/null
+++ b/run_server_sync.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# RUNNER_HIVEMIND_SYNC_MAX_BLOCK=5000017
+# RUNNER_HIVED_URL='{"default" : "192.168.6.136:8091"}'
+# RUNNER_POSTGRES_APP_USER=user
+# RUNNER_POSTGRES_APP_USER_PASSWORD=password
+# RUNNER_POSTGRES_HOST=localhost
+# RUNNER_POSTGRES_PORT=5432
+# HIVEMIND_DB_NAME=hivemind
+
+# ./run_server_sync.sh 5000017 '{"default" : "192.168.6.136:8091"}' mario_user mario_password localhost 5432 hivemind
+
+export RUNNER_HIVEMIND_SYNC_MAX_BLOCK=$1
+export RUNNER_HIVED_URL=$2
+export RUNNER_POSTGRES_APP_USER=$3
+export RUNNER_POSTGRES_APP_USER_PASSWORD=$4
+export RUNNER_POSTGRES_HOST=$5
+export RUNNER_POSTGRES_PORT=$6
+export HIVEMIND_DB_NAME=$7
+
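+# The RUNNER_* and HIVEMIND_DB_NAME variables exported above are consumed by the hive-sync script.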
+HIVE_SYNC_PATH="./scripts/ci/hive-sync.sh"
+
+"$HIVE_SYNC_PATH"
diff --git a/scripts/ci/backup/.gitlab-ci-ssh.yaml b/scripts/ci/backup/.gitlab-ci-ssh.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ad8fcd1906a9acbab88a109ba774317bdbe3182c
--- /dev/null
+++ b/scripts/ci/backup/.gitlab-ci-ssh.yaml
@@ -0,0 +1,265 @@
+stages:
+  - build
+  - test
+  - data-supply
+  - deploy
+  - e2e-test
+  - benchmark-tests
+  - post-deploy
+
+variables:
+  GIT_DEPTH: 1
+  LC_ALL: "C"
+  GIT_STRATEGY: clone
+  GIT_SUBMODULE_STRATEGY: recursive
+  GIT_CLONE_PATH: $CI_BUILDS_DIR/$CI_COMMIT_REF_SLUG/$CI_CONCURRENT_ID/project-name
+
+  HIVEMIND_SOURCE_HIVED_URL: $HIVEMIND_SOURCE_HIVED_URL
+  HIVEMIND_DB_NAME: "hive_$CI_COMMIT_REF_SLUG"
+  HIVEMIND_HTTP_PORT: $((HIVEMIND_HTTP_PORT + CI_CONCURRENT_ID))
+  # Configured at gitlab repository settings side
+  POSTGRES_USER: $HIVEMIND_POSTGRES_USER
+  POSTGRES_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+  POSTGRES_HOST_AUTH_METHOD: trust
+  # official way to provide password to psql: http://www.postgresql.org/docs/9.3/static/libpq-envars.html
+  PGPASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+
+
+default:
+  before_script:
+    - pwd
+    - echo "CI_NODE_TOTAL is $CI_NODE_TOTAL"
+    - echo "CI_NODE_INDEX is $CI_NODE_INDEX"
+    - echo "CI_CONCURRENT_ID is $CI_CONCURRENT_ID"
+    - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"
+
+hivemind_build:
+  stage: build
+  script:
+    - pip3 install --user --upgrade pip setuptools
+    - git fetch --tags
+    - git tag -f ci_implicit_tag
+    - echo $PYTHONUSERBASE
+    - "python3 setup.py bdist_egg"
+    - ls -l dist/*
+  artifacts:
+    paths:
+      - dist/
+    expire_in: 1 week
+  tags:
+     - hivemind
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+
+hivemind_sync:
+  stage: data-supply
+  environment:
+      name: "hive sync built from branch $CI_COMMIT_REF_NAME targeting database $HIVEMIND_DB_NAME"
+  needs:
+    - job: hivemind_build
+      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+    PYTHONUSERBASE: ./local-site
+  script:
+    - pip3 install --user --upgrade pip setuptools
+    # WARNING!!! temporarily hardcoded 5000017 instead $HIVEMIND_MAX_BLOCK
+    # revert that change when $HIVEMIND_MAX_BLOCK will be set to 5000017
+    - scripts/ci_sync.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" 5000017 $HIVEMIND_HTTP_PORT
+  artifacts:
+    paths:
+      - hivemind-sync.log
+    expire_in: 1 week
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+
+hivemind_start_server:
+  stage: deploy
+  environment:
+    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
+    url: "http://hive-4.pl.syncad.com:$HIVEMIND_HTTP_PORT"
+    on_stop: hivemind_stop_server
+  needs:
+    - job: hivemind_build
+      artifacts: true
+#    - job: hivemind_sync
+#      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+    PYTHONUSERBASE: ./local-site
+  script:
+    - scripts/ci_start_server.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" $HIVEMIND_HTTP_PORT
+  artifacts:
+    paths:
+      - hive_server.pid
+    expire_in: 1 week
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+
+  tags:
+     - hivemind
+
+hivemind_stop_server:
+  stage: post-deploy
+  environment:
+    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
+    action: stop
+  variables:
+    GIT_STRATEGY: none
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+  script:
+    - scripts/ci_stop_server.sh hive_server.pid
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  tags:
+     - hivemind
+  artifacts:
+    paths:
+      - hive_server.log
+
+.hivemind_start_api_smoketest: &common_api_smoketest_job
+  stage: e2e-test
+  environment: hive-4.pl.syncad.com
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+
+bridge_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_patterns/ api_smoketest_bridge.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_bridge.xml
+
+bridge_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_negative/ api_smoketest_bridge_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_bridge_negative.xml
+
+condenser_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_patterns/ api_smoketest_condenser_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_condenser_api.xml
+
+condenser_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_negative/ api_smoketest_condenser_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_condenser_api_negative.xml
+
+database_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_patterns/ api_smoketest_database_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_database_api.xml
+
+database_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_negative/ api_smoketest_database_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_database_api_negative.xml
+
+follow_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_patterns/ api_smoketest_follow_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_follow_api.xml
+
+follow_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_negative/ api_smoketest_follow_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_follow_api_negative.xml
+
+tags_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_patterns/ api_smoketest_tags_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_tags_api.xml
+
+tags_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_negative/ api_smoketest_tags_api_negative.xml
+
+mock_tests:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" mock_tests/ api_smoketest_mock_tests.xml
+
+api_smoketest_benchmark:
+  stage: benchmark-tests
+  environment: hive-4.pl.syncad.com
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  allow_failure: true
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+  script:
+    - ./scripts/ci_start_api_benchmarks.sh localhost $HIVEMIND_HTTP_PORT 5
+  artifacts:
+    when: always
+    paths:
+      - tavern_benchmarks_report.html
diff --git a/scripts/ci/benchmark_generator.py b/scripts/ci/benchmark_generator.py
new file mode 100755
index 0000000000000000000000000000000000000000..e32358b5ab7d5ee4ed51f72adfb02b4b0c40b7be
--- /dev/null
+++ b/scripts/ci/benchmark_generator.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+
+from json import dumps
+
+def make_benchmark_header():
+    return """from requests import post
+from json import dumps
+def send_rpc_query(address, data):
+    response = post(address, data=data)
+    response_json = response.json()
+    return response_json
+    """
+
+def make_benchmark(test_name, address, test_payload):
+    return """
+def test_{}(benchmark):
+    response_json = benchmark(send_rpc_query, "{}", dumps({}))
+    error = response_json.get("error", None)
+    result = response_json.get("result", None)
+
+    assert error is not None or result is not None, "No error or result in response"
+    """.format(test_name, address, test_payload)
+
+def get_request_from_yaml(path_to_yaml):
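+    # Only the request payload of the first tavern stage is used for the benchmark.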
+    import yaml
+    yaml_document = None
+    with open(path_to_yaml, "r") as yaml_file:
+        yaml_document = yaml.load(yaml_file, Loader=yaml.BaseLoader)
+    if "stages" in yaml_document:
+        if "request" in yaml_document["stages"][0]:
+            json_parameters = yaml_document["stages"][0]["request"].get("json", None)
+            assert json_parameters is not None, "Unable to find json parameters in request"
+            return dumps(json_parameters)
+    return None
+
+def make_test_name_from_path(test_path):
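+    # Build the test name from the last three path components, sanitized into a valid identifier.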
+    splited = test_path.split("/")
+    return ("_".join(splited[-3:])).replace(".", "_").replace("-", "_")
+
+def make_benchmark_test_file(file_name, address, tests_root_dir):
+    import os
+    from fnmatch import fnmatch
+
+    pattern = "*.tavern.yaml"
+    test_files = []
+    for path, subdirs, files in os.walk(tests_root_dir):
+        for name in files:
+            if fnmatch(name, pattern):
+                test_files.append(os.path.join(path, name))
+
+    with open(file_name, "w") as benchmarks_file:
+        benchmarks_file.write(make_benchmark_header())
+        for test_file in test_files:
+            test_name = make_test_name_from_path(test_file)
+            test_payload = get_request_from_yaml(test_file)
+            benchmarks_file.write(make_benchmark(test_name, address, test_payload))
+            benchmarks_file.write("\n")
+
+if __name__ == "__main__":
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument("path_to_test_dir", type=str, help = "Path to test directory for given xml file")
+    parser.add_argument("benchmark_test_file_name", type=str, help="Name of the generated test file")
+    parser.add_argument("target_ip_address", type=str, help="Address of the hivemind")
+    args = parser.parse_args()
+
+    make_benchmark_test_file(args.benchmark_test_file_name, args.target_ip_address, args.path_to_test_dir)
+
+
+
+
diff --git a/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-1.yaml b/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..77413c9ebc7574fdd538a932964b2c9833482bbd
--- /dev/null
+++ b/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-1.yaml
@@ -0,0 +1,42 @@
+stages:
+  - run
+
+variables:
+
+  GIT_DEPTH: 10
+
+  GIT_STRATEGY: fetch # It's quick, but errors have been observed with it occasionally.
+  # GIT_STRATEGY: clone
+  # GIT_STRATEGY: none
+
+  GIT_SUBMODULE_STRATEGY: recursive
+  MY_VARIABLE: "bamboo"
+
+default:
+  image: hivemind/python:3.6
+  interruptible: false
+  cache: &global-cache
+    # Per-branch caching: use $CI_COMMIT_REF_NAME (CI_COMMIT_REF_SLUG works the same way).
+    # key: "$CI_COMMIT_REF_NAME"
+    # Per-project caching: use any fixed key.
+    # Change this key if you need to clear the cache.
+    key: common-1
+    paths:
+      - .cache/
+      - .venv/
+      - .tox/
+  before_script:
+    - echo "I am before_script in child-1. MY_VARIABLE is $MY_VARIABLE"
+  after_script:
+    - echo "I am after_script in in child-1. MY_VARIABLE is $MY_VARIABLE"
+
+child-1-job:
+  stage: run
+  rules:
+    - when: manual
+  script:
+    - echo "I am script in child-1-job. MY_VARIABLE is $MY_VARIABLE"
+    - sleep 30
+    - exit 1
+  tags:
+    - hivemind-light-job
diff --git a/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-2.yaml b/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..307f0f68ae46d93c61b42f787fa08fdacfa90521
--- /dev/null
+++ b/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-2.yaml
@@ -0,0 +1,38 @@
+stages:
+  - run
+
+variables:
+
+  GIT_DEPTH: 10
+
+  GIT_STRATEGY: fetch # It's quick, but errors have been observed with it occasionally.
+  # GIT_STRATEGY: clone
+  # GIT_STRATEGY: none
+
+  GIT_SUBMODULE_STRATEGY: recursive
+  MY_VARIABLE: "bamboo"
+
+default:
+  image: hivemind/python:3.6
+  interruptible: false
+  cache: &global-cache
+    # Per-branch caching: use $CI_COMMIT_REF_NAME (CI_COMMIT_REF_SLUG works the same way).
+    # key: "$CI_COMMIT_REF_NAME"
+    # Per-project caching: use any fixed key.
+    # Change this key if you need to clear the cache.
+    key: common-1
+    paths:
+      - .cache/
+      - .venv/
+      - .tox/
+  before_script:
+    - echo "I am before_script in child-2. MY_VARIABLE is $MY_VARIABLE"
+  after_script:
+    - echo "I am after_script in child-2. MY_VARIABLE is $MY_VARIABLE"
+
+child-2-job:
+  stage: run
+  script:
+    - echo "I am script in child-2-job. MY_VARIABLE is $MY_VARIABLE"
+  tags:
+    - hivemind-light-job
diff --git a/scripts/ci/child-pipelines/.gitlab-ci-dynamic.yaml b/scripts/ci/child-pipelines/.gitlab-ci-dynamic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3c3e2d4d40e89b93be32f43f84b3f2b08c750044
--- /dev/null
+++ b/scripts/ci/child-pipelines/.gitlab-ci-dynamic.yaml
@@ -0,0 +1,33 @@
+# See https://gitlab.com/fgrimshaw/dynamic-ci
+# See https://gitlab.com/gitlab-org/gitlab/-/issues/212373
+
+# I tested this feature, but our current version of Gitlab 13.2.2
+# doesn't support it well. Child pipelines run with no problem,
+# but UI displays wrong badges, for instance job was marked as
+# still running, though it was finished. Also jobs with rule
+# "when: manual" where started without user's permission.
+# We need to wait for better support in Gitlab UI.
+
+stages:
+  - run
+
+variables:
+  GIT_STRATEGY: none
+
+trigger-child-1:
+  stage: run
+  rules:
+    - if: '$CI_COMMIT_MESSAGE =~ /child-1/'
+      when: always
+  trigger:
+    include: .gitlab-ci-child-pipeline-1.yaml
+    strategy: depend
+
+trigger-child-2:
+  stage: run
+  rules:
+    - if: '$CI_COMMIT_MESSAGE =~ /child-2/'
+      when: always
+  trigger:
+    include: .gitlab-ci-child-pipeline-2.yaml
+    strategy: depend
diff --git a/scripts/ci/collect-db-stats.sh b/scripts/ci/collect-db-stats.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f957af71f858f8260c43837a84910913ed2e0eb4
--- /dev/null
+++ b/scripts/ci/collect-db-stats.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+set -euo pipefail
+
+collect_db_stats() {
+
+    echo "Collecting statistics from database ${HIVEMIND_DB_NAME}"
+
+    mkdir -p pg-stats
+    DIR=$PWD/pg-stats
+
+    PGPASSWORD=${RUNNER_POSTGRES_APP_USER_PASSWORD} psql \
+        --username "${RUNNER_POSTGRES_APP_USER=}" \
+        --host ${RUNNER_POSTGRES_HOST} \
+        --port ${RUNNER_POSTGRES_PORT} \
+        --dbname ${HIVEMIND_DB_NAME} << EOF
+\timing
+\copy (select * from pg_settings) to '$DIR/pg_settings.csv' WITH CSV HEADER
+\copy (select * from pg_stat_user_tables) to '$DIR/pg_stat_user_tables.csv' WITH CSV HEADER
+
+-- Disabled, because this table is too big.
+-- \copy (select * from pg_stat_statements) to '$DIR/pg_stat_statements.csv' WITH CSV HEADER
+
+/*
+-- Does not look useful.
+-- See https://github.com/powa-team/pg_qualstats
+\echo pg_qualstats index advisor
+SELECT v
+  FROM json_array_elements(
+    pg_qualstats_index_advisor(min_filter => 50)->'indexes') v
+  ORDER BY v::text COLLATE "C";
+
+\echo pg_qualstats unoptimised
+SELECT v
+  FROM json_array_elements(
+    pg_qualstats_index_advisor(min_filter => 50)->'unoptimised') v
+  ORDER BY v::text COLLATE "C";
+*/
+EOF
+
+}
+
+collect_db_stats
diff --git a/scripts/ci/create-db.sh b/scripts/ci/create-db.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6f3eacc175911e73c01701e0ba9fe7cfd6f69f8e
--- /dev/null
+++ b/scripts/ci/create-db.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+set -euo pipefail
+
+create_db() {
+
+    echo "Creating user ${RUNNER_POSTGRES_APP_USER} and database ${HIVEMIND_DB_NAME}, owned by this user"
+
+    TEMPLATE="template_monitoring"
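+    # The new database is created from this template, which must already exist on the CI Postgres server.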
+
+    PGPASSWORD=${RUNNER_POSTGRES_ADMIN_USER_PASSWORD} psql \
+        --username "${RUNNER_POSTGRES_ADMIN_USER}" \
+        --host ${RUNNER_POSTGRES_HOST} \
+        --port ${RUNNER_POSTGRES_PORT} \
+        --dbname postgres << EOF
+
+\echo Creating role ${RUNNER_POSTGRES_APP_USER}
+
+DO \$$
+BEGIN
+    IF EXISTS (SELECT * FROM pg_user
+            WHERE pg_user.usename = '${RUNNER_POSTGRES_APP_USER}') THEN
+        raise warning 'Role % already exists', '${RUNNER_POSTGRES_APP_USER}';
+    ELSE
+        CREATE ROLE ${RUNNER_POSTGRES_APP_USER}
+                WITH LOGIN PASSWORD '${RUNNER_POSTGRES_APP_USER_PASSWORD}';
+    END IF;
+END
+\$$;
+
+\echo Creating database ${HIVEMIND_DB_NAME}
+CREATE DATABASE ${HIVEMIND_DB_NAME} TEMPLATE ${TEMPLATE}
+    OWNER ${RUNNER_POSTGRES_APP_USER};
+COMMENT ON DATABASE ${HIVEMIND_DB_NAME} IS
+    'Database for Gitlab CI pipeline ${CI_PIPELINE_URL}, commit ${CI_COMMIT_SHORT_SHA}';
+
+\c ${HIVEMIND_DB_NAME}
+
+drop schema if exists hivemind_admin cascade;
+
+create schema hivemind_admin
+        authorization ${RUNNER_POSTGRES_APP_USER};
+
+CREATE SEQUENCE hivemind_admin.database_metadata_id_seq
+    INCREMENT 1
+    START 1
+    MINVALUE 1
+    MAXVALUE 2147483647
+    CACHE 1;
+
+CREATE TABLE hivemind_admin.database_metadata
+(
+    id integer NOT NULL DEFAULT
+        nextval('hivemind_admin.database_metadata_id_seq'::regclass),
+    database_name text,
+    ci_pipeline_url text,
+    ci_pipeline_id integer,
+    commit_sha text,
+    created_at timestamp with time zone DEFAULT now(),
+    CONSTRAINT database_metadata_pkey PRIMARY KEY (id)
+);
+
+alter sequence hivemind_admin.database_metadata_id_seq
+        OWNER TO ${RUNNER_POSTGRES_APP_USER};
+
+alter table hivemind_admin.database_metadata
+        OWNER TO ${RUNNER_POSTGRES_APP_USER};
+
+insert into hivemind_admin.database_metadata
+    (database_name, ci_pipeline_url, ci_pipeline_id, commit_sha)
+values (
+    '${HIVEMIND_DB_NAME}', '${CI_PIPELINE_URL}',
+    ${CI_PIPELINE_ID}, '${CI_COMMIT_SHORT_SHA}'
+    );
+
+-- VACUUM VERBOSE ANALYZE;
+
+\q
+EOF
+
+}
+
+create_db
diff --git a/scripts/ci/drop-db.sh b/scripts/ci/drop-db.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f3d928b5e1958f711bdb73d6ab757b14f5095549
--- /dev/null
+++ b/scripts/ci/drop-db.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# set -euo pipefail
+
+drop_db() {
+
+    echo "Dropping database ${HIVEMIND_DB_NAME}"
+
+    PGPASSWORD=${RUNNER_POSTGRES_ADMIN_USER_PASSWORD} dropdb \
+        --if-exists \
+        --username "${RUNNER_POSTGRES_ADMIN_USER}" \
+        --host ${RUNNER_POSTGRES_HOST} \
+        --port ${RUNNER_POSTGRES_PORT} \
+        ${HIVEMIND_DB_NAME}
+
+    RESULT=$?
+
+    if [[ ! $RESULT -eq 0 ]]; then
+        cat << EOF
+ERROR: cannot drop database ${HIVEMIND_DB_NAME}.
+Most often the reason is that the database is being used by other sessions.
+This can happen on the Gitlab CI server when jobs are picked up by multiple
+concurrent runners and the database name is not unique across subsequent
+pipelines. If this is the case, please cancel any pending pipelines running
+for your branch or your merge request, or wait until they finish. Then retry
+this pipeline.
+Exiting with error at this moment.
+EOF
+        exit $RESULT
+    else
+        echo "Database ${HIVEMIND_DB_NAME} has been dropped successfully"
+    fi
+
+}
+
+drop_db
diff --git a/scripts/ci/dump-db.sh b/scripts/ci/dump-db.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2b9f1c31e8e54d4db042dcc2ee54333c73262b60
--- /dev/null
+++ b/scripts/ci/dump-db.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+set -euo pipefail
+
+dump_db() {
+    echo "Dumping database ${HIVEMIND_DB_NAME}"
+
+    export PGPASSWORD=${RUNNER_POSTGRES_APP_USER_PASSWORD}
+    exec_path=$POSTGRES_CLIENT_TOOLS_PATH/$POSTGRES_MAJOR_VERSION/bin
+
+    echo "Using pg_dump version $($exec_path/pg_dump --version)"
+
+    time $exec_path/pg_dump \
+        --username="${RUNNER_POSTGRES_APP_USER}" \
+        --host="${RUNNER_POSTGRES_HOST}" \
+        --port="${RUNNER_POSTGRES_PORT}" \
+        --dbname="${HIVEMIND_DB_NAME}" \
+        --schema=public \
+        --format=directory \
+        --jobs=4 \
+        --compress=6 \
+        --quote-all-identifiers \
+        --lock-wait-timeout=30000 \
+        --no-privileges --no-acl \
+        --verbose \
+        --file="pg-dump-${HIVEMIND_DB_NAME}"
+
+    unset PGPASSWORD
+}
+
+dump_db
diff --git a/scripts/ci/get-postgres-version.sh b/scripts/ci/get-postgres-version.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4b0e05dd386785784b52f69ca391b36aed9a30b3
--- /dev/null
+++ b/scripts/ci/get-postgres-version.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Get postgresql server version
+
+set -euo pipefail
+
+get_postgres_version() {
+    # Get major version of postgres server.
+    version=$(
+        PGPASSWORD=$RUNNER_POSTGRES_APP_USER_PASSWORD psql -X -A -t \
+            --username $RUNNER_POSTGRES_APP_USER \
+            --host $RUNNER_POSTGRES_HOST \
+            --port ${RUNNER_POSTGRES_PORT} \
+            --dbname postgres \
+            -c "show server_version_num;"
+        )
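+    # server_version_num looks like 120005; for Postgres 10+ the first two characters are the major version.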
+    echo "${version}" | cut -c1-2
+}
+
+get_postgres_version
diff --git a/scripts/ci/goodies/.gitlab-ci-goodies.yaml b/scripts/ci/goodies/.gitlab-ci-goodies.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..535d4e9542daff911c4baf56f3c76deeed946218
--- /dev/null
+++ b/scripts/ci/goodies/.gitlab-ci-goodies.yaml
@@ -0,0 +1,154 @@
+# Useful snippets for Gitlab CI, but not used currently.
+
+.postgres-10: &postgres-10
+  name: hivemind/postgres:10
+  alias: db
+  command: [
+      "postgres",
+      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
+      "-c", "track_functions=pl",
+      "-c", "track_io_timing=on",
+      "-c", "track_activity_query_size=2048",
+      "-c", "pg_stat_statements.max=10000",
+      "-c", "pg_stat_statements.track=all",
+      "-c", "max_connections=100",
+      "-c", "shared_buffers=2GB",
+      "-c", "effective_cache_size=6GB",
+      "-c", "maintenance_work_mem=512MB",
+      "-c", "checkpoint_completion_target=0.9",
+      "-c", "wal_buffers=16MB",
+      "-c", "default_statistics_target=100",
+      "-c", "random_page_cost=1.1",
+      "-c", "effective_io_concurrency=200",
+      "-c", "work_mem=5242kB",
+      "-c", "min_wal_size=2GB",
+      "-c", "max_wal_size=8GB",
+      "-c", "max_worker_processes=4",
+      "-c", "max_parallel_workers_per_gather=2",
+      "-c", "max_parallel_workers=4",
+      ]
+
+.postgres-12: &postgres-12
+  name: hivemind/postgres:12
+  alias: db
+  command: [
+      "postgres",
+      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
+      "-c", "track_functions=pl",
+      "-c", "track_io_timing=on",
+      "-c", "track_activity_query_size=2048",
+      "-c", "pg_stat_statements.max=10000",
+      "-c", "pg_stat_statements.track=all",
+      "-c", "max_connections=100",
+      "-c", "shared_buffers=2GB",
+      "-c", "effective_cache_size=6GB",
+      "-c", "maintenance_work_mem=512MB",
+      "-c", "checkpoint_completion_target=0.9",
+      "-c", "wal_buffers=16MB",
+      "-c", "default_statistics_target=100",
+      "-c", "random_page_cost=1.1",
+      "-c", "effective_io_concurrency=200",
+      "-c", "work_mem=5242kB",
+      "-c", "min_wal_size=2GB",
+      "-c", "max_wal_size=8GB",
+      "-c", "max_worker_processes=4",
+      "-c", "max_parallel_workers_per_gather=2",
+      "-c", "max_parallel_workers=4",
+      ]
+
+.setup-setuptools: &setup-setuptools
+  - python -m venv .venv
+  - source .venv/bin/activate
+  - time pip install --upgrade pip setuptools wheel
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+  - time python setup.py develop
+
+.setup-setuptools-no-venv: &setup-setuptools-no-venv
+  # No virtual environment here.
+  # Setuptools will install all dependencies to PYTHONUSERBASE directory.
+  - export PYTHONUSERBASE=./local-site
+  - time pip install --upgrade pip setuptools wheel
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+  - mkdir -p `python -m site --user-site`
+  - python setup.py install --user --force
+  - ln -sf ./local-site/bin/hive "$HIVEMIND_EXEC_NAME"
+
+.setup-pipenv: &setup-pipenv
+  ## Note that a Pipfile must exist.
+  ## `--sequential` is slower, but doesn't emit messages about errors
+  ## or ask to repeat the install:
+  ## - pipenv sync --dev --bare --sequential
+  ## The plain variant below is faster than `--sequential`, but sometimes
+  ## emits messages about errors and a need to repeat the install. These
+  ## errors seem to be negligible, however.
+  - time pipenv sync --dev --bare
+  - source .venv/bin/activate
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+
+
+##### Jobs #####
+
+.build-egg:
+  stage: build
+  needs: []
+  script:
+    - python setup.py bdist_egg
+    - ls -l dist/*
+  artifacts:
+    paths:
+      - dist/
+    expire_in: 7 days
+  tags:
+    - hivemind-light-job
+
+.build-wheel:
+  stage: build
+  needs: []
+  script:
+    - python setup.py bdist_wheel
+    - ls -l dist/*
+  artifacts:
+    paths:
+      - dist/
+    expire_in: 7 days
+  tags:
+    - hivemind-light-job
+
+# Postgres as docker service
+.hivemind-sync-postgres-as-service:
+  # <<: *default-rules
+  stage: data-supply
+  services:
+    - *postgres-10
+    # - *postgres-12
+  needs: []
+  script:
+    # - *hive-sync-script-common
+    # - ./scripts/ci/dump-db.sh
+  artifacts:
+    paths:
+      - hivemind-sync.log
+      - pg-stats
+      - pg-dump-${HIVEMIND_DB_NAME}
+    expire_in: 7 hours
+  tags:
+    - hivemind-heavy-job
+
+# Test job doing nothing (for debugging CI)
+.just-a-test:
+  stage: e2e-test
+  extends: .e2e-test-common
+  script:
+    - echo "Run some tests"
diff --git a/scripts/ci/hive-server.sh b/scripts/ci/hive-server.sh
new file mode 100755
index 0000000000000000000000000000000000000000..96ce0766e4f4641ddd0cebf0253cd1ed5a4b1974
--- /dev/null
+++ b/scripts/ci/hive-server.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+
+# TODO This script needs review.
+
+set -euo pipefail
+
+JOB=$1
+HIVEMIND_PID=0
+MERCY_KILL_TIMEOUT=5
+START_DELAY=5
+
+check_pid() {
+  if [ -f hive_server.pid ]; then
+    HIVEMIND_PID=`cat hive_server.pid`
+    if ps -p $HIVEMIND_PID > /dev/null
+    then
+      # Process is running
+      echo "Process pid $HIVEMIND_PID is running"
+    else
+      # Process is not running
+      echo "Process pid $HIVEMIND_PID is not running"
+      rm hive_server.pid
+      HIVEMIND_PID=0
+    fi
+  else
+    HIVEMIND_PID=0
+  fi
+}
+
+stop() {
+  if [ "$HIVEMIND_PID" -gt 0 ]; then
+    HIVEMIND_PID=`cat hive_server.pid`
+
+    # Send INT signal and give it some time to stop.
+    echo "Stopping hive server (pid $HIVEMIND_PID) gently (SIGINT)"
+    kill -SIGINT $HIVEMIND_PID || true;
+    sleep $MERCY_KILL_TIMEOUT
+
+    # Send KILL signal to be sure.
+    echo "Killing hive server (pid $HIVEMIND_PID) to be sure (SIGKILL)"
+    kill -9 $HIVEMIND_PID > /dev/null 2>&1 || true;
+
+    rm hive_server.pid;
+    echo "Hive server has been stopped"
+  else
+    echo "Hive server is not running"
+  fi
+}
+
+start() {
+
+  if [ "$HIVEMIND_PID" -gt 0 ]; then
+    echo "Hive server is already running (pid $HIVEMIND_PID)"
+    exit 0
+  fi
+
+  echo "Starting hive server on port ${RUNNER_HIVEMIND_SERVER_HTTP_PORT}"
+
+  USER=${RUNNER_POSTGRES_APP_USER}:${RUNNER_POSTGRES_APP_USER_PASSWORD}
+  OPTIONS="host=${RUNNER_POSTGRES_HOST}&port=${RUNNER_POSTGRES_PORT}"
+  DATABASE_URL="postgresql://${USER}@/${HIVEMIND_DB_NAME}?${OPTIONS}"
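+  # Host and port go into the query string, so the URL authority part carries only the credentials.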
+
+  hive server \
+      --log-mask-sensitive-data \
+      --pid-file hive_server.pid \
+      --http-server-port ${RUNNER_HIVEMIND_SERVER_HTTP_PORT} \
+      --steemd-url "${RUNNER_HIVED_URL}" \
+      --database-url "${DATABASE_URL}" \
+      > hivemind-server.log 2>&1 &
+
+  HIVEMIND_PID=$!
+
+  for i in `seq 1 10`; do
+    if [ -f hive_server.pid ]; then
+      echo "Starting hive server (pid $HIVEMIND_PID)"
+      # Wait some time to allow its initialization.
+      sleep $START_DELAY
+      # Check if process is still running.
+      if ps -p $HIVEMIND_PID > /dev/null
+      then
+        echo "Hive server is running (pid $HIVEMIND_PID)"
+        # Rewrite the pid file; sometimes it contains a wrong pid.
+        echo $HIVEMIND_PID > hive_server.pid
+        exit 0
+      else
+        # Check if process executed successfully or not.
+        if wait $HIVEMIND_PID; then
+          echo "Hive server has been started (pid $HIVEMIND_PID)"
+          echo $HIVEMIND_PID > hive_server.pid
+          exit 0
+        else
+          RESULT=$?
+          echo "Hive server terminated abnormally (returned $RESULT)"
+          rm hive_server.pid;
+          exit $RESULT
+        fi
+      fi
+    else
+      sleep 1
+    fi
+  done
+
+  # If we are here, something went wrong.
+  echo "Timeout reached. Hive server has not been started, exiting."
+  rm hive_server.pid;
+  exit 1
+
+}
+
+
+main() {
+  check_pid
+  if [ "$JOB" = "start" ]; then
+    start
+  elif [ "$JOB" = "stop" ]; then
+    stop
+  else
+    echo "Invalid argument"
+    exit 1
+  fi
+}
+
+main
diff --git a/scripts/ci/hive-sync.sh b/scripts/ci/hive-sync.sh
new file mode 100755
index 0000000000000000000000000000000000000000..eadeab5aaceac4b3f5eb6183dd169b193d334f19
--- /dev/null
+++ b/scripts/ci/hive-sync.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# For debug only!
+# RUNNER_HIVEMIND_SYNC_MAX_BLOCK=10000
+# RUNNER_HIVED_URL='{"default":"http://hived-node:8091"}'
+# RUNNER_HIVED_URL='{"default":"http://172.17.0.1:8091"}'
+
+hive_sync() {
+    # Start hive sync process
+
+    cat << EOF
+Starting hive sync using hived url: ${RUNNER_HIVED_URL}.
+Max sync block is: ${RUNNER_HIVEMIND_SYNC_MAX_BLOCK}.
+EOF
+
+    USER=${RUNNER_POSTGRES_APP_USER}:${RUNNER_POSTGRES_APP_USER_PASSWORD}
+    OPTIONS="host=${RUNNER_POSTGRES_HOST}&port=${RUNNER_POSTGRES_PORT}"
+    DATABASE_URL="postgresql://${USER}@/${HIVEMIND_DB_NAME}?${OPTIONS}"
+
+    hive sync \
+        --log-mask-sensitive-data \
+        --pid-file hive_sync.pid \
+        --test-max-block=${RUNNER_HIVEMIND_SYNC_MAX_BLOCK} \
+        --test-profile=False \
+        --steemd-url "${RUNNER_HIVED_URL}" \
+        --prometheus-port 11011 \
+        --database-url "${DATABASE_URL}" \
+        --mock-block-data-path mock_data/block_data/follow_op/mock_block_data_follow.json \
+          mock_data/block_data/community_op/mock_block_data_community.json \
+          mock_data/block_data/reblog_op/mock_block_data_reblog.json \
+        --community-start-block 4999998 \
+        2>&1 | tee -i hivemind-sync.log
+
+}
+
+hive_sync
diff --git a/scripts/ci/hived-node/config.ini b/scripts/ci/hived-node/config.ini
new file mode 100644
index 0000000000000000000000000000000000000000..ecdc18b6d0c2896ed4dfc68928b1b93fe65ac1ef
--- /dev/null
+++ b/scripts/ci/hived-node/config.ini
@@ -0,0 +1,57 @@
+
+log-appender = {"appender":"stderr","stream":"std_error"}
+log-logger = {"name":"default","level":"info","appender":"stderr"}
+
+backtrace = yes
+
+plugin = webserver p2p json_rpc
+plugin = database_api
+# condenser_api enabled per abw request
+plugin = condenser_api
+plugin = block_api
+# gandalf enabled witness + rc
+plugin = witness
+plugin = rc
+
+# market_history enabled per abw request
+plugin = market_history
+plugin = market_history_api
+
+plugin = account_history_rocksdb
+plugin = account_history_api
+
+# gandalf enabled transaction status
+plugin = transaction_status
+plugin = transaction_status_api
+
+# gandalf enabled account by key
+plugin = account_by_key
+plugin = account_by_key_api
+
+# and few apis
+plugin = block_api network_broadcast_api rc_api
+
+history-disable-pruning = 1
+account-history-rocksdb-path = "blockchain/account-history-rocksdb-storage"
+
+# shared-file-dir = "/run/hive"
+shared-file-size = 20G
+shared-file-full-threshold = 9500
+shared-file-scale-rate = 1000
+
+flush-state-interval = 0
+
+market-history-bucket-size = [15,60,300,3600,86400]
+market-history-buckets-per-size = 5760
+
+p2p-endpoint = 0.0.0.0:2001
+p2p-seed-node =
+# gtg.openhive.network:2001
+
+transaction-status-block-depth = 64000
+transaction-status-track-after-block = 42000000
+
+webserver-http-endpoint = 0.0.0.0:8091
+webserver-ws-endpoint = 0.0.0.0:8090
+
+webserver-thread-pool-size = 8
diff --git a/scripts/ci/hived-node/entrypoint.sh b/scripts/ci/hived-node/entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..cd0c81a5c8ff1a9d901deb20ba83403dcfa1eaf0
--- /dev/null
+++ b/scripts/ci/hived-node/entrypoint.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+SCRIPT=`realpath $0`
+SCRIPTPATH=`dirname $SCRIPT`
+
+DATADIR="${SCRIPTPATH}/datadir"
+
+HIVED="${SCRIPTPATH}/bin/hived"
+
+ARGS="$@"
+ARGS+=" "
+
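+# If TRACK_ACCOUNT is set, enable the account history plugins and track only that account.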
+if [[ ! -z "$TRACK_ACCOUNT" ]]; then
+    ARGS+=" --plugin=account_history --plugin=account_history_api"
+    ARGS+=" --account-history-track-account-range=[\"$TRACK_ACCOUNT\",\"$TRACK_ACCOUNT\"]"
+fi
+
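+# If USE_PUBLIC_BLOCKLOG is set and no block_log is present, download one and replay the chain.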
+if [[ "$USE_PUBLIC_BLOCKLOG" ]]; then
+  if [[ ! -e ${DATADIR}/blockchain/block_log ]]; then
+    if [[ ! -d ${DATADIR}/blockchain ]]; then
+      mkdir -p ${DATADIR}/blockchain
+    fi
+    echo "Hived: Downloading a block_log and replaying the blockchain"
+    echo "This may take a little while..."
+    wget -O ${DATADIR}/blockchain/block_log https://gtg.steem.house/get/blockchain/block_log
+    ARGS+=" --replay-blockchain"
+  fi
+fi
+
+"$HIVED" \
+  --data-dir="${DATADIR}" \
+  $ARGS \
+  2>&1
diff --git a/scripts/ci/hived-node/run.sh b/scripts/ci/hived-node/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..32841a9ae109d86fe254b38734710d09b95a9c88
--- /dev/null
+++ b/scripts/ci/hived-node/run.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+# Start hived in docker container, replay up to 5000000 blocks
+
+MYDIR="$PWD"
+WORKDIR="/usr/local/hive/consensus"
+IMAGE="registry.gitlab.syncad.com/hive/hive/consensus_node:00b5ff55"
+
+docker run -d \
+    --name hived-replay-5000000 \
+    -p 127.0.0.1:2001:2001 \
+    -p 127.0.0.1:8090:8090 \
+    -p 127.0.0.1:8091:8091 \
+    -v "$MYDIR/config.ini:$WORKDIR/datadir/config.ini" \
+    -v "$MYDIR/blockchain/block_log:$WORKDIR/datadir/blockchain/block_log" \
+    -v "$MYDIR/entrypoint.sh:$WORKDIR/entrypoint.sh" \
+    --entrypoint $WORKDIR/entrypoint.sh \
+    $IMAGE \
+    --replay-blockchain --stop-replay-at-block 5000000
diff --git a/scripts/ci/json_report_parser.py b/scripts/ci/json_report_parser.py
new file mode 100755
index 0000000000000000000000000000000000000000..38ff91a359915b84ccc9a2612aa0f7be75964aef
--- /dev/null
+++ b/scripts/ci/json_report_parser.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+""" Parse json file generated by pytest benchmarks and create htm report file
+    for files exceeding expected threshold print information to the console
+"""
+
+import os
+from sys import exit
+from json import dumps, load
+
+def get_request_from_yaml(path_to_yaml):
+    """ Extract request parameters from given yaml file
+    Parameters:
+    - path_to_yaml - path to yaml file
+    Returns:
+    - string with request parameters
+    """
+    import yaml
+    yaml_document = None
+    with open(path_to_yaml, "r") as yaml_file:
+        yaml_document = yaml.load(yaml_file, Loader=yaml.BaseLoader)
+    if "stages" in yaml_document:
+        if "request" in yaml_document["stages"][0]:
+            json_parameters = yaml_document["stages"][0]["request"].get("json", None)
+            assert json_parameters is not None, "Unable to find json parameters in request"
+            return dumps(json_parameters)
+    return ""
+
+def make_class_path_dict(root_dir):
+    """ Scan root dir for files with given pattern and construct dictionary
+    with keys as path with replaced ., -, / characters and values as file path
+    Parameters:
+    - root_dir - dir to scan for files
+    Returns:
+    - dict class_name -> path
+    """
+    from fnmatch import fnmatch
+
+    pattern = "*.tavern.yaml"
+
+    ret = {}
+
+    for path, _, files in os.walk(root_dir):
+        for name in files:
+            if fnmatch(name, pattern):
+                test_path = os.path.join(path, name)
+                ret[test_path.replace(".", "_").replace("-", "_").replace("/", "_")] = test_path
+    return ret
+
+def class_to_path(class_name, class_to_path_dic):
+    """ Return path to test file basing on class name
+    Parameters:
+    - class_name - test to find,
+    - class_to_path_dic - dict with class -> path key/values
+    Return:
+    - path to test file
+    """
+    from fnmatch import fnmatch
+    for c, p in class_to_path_dic.items():
+        if fnmatch(c, "*" + class_name):
+            return p
+    return None
+
+def json_report_parser(path_to_test_dir, json_file, time_threshold=1.0):
+    above_threshold = []
+    html_file, _ = os.path.splitext(json_file)
+    html_file = "tavern_report_" + html_file + ".html"
+    class_to_path_dic = make_class_path_dict(path_to_test_dir)
+    with open(html_file, "w") as ofile:
+        ofile.write("<html>\n")
+        ofile.write("  <head>\n")
+        ofile.write("    <style>\n")
+        ofile.write("      table, th, td {\n")
+        ofile.write("        border: 1px solid black;\n")
+        ofile.write("        border-collapse: collapse;\n")
+        ofile.write("      }\n")
+        ofile.write("      th, td {\n")
+        ofile.write("        padding: 15px;\n")
+        ofile.write("      }\n")
+        ofile.write("    </style>\n")
+        ofile.write("  </head>\n")
+        ofile.write("  <body>\n")
+        ofile.write("    <table>\n")
+        ofile.write("      <tr><th>Test name</th><th>Min time [ms]</th><th>Max time [ms]</th><th>Mean time [ms]</th></tr>\n")
+        json_data = None
+        with open(json_file, "r") as json_file:
+            json_data = load(json_file)
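+        # Benchmarks whose mean time exceeds the threshold are highlighted in red and
+        # collected for the console summary; the rest get plain table rows.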
+        for benchmark in json_data['benchmarks']:
+            if float(benchmark['stats']['mean']) > time_threshold:
+                ofile.write("      <tr><td>{}<br/>Parameters: {}</td><td>{:.4f}</td><td>{:.4f}</td><td bgcolor=\"red\">{:.4f}</td></tr>\n".format(benchmark['name'], get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic)), benchmark['stats']['min'] * 1000, benchmark['stats']['max'] * 1000, benchmark['stats']['mean'] * 1000))
+                above_treshold.append((benchmark['name'], "{:.4f}".format(benchmark['stats']['mean'] * 1000), get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic))))
+            else:
+                ofile.write("      <tr><td>{}</td><td>{:.4f}</td><td>{:.4f}</td><td>{:.4f}</td></tr>\n".format(benchmark['name'], benchmark['stats']['min'] * 1000, benchmark['stats']['max'] * 1000, benchmark['stats']['mean'] * 1000))
+        ofile.write("    </table>\n")
+        ofile.write("  </body>\n")
+        ofile.write("</html>\n")
+    return above_threshold
+
+if __name__ == '__main__':
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument("path_to_test_dir", type = str, help = "Path to test directory for given json benchmark file")
+    parser.add_argument("json_file", type = str, help = "Path to benchmark json file")
+    parser.add_argument("--time-threshold", dest="time_threshold", type=float, default=1.0, help="Time threshold for test execution time, tests with execution time greater than threshold will be marked on red.")
+    args = parser.parse_args()
+
+    if not json_report_parser(args.path_to_test_dir, args.json_file, args.time_threshold):
+        exit(1)
+    exit(0)
+
diff --git a/scripts/ci/postgres/10/Dockerfile b/scripts/ci/postgres/10/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..baf6ce22167ddc3da7d923fb2820c387136d49b5
--- /dev/null
+++ b/scripts/ci/postgres/10/Dockerfile
@@ -0,0 +1,18 @@
+FROM postgres:10.14
+
+LABEL description="Available non-standard extensions: plpython2, pg_qualstats."
+
+RUN apt-get update \
+        && apt-get install -y --no-install-recommends \
+            nano \
+            postgresql-plpython3-10 \
+            python3-psutil \
+            postgresql-10-pg-qualstats \
+        && rm -rf /var/lib/apt/lists/*
+
+RUN mkdir -p /docker-entrypoint-initdb.d
+
+# Create stuff for monitoring with pgwatch2 and pghero.
+COPY ./scripts/db-monitoring/setup/setup_monitoring.sh \
+        /docker-entrypoint-initdb.d/
+COPY ./scripts/db-monitoring/setup/sql-monitoring /sql-monitoring/
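+
+# Build from the repository root (the COPY paths above are relative to it), e.g.:
+#   docker build -t hivemind/postgres:10 -f scripts/ci/postgres/10/Dockerfile .
+# The image tag is only an example; CI may push to a different registry path.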
diff --git a/scripts/ci/postgres/10/postgresql.conf b/scripts/ci/postgres/10/postgresql.conf
new file mode 100644
index 0000000000000000000000000000000000000000..3c9abc6a6f68fae32f429070a42b65a4e7311993
--- /dev/null
+++ b/scripts/ci/postgres/10/postgresql.conf
@@ -0,0 +1,686 @@
+# -----------------------------
+# PostgreSQL configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+#   name = value
+#
+# (The "=" is optional.)  Whitespace may be used.  Comments are introduced with
+# "#" anywhere on a line.  The complete list of parameter names and allowed
+# values can be found in the PostgreSQL documentation.
+#
+# The commented-out settings shown in this file represent the default values.
+# Re-commenting a setting is NOT sufficient to revert it to the default value;
+# you need to reload the server.
+#
+# This file is read on server startup and when the server receives a SIGHUP
+# signal.  If you edit the file on a running system, you have to SIGHUP the
+# server for the changes to take effect, run "pg_ctl reload", or execute
+# "SELECT pg_reload_conf()".  Some parameters, which are marked below,
+# require a server shutdown and restart to take effect.
+#
+# Any parameter can also be given as a command-line option to the server, e.g.,
+# "postgres -c log_connections=on".  Some parameters can be changed at run time
+# with the "SET" SQL command.
+#
+# Memory units:  kB = kilobytes        Time units:  ms  = milliseconds
+#                MB = megabytes                     s   = seconds
+#                GB = gigabytes                     min = minutes
+#                TB = terabytes                     h   = hours
+#                                                   d   = days
+
+
+#------------------------------------------------------------------------------
+# FILE LOCATIONS
+#------------------------------------------------------------------------------
+
+# The default values of these variables are driven from the -D command-line
+# option or PGDATA environment variable, represented here as ConfigDir.
+
+#data_directory = 'ConfigDir'		# use data in another directory
+					# (change requires restart)
+#hba_file = 'ConfigDir/pg_hba.conf'	# host-based authentication file
+					# (change requires restart)
+#ident_file = 'ConfigDir/pg_ident.conf'	# ident configuration file
+					# (change requires restart)
+
+# If external_pid_file is not explicitly set, no extra PID file is written.
+#external_pid_file = ''			# write an extra PID file
+					# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONNECTIONS AND AUTHENTICATION
+#------------------------------------------------------------------------------
+
+# - Connection Settings -
+
+listen_addresses = '*'
+					# comma-separated list of addresses;
+					# defaults to 'localhost'; use '*' for all
+					# (change requires restart)
+#port = 5432				# (change requires restart)
+max_connections = 100			# (change requires restart)
+#superuser_reserved_connections = 3	# (change requires restart)
+#unix_socket_directories = '/var/run/postgresql'	# comma-separated list of directories
+					# (change requires restart)
+#unix_socket_group = ''			# (change requires restart)
+#unix_socket_permissions = 0777		# begin with 0 to use octal notation
+					# (change requires restart)
+#bonjour = off				# advertise server via Bonjour
+					# (change requires restart)
+#bonjour_name = ''			# defaults to the computer name
+					# (change requires restart)
+
+# - Security and Authentication -
+
+#authentication_timeout = 1min		# 1s-600s
+#ssl = off
+#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
+#ssl_prefer_server_ciphers = on
+#ssl_ecdh_curve = 'prime256v1'
+#ssl_dh_params_file = ''
+#ssl_cert_file = 'server.crt'
+#ssl_key_file = 'server.key'
+#ssl_ca_file = ''
+#ssl_crl_file = ''
+#password_encryption = md5		# md5 or scram-sha-256
+#db_user_namespace = off
+#row_security = on
+
+# GSSAPI using Kerberos
+#krb_server_keyfile = ''
+#krb_caseins_users = off
+
+# - TCP Keepalives -
+# see "man 7 tcp" for details
+
+#tcp_keepalives_idle = 0		# TCP_KEEPIDLE, in seconds;
+					# 0 selects the system default
+#tcp_keepalives_interval = 0		# TCP_KEEPINTVL, in seconds;
+					# 0 selects the system default
+#tcp_keepalives_count = 0		# TCP_KEEPCNT;
+					# 0 selects the system default
+
+
+#------------------------------------------------------------------------------
+# RESOURCE USAGE (except WAL)
+#------------------------------------------------------------------------------
+
+# - Memory -
+
+shared_buffers = 128MB			# min 128kB
+					# (change requires restart)
+#huge_pages = try			# on, off, or try
+					# (change requires restart)
+#temp_buffers = 8MB			# min 800kB
+#max_prepared_transactions = 0		# zero disables the feature
+					# (change requires restart)
+# Caution: it is not advisable to set max_prepared_transactions nonzero unless
+# you actively intend to use prepared transactions.
+#work_mem = 4MB				# min 64kB
+#maintenance_work_mem = 64MB		# min 1MB
+#replacement_sort_tuples = 150000	# limits use of replacement selection sort
+#autovacuum_work_mem = -1		# min 1MB, or -1 to use maintenance_work_mem
+#max_stack_depth = 2MB			# min 100kB
+dynamic_shared_memory_type = posix	# the default is the first option
+					# supported by the operating system:
+					#   posix
+					#   sysv
+					#   windows
+					#   mmap
+					# use none to disable dynamic shared memory
+					# (change requires restart)
+
+# - Disk -
+
+#temp_file_limit = -1			# limits per-process temp file space
+					# in kB, or -1 for no limit
+
+# - Kernel Resource Usage -
+
+#max_files_per_process = 1000		# min 25
+					# (change requires restart)
+#shared_preload_libraries = ''		# (change requires restart)
+
+# - Cost-Based Vacuum Delay -
+
+#vacuum_cost_delay = 0			# 0-100 milliseconds
+#vacuum_cost_page_hit = 1		# 0-10000 credits
+#vacuum_cost_page_miss = 10		# 0-10000 credits
+#vacuum_cost_page_dirty = 20		# 0-10000 credits
+#vacuum_cost_limit = 200		# 1-10000 credits
+
+# - Background Writer -
+
+#bgwriter_delay = 200ms			# 10-10000ms between rounds
+#bgwriter_lru_maxpages = 100		# 0-1000 max buffers written/round
+#bgwriter_lru_multiplier = 2.0		# 0-10.0 multiplier on buffers scanned/round
+#bgwriter_flush_after = 512kB		# measured in pages, 0 disables
+
+# - Asynchronous Behavior -
+
+#effective_io_concurrency = 1		# 1-1000; 0 disables prefetching
+#max_worker_processes = 8		# (change requires restart)
+#max_parallel_workers_per_gather = 2	# taken from max_parallel_workers
+#max_parallel_workers = 8		# maximum number of max_worker_processes that
+					# can be used in parallel queries
+#old_snapshot_threshold = -1		# 1min-60d; -1 disables; 0 is immediate
+					# (change requires restart)
+#backend_flush_after = 0		# measured in pages, 0 disables
+
+
+#------------------------------------------------------------------------------
+# WRITE AHEAD LOG
+#------------------------------------------------------------------------------
+
+# - Settings -
+
+#wal_level = replica			# minimal, replica, or logical
+					# (change requires restart)
+#fsync = on				# flush data to disk for crash safety
+					# (turning this off can cause
+					# unrecoverable data corruption)
+#synchronous_commit = on		# synchronization level;
+					# off, local, remote_write, remote_apply, or on
+#wal_sync_method = fsync		# the default is the first option
+					# supported by the operating system:
+					#   open_datasync
+					#   fdatasync (default on Linux)
+					#   fsync
+					#   fsync_writethrough
+					#   open_sync
+#full_page_writes = on			# recover from partial page writes
+#wal_compression = off			# enable compression of full-page writes
+#wal_log_hints = off			# also do full page writes of non-critical updates
+					# (change requires restart)
+#wal_buffers = -1			# min 32kB, -1 sets based on shared_buffers
+					# (change requires restart)
+#wal_writer_delay = 200ms		# 1-10000 milliseconds
+#wal_writer_flush_after = 1MB		# measured in pages, 0 disables
+
+#commit_delay = 0			# range 0-100000, in microseconds
+#commit_siblings = 5			# range 1-1000
+
+# - Checkpoints -
+
+#checkpoint_timeout = 5min		# range 30s-1d
+#max_wal_size = 1GB
+#min_wal_size = 80MB
+#checkpoint_completion_target = 0.5	# checkpoint target duration, 0.0 - 1.0
+#checkpoint_flush_after = 256kB		# measured in pages, 0 disables
+#checkpoint_warning = 30s		# 0 disables
+
+# - Archiving -
+
+#archive_mode = off		# enables archiving; off, on, or always
+				# (change requires restart)
+#archive_command = ''		# command to use to archive a logfile segment
+				# placeholders: %p = path of file to archive
+				#               %f = file name only
+				# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
+#archive_timeout = 0		# force a logfile segment switch after this
+				# number of seconds; 0 disables
+
+
+#------------------------------------------------------------------------------
+# REPLICATION
+#------------------------------------------------------------------------------
+
+# - Sending Server(s) -
+
+# Set these on the master and on any standby that will send replication data.
+
+#max_wal_senders = 10		# max number of walsender processes
+				# (change requires restart)
+#wal_keep_segments = 0		# in logfile segments, 16MB each; 0 disables
+#wal_sender_timeout = 60s	# in milliseconds; 0 disables
+
+#max_replication_slots = 10	# max number of replication slots
+				# (change requires restart)
+#track_commit_timestamp = off	# collect timestamp of transaction commit
+				# (change requires restart)
+
+# - Master Server -
+
+# These settings are ignored on a standby server.
+
+#synchronous_standby_names = ''	# standby servers that provide sync rep
+				# method to choose sync standbys, number of sync standbys,
+				# and comma-separated list of application_name
+				# from standby(s); '*' = all
+#vacuum_defer_cleanup_age = 0	# number of xacts by which cleanup is delayed
+
+# - Standby Servers -
+
+# These settings are ignored on a master server.
+
+#hot_standby = on			# "off" disallows queries during recovery
+					# (change requires restart)
+#max_standby_archive_delay = 30s	# max delay before canceling queries
+					# when reading WAL from archive;
+					# -1 allows indefinite delay
+#max_standby_streaming_delay = 30s	# max delay before canceling queries
+					# when reading streaming WAL;
+					# -1 allows indefinite delay
+#wal_receiver_status_interval = 10s	# send replies at least this often
+					# 0 disables
+#hot_standby_feedback = off		# send info from standby to prevent
+					# query conflicts
+#wal_receiver_timeout = 60s		# time that receiver waits for
+					# communication from master
+					# in milliseconds; 0 disables
+#wal_retrieve_retry_interval = 5s	# time to wait before retrying to
+					# retrieve WAL after a failed attempt
+
+# - Subscribers -
+
+# These settings are ignored on a publisher.
+
+#max_logical_replication_workers = 4	# taken from max_worker_processes
+					# (change requires restart)
+#max_sync_workers_per_subscription = 2	# taken from max_logical_replication_workers
+
+
+#------------------------------------------------------------------------------
+# QUERY TUNING
+#------------------------------------------------------------------------------
+
+# - Planner Method Configuration -
+
+#enable_bitmapscan = on
+#enable_hashagg = on
+#enable_hashjoin = on
+#enable_indexscan = on
+#enable_indexonlyscan = on
+#enable_material = on
+#enable_mergejoin = on
+#enable_nestloop = on
+#enable_seqscan = on
+#enable_sort = on
+#enable_tidscan = on
+
+# - Planner Cost Constants -
+
+#seq_page_cost = 1.0			# measured on an arbitrary scale
+#random_page_cost = 4.0			# same scale as above
+#cpu_tuple_cost = 0.01			# same scale as above
+#cpu_index_tuple_cost = 0.005		# same scale as above
+#cpu_operator_cost = 0.0025		# same scale as above
+#parallel_tuple_cost = 0.1		# same scale as above
+#parallel_setup_cost = 1000.0	# same scale as above
+#min_parallel_table_scan_size = 8MB
+#min_parallel_index_scan_size = 512kB
+#effective_cache_size = 4GB
+
+# - Genetic Query Optimizer -
+
+#geqo = on
+#geqo_threshold = 12
+#geqo_effort = 5			# range 1-10
+#geqo_pool_size = 0			# selects default based on effort
+#geqo_generations = 0			# selects default based on effort
+#geqo_selection_bias = 2.0		# range 1.5-2.0
+#geqo_seed = 0.0			# range 0.0-1.0
+
+# - Other Planner Options -
+
+#default_statistics_target = 100	# range 1-10000
+#constraint_exclusion = partition	# on, off, or partition
+#cursor_tuple_fraction = 0.1		# range 0.0-1.0
+#from_collapse_limit = 8
+#join_collapse_limit = 8		# 1 disables collapsing of explicit
+					# JOIN clauses
+#force_parallel_mode = off
+
+
+#------------------------------------------------------------------------------
+# ERROR REPORTING AND LOGGING
+#------------------------------------------------------------------------------
+
+# - Where to Log -
+
+#log_destination = 'stderr'		# Valid values are combinations of
+					# stderr, csvlog, syslog, and eventlog,
+					# depending on platform.  csvlog
+					# requires logging_collector to be on.
+
+# This is used when logging to stderr:
+#logging_collector = off		# Enable capturing of stderr and csvlog
+					# into log files. Required to be on for
+					# csvlogs.
+					# (change requires restart)
+
+# These are only used if logging_collector is on:
+#log_directory = 'log'			# directory where log files are written,
+					# can be absolute or relative to PGDATA
+#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'	# log file name pattern,
+					# can include strftime() escapes
+#log_file_mode = 0600			# creation mode for log files,
+					# begin with 0 to use octal notation
+#log_truncate_on_rotation = off		# If on, an existing log file with the
+					# same name as the new log file will be
+					# truncated rather than appended to.
+					# But such truncation only occurs on
+					# time-driven rotation, not on restarts
+					# or size-driven rotation.  Default is
+					# off, meaning append to existing files
+					# in all cases.
+#log_rotation_age = 1d			# Automatic rotation of logfiles will
+					# happen after that time.  0 disables.
+#log_rotation_size = 10MB		# Automatic rotation of logfiles will
+					# happen after that much log output.
+					# 0 disables.
+
+# These are relevant when logging to syslog:
+#syslog_facility = 'LOCAL0'
+#syslog_ident = 'postgres'
+#syslog_sequence_numbers = on
+#syslog_split_messages = on
+
+# This is only relevant when logging to eventlog (win32):
+# (change requires restart)
+#event_source = 'PostgreSQL'
+
+# - When to Log -
+
+#log_min_messages = warning		# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   info
+					#   notice
+					#   warning
+					#   error
+					#   log
+					#   fatal
+					#   panic
+
+#log_min_error_statement = error	# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   info
+					#   notice
+					#   warning
+					#   error
+					#   log
+					#   fatal
+					#   panic (effectively off)
+
+#log_min_duration_statement = -1	# -1 is disabled, 0 logs all statements
+					# and their durations, > 0 logs only
+					# statements running at least this number
+					# of milliseconds
+
+
+# - What to Log -
+
+#debug_print_parse = off
+#debug_print_rewritten = off
+#debug_print_plan = off
+#debug_pretty_print = on
+#log_checkpoints = off
+#log_connections = off
+#log_disconnections = off
+#log_duration = off
+#log_error_verbosity = default		# terse, default, or verbose messages
+#log_hostname = off
+#log_line_prefix = '%m [%p] '		# special values:
+					#   %a = application name
+					#   %u = user name
+					#   %d = database name
+					#   %r = remote host and port
+					#   %h = remote host
+					#   %p = process ID
+					#   %t = timestamp without milliseconds
+					#   %m = timestamp with milliseconds
+					#   %n = timestamp with milliseconds (as a Unix epoch)
+					#   %i = command tag
+					#   %e = SQL state
+					#   %c = session ID
+					#   %l = session line number
+					#   %s = session start timestamp
+					#   %v = virtual transaction ID
+					#   %x = transaction ID (0 if none)
+					#   %q = stop here in non-session
+					#        processes
+					#   %% = '%'
+					# e.g. '<%u%%%d> '
+#log_lock_waits = off			# log lock waits >= deadlock_timeout
+#log_statement = 'none'			# none, ddl, mod, all
+#log_replication_commands = off
+#log_temp_files = -1			# log temporary files equal or larger
+					# than the specified size in kilobytes;
+					# -1 disables, 0 logs all temp files
+log_timezone = 'Etc/UTC'
+
+
+# - Process Title -
+
+#cluster_name = ''			# added to process titles if nonempty
+					# (change requires restart)
+#update_process_title = on
+
+
+#------------------------------------------------------------------------------
+# RUNTIME STATISTICS
+#------------------------------------------------------------------------------
+
+# - Query/Index Statistics Collector -
+
+#track_activities = on
+#track_counts = on
+#track_io_timing = off
+#track_functions = none			# none, pl, all
+#track_activity_query_size = 1024	# (change requires restart)
+#stats_temp_directory = 'pg_stat_tmp'
+
+
+# - Statistics Monitoring -
+
+#log_parser_stats = off
+#log_planner_stats = off
+#log_executor_stats = off
+#log_statement_stats = off
+
+
+#------------------------------------------------------------------------------
+# AUTOVACUUM PARAMETERS
+#------------------------------------------------------------------------------
+
+#autovacuum = on			# Enable autovacuum subprocess?  'on'
+					# requires track_counts to also be on.
+#log_autovacuum_min_duration = -1	# -1 disables, 0 logs all actions and
+					# their durations, > 0 logs only
+					# actions running at least this number
+					# of milliseconds.
+#autovacuum_max_workers = 3		# max number of autovacuum subprocesses
+					# (change requires restart)
+#autovacuum_naptime = 1min		# time between autovacuum runs
+#autovacuum_vacuum_threshold = 50	# min number of row updates before
+					# vacuum
+#autovacuum_analyze_threshold = 50	# min number of row updates before
+					# analyze
+#autovacuum_vacuum_scale_factor = 0.2	# fraction of table size before vacuum
+#autovacuum_analyze_scale_factor = 0.1	# fraction of table size before analyze
+#autovacuum_freeze_max_age = 200000000	# maximum XID age before forced vacuum
+					# (change requires restart)
+#autovacuum_multixact_freeze_max_age = 400000000	# maximum multixact age
+					# before forced vacuum
+					# (change requires restart)
+#autovacuum_vacuum_cost_delay = 20ms	# default vacuum cost delay for
+					# autovacuum, in milliseconds;
+					# -1 means use vacuum_cost_delay
+#autovacuum_vacuum_cost_limit = -1	# default vacuum cost limit for
+					# autovacuum, -1 means use
+					# vacuum_cost_limit
+
+
+#------------------------------------------------------------------------------
+# CLIENT CONNECTION DEFAULTS
+#------------------------------------------------------------------------------
+
+# - Statement Behavior -
+
+#client_min_messages = notice		# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   log
+					#   notice
+					#   warning
+					#   error
+#search_path = '"$user", public'	# schema names
+#default_tablespace = ''		# a tablespace name, '' uses the default
+#temp_tablespaces = ''			# a list of tablespace names, '' uses
+					# only default tablespace
+#check_function_bodies = on
+#default_transaction_isolation = 'read committed'
+#default_transaction_read_only = off
+#default_transaction_deferrable = off
+#session_replication_role = 'origin'
+#statement_timeout = 0			# in milliseconds, 0 is disabled
+#lock_timeout = 0			# in milliseconds, 0 is disabled
+#idle_in_transaction_session_timeout = 0	# in milliseconds, 0 is disabled
+#vacuum_freeze_min_age = 50000000
+#vacuum_freeze_table_age = 150000000
+#vacuum_multixact_freeze_min_age = 5000000
+#vacuum_multixact_freeze_table_age = 150000000
+#bytea_output = 'hex'			# hex, escape
+#xmlbinary = 'base64'
+#xmloption = 'content'
+#gin_fuzzy_search_limit = 0
+#gin_pending_list_limit = 4MB
+
+# - Locale and Formatting -
+
+datestyle = 'iso, mdy'
+#intervalstyle = 'postgres'
+timezone = 'Etc/UTC'
+#timezone_abbreviations = 'Default'     # Select the set of available time zone
+					# abbreviations.  Currently, there are
+					#   Default
+					#   Australia (historical usage)
+					#   India
+					# You can create your own file in
+					# share/timezonesets/.
+#extra_float_digits = 0			# min -15, max 3
+#client_encoding = sql_ascii		# actually, defaults to database
+					# encoding
+
+# These settings are initialized by initdb, but they can be changed.
+lc_messages = 'en_US.utf8'			# locale for system error message
+					# strings
+lc_monetary = 'en_US.utf8'			# locale for monetary formatting
+lc_numeric = 'en_US.utf8'			# locale for number formatting
+lc_time = 'en_US.utf8'				# locale for time formatting
+
+# default configuration for text search
+default_text_search_config = 'pg_catalog.english'
+
+# - Other Defaults -
+
+#dynamic_library_path = '$libdir'
+#local_preload_libraries = ''
+#session_preload_libraries = ''
+
+
+#------------------------------------------------------------------------------
+# LOCK MANAGEMENT
+#------------------------------------------------------------------------------
+
+#deadlock_timeout = 1s
+#max_locks_per_transaction = 64		# min 10
+					# (change requires restart)
+#max_pred_locks_per_transaction = 64	# min 10
+					# (change requires restart)
+#max_pred_locks_per_relation = -2	# negative values mean
+					# (max_pred_locks_per_transaction
+					#  / -max_pred_locks_per_relation) - 1
+#max_pred_locks_per_page = 2            # min 0
+
+
+#------------------------------------------------------------------------------
+# VERSION/PLATFORM COMPATIBILITY
+#------------------------------------------------------------------------------
+
+# - Previous PostgreSQL Versions -
+
+#array_nulls = on
+#backslash_quote = safe_encoding	# on, off, or safe_encoding
+#default_with_oids = off
+#escape_string_warning = on
+#lo_compat_privileges = off
+#operator_precedence_warning = off
+#quote_all_identifiers = off
+#standard_conforming_strings = on
+#synchronize_seqscans = on
+
+# - Other Platforms and Clients -
+
+#transform_null_equals = off
+
+
+#------------------------------------------------------------------------------
+# ERROR HANDLING
+#------------------------------------------------------------------------------
+
+#exit_on_error = off			# terminate session on any error?
+#restart_after_crash = on		# reinitialize after backend crash?
+#data_sync_retry = off			# retry or panic on failure to fsync
+					# data?
+					# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONFIG FILE INCLUDES
+#------------------------------------------------------------------------------
+
+# These options allow settings to be loaded from files other than the
+# default postgresql.conf.  Note that these are directives, not variable
+# assignments, so they can usefully be given more than once.
+
+#include_dir = '...'			# include files ending in '.conf' from
+					# a directory, e.g., 'conf.d'
+#include_if_exists = '...'		# include file only if it exists
+#include = '...'			# include file
+
+
+#------------------------------------------------------------------------------
+# CUSTOMIZED OPTIONS
+#------------------------------------------------------------------------------
+
+# Add settings for extensions here
+
+
+# https://pgtune.leopard.in.ua/#/ oltp 48G ram, 12 cpus, ssd
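+#
+# The values below were generated with pgtune for an OLTP workload on a 48 GB RAM,
+# 12-CPU, SSD machine. Note that shared_preload_libraries only takes effect after a
+# server restart.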
+
+shared_preload_libraries=pg_stat_statements,pg_qualstats
+track_functions=pl
+track_io_timing=on
+track_activity_query_size=2048
+pg_stat_statements.max=10000
+pg_stat_statements.track=all
+max_connections=100
+shared_buffers=12GB
+effective_cache_size=36GB
+maintenance_work_mem=2GB
+checkpoint_completion_target=0.9
+wal_buffers=16MB
+default_statistics_target=100
+random_page_cost=1.1
+effective_io_concurrency=200
+work_mem=31457kB
+min_wal_size=2GB
+max_wal_size=8GB
+max_worker_processes=12
+max_parallel_workers_per_gather=4
+max_parallel_workers=12
diff --git a/scripts/ci/postgres/12/Dockerfile b/scripts/ci/postgres/12/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..2004df9783c376cf995910f03956f2c900f8226b
--- /dev/null
+++ b/scripts/ci/postgres/12/Dockerfile
@@ -0,0 +1,18 @@
+FROM postgres:12.4
+
+LABEL description="Available non-standard extensions: plpython2, pg_qualstats."
+
+RUN apt-get update \
+        && apt-get install -y --no-install-recommends \
+            nano \
+            postgresql-plpython3-12 \
+            python3-psutil \
+            postgresql-12-pg-qualstats \
+        && rm -rf /var/lib/apt/lists/*
+
+RUN mkdir -p /docker-entrypoint-initdb.d
+
+# Create stuff for monitoring with pgwatch2 and pghero.
+COPY ./scripts/db-monitoring/setup/setup_monitoring.sh \
+        /docker-entrypoint-initdb.d/
+COPY ./scripts/db-monitoring/setup/sql-monitoring /sql-monitoring/
diff --git a/scripts/ci/postgres/12/postgresql.conf b/scripts/ci/postgres/12/postgresql.conf
new file mode 100644
index 0000000000000000000000000000000000000000..1a2dbb245cc9a243e4512af72ce2266e064c47e7
--- /dev/null
+++ b/scripts/ci/postgres/12/postgresql.conf
@@ -0,0 +1,776 @@
+# -----------------------------
+# PostgreSQL configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+#   name = value
+#
+# (The "=" is optional.)  Whitespace may be used.  Comments are introduced with
+# "#" anywhere on a line.  The complete list of parameter names and allowed
+# values can be found in the PostgreSQL documentation.
+#
+# The commented-out settings shown in this file represent the default values.
+# Re-commenting a setting is NOT sufficient to revert it to the default value;
+# you need to reload the server.
+#
+# This file is read on server startup and when the server receives a SIGHUP
+# signal.  If you edit the file on a running system, you have to SIGHUP the
+# server for the changes to take effect, run "pg_ctl reload", or execute
+# "SELECT pg_reload_conf()".  Some parameters, which are marked below,
+# require a server shutdown and restart to take effect.
+#
+# Any parameter can also be given as a command-line option to the server, e.g.,
+# "postgres -c log_connections=on".  Some parameters can be changed at run time
+# with the "SET" SQL command.
+#
+# Memory units:  kB = kilobytes        Time units:  ms  = milliseconds
+#                MB = megabytes                     s   = seconds
+#                GB = gigabytes                     min = minutes
+#                TB = terabytes                     h   = hours
+#                                                   d   = days
+
+
+#------------------------------------------------------------------------------
+# FILE LOCATIONS
+#------------------------------------------------------------------------------
+
+# The default values of these variables are driven from the -D command-line
+# option or PGDATA environment variable, represented here as ConfigDir.
+
+#data_directory = 'ConfigDir'		# use data in another directory
+					# (change requires restart)
+#hba_file = 'ConfigDir/pg_hba.conf'	# host-based authentication file
+					# (change requires restart)
+#ident_file = 'ConfigDir/pg_ident.conf'	# ident configuration file
+					# (change requires restart)
+
+# If external_pid_file is not explicitly set, no extra PID file is written.
+#external_pid_file = ''			# write an extra PID file
+					# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONNECTIONS AND AUTHENTICATION
+#------------------------------------------------------------------------------
+
+# - Connection Settings -
+
+listen_addresses = '*'
+					# comma-separated list of addresses;
+					# defaults to 'localhost'; use '*' for all
+					# (change requires restart)
+#port = 5432				# (change requires restart)
+max_connections = 100			# (change requires restart)
+#superuser_reserved_connections = 3	# (change requires restart)
+#unix_socket_directories = '/var/run/postgresql'	# comma-separated list of directories
+					# (change requires restart)
+#unix_socket_group = ''			# (change requires restart)
+#unix_socket_permissions = 0777		# begin with 0 to use octal notation
+					# (change requires restart)
+#bonjour = off				# advertise server via Bonjour
+					# (change requires restart)
+#bonjour_name = ''			# defaults to the computer name
+					# (change requires restart)
+
+# - TCP settings -
+# see "man 7 tcp" for details
+
+#tcp_keepalives_idle = 0		# TCP_KEEPIDLE, in seconds;
+					# 0 selects the system default
+#tcp_keepalives_interval = 0		# TCP_KEEPINTVL, in seconds;
+					# 0 selects the system default
+#tcp_keepalives_count = 0		# TCP_KEEPCNT;
+					# 0 selects the system default
+#tcp_user_timeout = 0			# TCP_USER_TIMEOUT, in milliseconds;
+					# 0 selects the system default
+
+# - Authentication -
+
+#authentication_timeout = 1min		# 1s-600s
+#password_encryption = md5		# md5 or scram-sha-256
+#db_user_namespace = off
+
+# GSSAPI using Kerberos
+#krb_server_keyfile = ''
+#krb_caseins_users = off
+
+# - SSL -
+
+#ssl = off
+#ssl_ca_file = ''
+#ssl_cert_file = 'server.crt'
+#ssl_crl_file = ''
+#ssl_key_file = 'server.key'
+#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
+#ssl_prefer_server_ciphers = on
+#ssl_ecdh_curve = 'prime256v1'
+#ssl_min_protocol_version = 'TLSv1'
+#ssl_max_protocol_version = ''
+#ssl_dh_params_file = ''
+#ssl_passphrase_command = ''
+#ssl_passphrase_command_supports_reload = off
+
+
+#------------------------------------------------------------------------------
+# RESOURCE USAGE (except WAL)
+#------------------------------------------------------------------------------
+
+# - Memory -
+
+shared_buffers = 128MB			# min 128kB
+					# (change requires restart)
+#huge_pages = try			# on, off, or try
+					# (change requires restart)
+#temp_buffers = 8MB			# min 800kB
+#max_prepared_transactions = 0		# zero disables the feature
+					# (change requires restart)
+# Caution: it is not advisable to set max_prepared_transactions nonzero unless
+# you actively intend to use prepared transactions.
+#work_mem = 4MB				# min 64kB
+#maintenance_work_mem = 64MB		# min 1MB
+#autovacuum_work_mem = -1		# min 1MB, or -1 to use maintenance_work_mem
+#max_stack_depth = 2MB			# min 100kB
+#shared_memory_type = mmap		# the default is the first option
+					# supported by the operating system:
+					#   mmap
+					#   sysv
+					#   windows
+					# (change requires restart)
+dynamic_shared_memory_type = posix	# the default is the first option
+					# supported by the operating system:
+					#   posix
+					#   sysv
+					#   windows
+					#   mmap
+					# (change requires restart)
+
+# - Disk -
+
+#temp_file_limit = -1			# limits per-process temp file space
+					# in kB, or -1 for no limit
+
+# - Kernel Resources -
+
+#max_files_per_process = 1000		# min 25
+					# (change requires restart)
+
+# - Cost-Based Vacuum Delay -
+
+#vacuum_cost_delay = 0			# 0-100 milliseconds (0 disables)
+#vacuum_cost_page_hit = 1		# 0-10000 credits
+#vacuum_cost_page_miss = 10		# 0-10000 credits
+#vacuum_cost_page_dirty = 20		# 0-10000 credits
+#vacuum_cost_limit = 200		# 1-10000 credits
+
+# - Background Writer -
+
+#bgwriter_delay = 200ms			# 10-10000ms between rounds
+#bgwriter_lru_maxpages = 100		# max buffers written/round, 0 disables
+#bgwriter_lru_multiplier = 2.0		# 0-10.0 multiplier on buffers scanned/round
+#bgwriter_flush_after = 512kB		# measured in pages, 0 disables
+
+# - Asynchronous Behavior -
+
+#effective_io_concurrency = 1		# 1-1000; 0 disables prefetching
+#max_worker_processes = 8		# (change requires restart)
+#max_parallel_maintenance_workers = 2	# taken from max_parallel_workers
+#max_parallel_workers_per_gather = 2	# taken from max_parallel_workers
+#parallel_leader_participation = on
+#max_parallel_workers = 8		# maximum number of max_worker_processes that
+					# can be used in parallel operations
+#old_snapshot_threshold = -1		# 1min-60d; -1 disables; 0 is immediate
+					# (change requires restart)
+#backend_flush_after = 0		# measured in pages, 0 disables
+
+
+#------------------------------------------------------------------------------
+# WRITE-AHEAD LOG
+#------------------------------------------------------------------------------
+
+# - Settings -
+
+#wal_level = replica			# minimal, replica, or logical
+					# (change requires restart)
+#fsync = on				# flush data to disk for crash safety
+					# (turning this off can cause
+					# unrecoverable data corruption)
+#synchronous_commit = on		# synchronization level;
+					# off, local, remote_write, remote_apply, or on
+#wal_sync_method = fsync		# the default is the first option
+					# supported by the operating system:
+					#   open_datasync
+					#   fdatasync (default on Linux)
+					#   fsync
+					#   fsync_writethrough
+					#   open_sync
+#full_page_writes = on			# recover from partial page writes
+#wal_compression = off			# enable compression of full-page writes
+#wal_log_hints = off			# also do full page writes of non-critical updates
+					# (change requires restart)
+#wal_init_zero = on			# zero-fill new WAL files
+#wal_recycle = on			# recycle WAL files
+#wal_buffers = -1			# min 32kB, -1 sets based on shared_buffers
+					# (change requires restart)
+#wal_writer_delay = 200ms		# 1-10000 milliseconds
+#wal_writer_flush_after = 1MB		# measured in pages, 0 disables
+
+#commit_delay = 0			# range 0-100000, in microseconds
+#commit_siblings = 5			# range 1-1000
+
+# - Checkpoints -
+
+#checkpoint_timeout = 5min		# range 30s-1d
+max_wal_size = 1GB
+min_wal_size = 80MB
+#checkpoint_completion_target = 0.5	# checkpoint target duration, 0.0 - 1.0
+#checkpoint_flush_after = 256kB		# measured in pages, 0 disables
+#checkpoint_warning = 30s		# 0 disables
+
+# - Archiving -
+
+#archive_mode = off		# enables archiving; off, on, or always
+				# (change requires restart)
+#archive_command = ''		# command to use to archive a logfile segment
+				# placeholders: %p = path of file to archive
+				#               %f = file name only
+				# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
+#archive_timeout = 0		# force a logfile segment switch after this
+				# number of seconds; 0 disables
+
+# - Archive Recovery -
+
+# These are only used in recovery mode.
+
+#restore_command = ''		# command to use to restore an archived logfile segment
+				# placeholders: %p = path of file to restore
+				#               %f = file name only
+				# e.g. 'cp /mnt/server/archivedir/%f %p'
+				# (change requires restart)
+#archive_cleanup_command = ''	# command to execute at every restartpoint
+#recovery_end_command = ''	# command to execute at completion of recovery
+
+# - Recovery Target -
+
+# Set these only when performing a targeted recovery.
+
+#recovery_target = ''		# 'immediate' to end recovery as soon as a
+                                # consistent state is reached
+				# (change requires restart)
+#recovery_target_name = ''	# the named restore point to which recovery will proceed
+				# (change requires restart)
+#recovery_target_time = ''	# the time stamp up to which recovery will proceed
+				# (change requires restart)
+#recovery_target_xid = ''	# the transaction ID up to which recovery will proceed
+				# (change requires restart)
+#recovery_target_lsn = ''	# the WAL LSN up to which recovery will proceed
+				# (change requires restart)
+#recovery_target_inclusive = on # Specifies whether to stop:
+				# just after the specified recovery target (on)
+				# just before the recovery target (off)
+				# (change requires restart)
+#recovery_target_timeline = 'latest'	# 'current', 'latest', or timeline ID
+				# (change requires restart)
+#recovery_target_action = 'pause'	# 'pause', 'promote', 'shutdown'
+				# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# REPLICATION
+#------------------------------------------------------------------------------
+
+# - Sending Servers -
+
+# Set these on the master and on any standby that will send replication data.
+
+#max_wal_senders = 10		# max number of walsender processes
+				# (change requires restart)
+#wal_keep_segments = 0		# in logfile segments; 0 disables
+#wal_sender_timeout = 60s	# in milliseconds; 0 disables
+
+#max_replication_slots = 10	# max number of replication slots
+				# (change requires restart)
+#track_commit_timestamp = off	# collect timestamp of transaction commit
+				# (change requires restart)
+
+# - Master Server -
+
+# These settings are ignored on a standby server.
+
+#synchronous_standby_names = ''	# standby servers that provide sync rep
+				# method to choose sync standbys, number of sync standbys,
+				# and comma-separated list of application_name
+				# from standby(s); '*' = all
+#vacuum_defer_cleanup_age = 0	# number of xacts by which cleanup is delayed
+
+# - Standby Servers -
+
+# These settings are ignored on a master server.
+
+#primary_conninfo = ''			# connection string to sending server
+					# (change requires restart)
+#primary_slot_name = ''			# replication slot on sending server
+					# (change requires restart)
+#promote_trigger_file = ''		# file name whose presence ends recovery
+#hot_standby = on			# "off" disallows queries during recovery
+					# (change requires restart)
+#max_standby_archive_delay = 30s	# max delay before canceling queries
+					# when reading WAL from archive;
+					# -1 allows indefinite delay
+#max_standby_streaming_delay = 30s	# max delay before canceling queries
+					# when reading streaming WAL;
+					# -1 allows indefinite delay
+#wal_receiver_status_interval = 10s	# send replies at least this often
+					# 0 disables
+#hot_standby_feedback = off		# send info from standby to prevent
+					# query conflicts
+#wal_receiver_timeout = 60s		# time that receiver waits for
+					# communication from master
+					# in milliseconds; 0 disables
+#wal_retrieve_retry_interval = 5s	# time to wait before retrying to
+					# retrieve WAL after a failed attempt
+#recovery_min_apply_delay = 0		# minimum delay for applying changes during recovery
+
+# - Subscribers -
+
+# These settings are ignored on a publisher.
+
+#max_logical_replication_workers = 4	# taken from max_worker_processes
+					# (change requires restart)
+#max_sync_workers_per_subscription = 2	# taken from max_logical_replication_workers
+
+
+#------------------------------------------------------------------------------
+# QUERY TUNING
+#------------------------------------------------------------------------------
+
+# - Planner Method Configuration -
+
+#enable_bitmapscan = on
+#enable_hashagg = on
+#enable_hashjoin = on
+#enable_indexscan = on
+#enable_indexonlyscan = on
+#enable_material = on
+#enable_mergejoin = on
+#enable_nestloop = on
+#enable_parallel_append = on
+#enable_seqscan = on
+#enable_sort = on
+#enable_tidscan = on
+#enable_partitionwise_join = off
+#enable_partitionwise_aggregate = off
+#enable_parallel_hash = on
+#enable_partition_pruning = on
+
+# - Planner Cost Constants -
+
+#seq_page_cost = 1.0			# measured on an arbitrary scale
+#random_page_cost = 4.0			# same scale as above
+#cpu_tuple_cost = 0.01			# same scale as above
+#cpu_index_tuple_cost = 0.005		# same scale as above
+#cpu_operator_cost = 0.0025		# same scale as above
+#parallel_tuple_cost = 0.1		# same scale as above
+#parallel_setup_cost = 1000.0	# same scale as above
+
+#jit_above_cost = 100000		# perform JIT compilation if available
+					# and query more expensive than this;
+					# -1 disables
+#jit_inline_above_cost = 500000		# inline small functions if query is
+					# more expensive than this; -1 disables
+#jit_optimize_above_cost = 500000	# use expensive JIT optimizations if
+					# query is more expensive than this;
+					# -1 disables
+
+#min_parallel_table_scan_size = 8MB
+#min_parallel_index_scan_size = 512kB
+#effective_cache_size = 4GB
+
+# - Genetic Query Optimizer -
+
+#geqo = on
+#geqo_threshold = 12
+#geqo_effort = 5			# range 1-10
+#geqo_pool_size = 0			# selects default based on effort
+#geqo_generations = 0			# selects default based on effort
+#geqo_selection_bias = 2.0		# range 1.5-2.0
+#geqo_seed = 0.0			# range 0.0-1.0
+
+# - Other Planner Options -
+
+#default_statistics_target = 100	# range 1-10000
+#constraint_exclusion = partition	# on, off, or partition
+#cursor_tuple_fraction = 0.1		# range 0.0-1.0
+#from_collapse_limit = 8
+#join_collapse_limit = 8		# 1 disables collapsing of explicit
+					# JOIN clauses
+#force_parallel_mode = off
+#jit = on				# allow JIT compilation
+#plan_cache_mode = auto			# auto, force_generic_plan or
+					# force_custom_plan
+
+
+#------------------------------------------------------------------------------
+# REPORTING AND LOGGING
+#------------------------------------------------------------------------------
+
+# - Where to Log -
+
+#log_destination = 'stderr'		# Valid values are combinations of
+					# stderr, csvlog, syslog, and eventlog,
+					# depending on platform.  csvlog
+					# requires logging_collector to be on.
+
+# This is used when logging to stderr:
+#logging_collector = off		# Enable capturing of stderr and csvlog
+					# into log files. Required to be on for
+					# csvlogs.
+					# (change requires restart)
+
+# These are only used if logging_collector is on:
+#log_directory = 'log'			# directory where log files are written,
+					# can be absolute or relative to PGDATA
+#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'	# log file name pattern,
+					# can include strftime() escapes
+#log_file_mode = 0600			# creation mode for log files,
+					# begin with 0 to use octal notation
+#log_truncate_on_rotation = off		# If on, an existing log file with the
+					# same name as the new log file will be
+					# truncated rather than appended to.
+					# But such truncation only occurs on
+					# time-driven rotation, not on restarts
+					# or size-driven rotation.  Default is
+					# off, meaning append to existing files
+					# in all cases.
+#log_rotation_age = 1d			# Automatic rotation of logfiles will
+					# happen after that time.  0 disables.
+#log_rotation_size = 10MB		# Automatic rotation of logfiles will
+					# happen after that much log output.
+					# 0 disables.
+
+# These are relevant when logging to syslog:
+#syslog_facility = 'LOCAL0'
+#syslog_ident = 'postgres'
+#syslog_sequence_numbers = on
+#syslog_split_messages = on
+
+# This is only relevant when logging to eventlog (win32):
+# (change requires restart)
+#event_source = 'PostgreSQL'
+
+# - When to Log -
+
+#log_min_messages = warning		# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   info
+					#   notice
+					#   warning
+					#   error
+					#   log
+					#   fatal
+					#   panic
+
+#log_min_error_statement = error	# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   info
+					#   notice
+					#   warning
+					#   error
+					#   log
+					#   fatal
+					#   panic (effectively off)
+
+#log_min_duration_statement = -1	# -1 is disabled, 0 logs all statements
+					# and their durations, > 0 logs only
+					# statements running at least this number
+					# of milliseconds
+
+#log_transaction_sample_rate = 0.0	# Fraction of transactions whose statements
+					# are logged regardless of their duration. 1.0 logs all
+					# statements from all transactions, 0.0 never logs.
+
+# - What to Log -
+
+#debug_print_parse = off
+#debug_print_rewritten = off
+#debug_print_plan = off
+#debug_pretty_print = on
+#log_checkpoints = off
+#log_connections = off
+#log_disconnections = off
+#log_duration = off
+#log_error_verbosity = default		# terse, default, or verbose messages
+#log_hostname = off
+#log_line_prefix = '%m [%p] '		# special values:
+					#   %a = application name
+					#   %u = user name
+					#   %d = database name
+					#   %r = remote host and port
+					#   %h = remote host
+					#   %p = process ID
+					#   %t = timestamp without milliseconds
+					#   %m = timestamp with milliseconds
+					#   %n = timestamp with milliseconds (as a Unix epoch)
+					#   %i = command tag
+					#   %e = SQL state
+					#   %c = session ID
+					#   %l = session line number
+					#   %s = session start timestamp
+					#   %v = virtual transaction ID
+					#   %x = transaction ID (0 if none)
+					#   %q = stop here in non-session
+					#        processes
+					#   %% = '%'
+					# e.g. '<%u%%%d> '
+#log_lock_waits = off			# log lock waits >= deadlock_timeout
+#log_statement = 'none'			# none, ddl, mod, all
+#log_replication_commands = off
+#log_temp_files = -1			# log temporary files equal or larger
+					# than the specified size in kilobytes;
+					# -1 disables, 0 logs all temp files
+log_timezone = 'Etc/UTC'
+
+#------------------------------------------------------------------------------
+# PROCESS TITLE
+#------------------------------------------------------------------------------
+
+#cluster_name = ''			# added to process titles if nonempty
+					# (change requires restart)
+#update_process_title = on
+
+
+#------------------------------------------------------------------------------
+# STATISTICS
+#------------------------------------------------------------------------------
+
+# - Query and Index Statistics Collector -
+
+#track_activities = on
+#track_counts = on
+#track_io_timing = off
+#track_functions = none			# none, pl, all
+#track_activity_query_size = 1024	# (change requires restart)
+#stats_temp_directory = 'pg_stat_tmp'
+
+
+# - Monitoring -
+
+#log_parser_stats = off
+#log_planner_stats = off
+#log_executor_stats = off
+#log_statement_stats = off
+
+
+#------------------------------------------------------------------------------
+# AUTOVACUUM
+#------------------------------------------------------------------------------
+
+#autovacuum = on			# Enable autovacuum subprocess?  'on'
+					# requires track_counts to also be on.
+#log_autovacuum_min_duration = -1	# -1 disables, 0 logs all actions and
+					# their durations, > 0 logs only
+					# actions running at least this number
+					# of milliseconds.
+#autovacuum_max_workers = 3		# max number of autovacuum subprocesses
+					# (change requires restart)
+#autovacuum_naptime = 1min		# time between autovacuum runs
+#autovacuum_vacuum_threshold = 50	# min number of row updates before
+					# vacuum
+#autovacuum_analyze_threshold = 50	# min number of row updates before
+					# analyze
+#autovacuum_vacuum_scale_factor = 0.2	# fraction of table size before vacuum
+#autovacuum_analyze_scale_factor = 0.1	# fraction of table size before analyze
+#autovacuum_freeze_max_age = 200000000	# maximum XID age before forced vacuum
+					# (change requires restart)
+#autovacuum_multixact_freeze_max_age = 400000000	# maximum multixact age
+					# before forced vacuum
+					# (change requires restart)
+#autovacuum_vacuum_cost_delay = 2ms	# default vacuum cost delay for
+					# autovacuum, in milliseconds;
+					# -1 means use vacuum_cost_delay
+#autovacuum_vacuum_cost_limit = -1	# default vacuum cost limit for
+					# autovacuum, -1 means use
+					# vacuum_cost_limit
+
+
+#------------------------------------------------------------------------------
+# CLIENT CONNECTION DEFAULTS
+#------------------------------------------------------------------------------
+
+# - Statement Behavior -
+
+#client_min_messages = notice		# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   log
+					#   notice
+					#   warning
+					#   error
+#search_path = '"$user", public'	# schema names
+#row_security = on
+#default_tablespace = ''		# a tablespace name, '' uses the default
+#temp_tablespaces = ''			# a list of tablespace names, '' uses
+					# only default tablespace
+#default_table_access_method = 'heap'
+#check_function_bodies = on
+#default_transaction_isolation = 'read committed'
+#default_transaction_read_only = off
+#default_transaction_deferrable = off
+#session_replication_role = 'origin'
+#statement_timeout = 0			# in milliseconds, 0 is disabled
+#lock_timeout = 0			# in milliseconds, 0 is disabled
+#idle_in_transaction_session_timeout = 0	# in milliseconds, 0 is disabled
+#vacuum_freeze_min_age = 50000000
+#vacuum_freeze_table_age = 150000000
+#vacuum_multixact_freeze_min_age = 5000000
+#vacuum_multixact_freeze_table_age = 150000000
+#vacuum_cleanup_index_scale_factor = 0.1	# fraction of total number of tuples
+						# before index cleanup, 0 always performs
+						# index cleanup
+#bytea_output = 'hex'			# hex, escape
+#xmlbinary = 'base64'
+#xmloption = 'content'
+#gin_fuzzy_search_limit = 0
+#gin_pending_list_limit = 4MB
+
+# - Locale and Formatting -
+
+datestyle = 'iso, mdy'
+#intervalstyle = 'postgres'
+timezone = 'Etc/UTC'
+#timezone_abbreviations = 'Default'     # Select the set of available time zone
+					# abbreviations.  Currently, there are
+					#   Default
+					#   Australia (historical usage)
+					#   India
+					# You can create your own file in
+					# share/timezonesets/.
+#extra_float_digits = 1			# min -15, max 3; any value >0 actually
+					# selects precise output mode
+#client_encoding = sql_ascii		# actually, defaults to database
+					# encoding
+
+# These settings are initialized by initdb, but they can be changed.
+lc_messages = 'en_US.utf8'			# locale for system error message
+					# strings
+lc_monetary = 'en_US.utf8'			# locale for monetary formatting
+lc_numeric = 'en_US.utf8'			# locale for number formatting
+lc_time = 'en_US.utf8'				# locale for time formatting
+
+# default configuration for text search
+default_text_search_config = 'pg_catalog.english'
+
+# - Shared Library Preloading -
+
+#shared_preload_libraries = ''	# (change requires restart)
+#local_preload_libraries = ''
+#session_preload_libraries = ''
+#jit_provider = 'llvmjit'		# JIT library to use
+
+# - Other Defaults -
+
+#dynamic_library_path = '$libdir'
+
+
+#------------------------------------------------------------------------------
+# LOCK MANAGEMENT
+#------------------------------------------------------------------------------
+
+#deadlock_timeout = 1s
+#max_locks_per_transaction = 64		# min 10
+					# (change requires restart)
+#max_pred_locks_per_transaction = 64	# min 10
+					# (change requires restart)
+#max_pred_locks_per_relation = -2	# negative values mean
+					# (max_pred_locks_per_transaction
+					#  / -max_pred_locks_per_relation) - 1
+#max_pred_locks_per_page = 2            # min 0
+
+
+#------------------------------------------------------------------------------
+# VERSION AND PLATFORM COMPATIBILITY
+#------------------------------------------------------------------------------
+
+# - Previous PostgreSQL Versions -
+
+#array_nulls = on
+#backslash_quote = safe_encoding	# on, off, or safe_encoding
+#escape_string_warning = on
+#lo_compat_privileges = off
+#operator_precedence_warning = off
+#quote_all_identifiers = off
+#standard_conforming_strings = on
+#synchronize_seqscans = on
+
+# - Other Platforms and Clients -
+
+#transform_null_equals = off
+
+
+#------------------------------------------------------------------------------
+# ERROR HANDLING
+#------------------------------------------------------------------------------
+
+#exit_on_error = off			# terminate session on any error?
+#restart_after_crash = on		# reinitialize after backend crash?
+#data_sync_retry = off			# retry or panic on failure to fsync
+					# data?
+					# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONFIG FILE INCLUDES
+#------------------------------------------------------------------------------
+
+# These options allow settings to be loaded from files other than the
+# default postgresql.conf.  Note that these are directives, not variable
+# assignments, so they can usefully be given more than once.
+
+#include_dir = '...'			# include files ending in '.conf' from
+					# a directory, e.g., 'conf.d'
+#include_if_exists = '...'		# include file only if it exists
+#include = '...'			# include file
+
+
+#------------------------------------------------------------------------------
+# CUSTOMIZED OPTIONS
+#------------------------------------------------------------------------------
+
+# Add settings for extensions here
+
+
+# https://pgtune.leopard.in.ua/#/ oltp 48G ram, 12 cpus, ssd
+
+shared_preload_libraries='pg_stat_statements,pg_qualstats'
+track_functions=pl
+track_io_timing=on
+track_activity_query_size=2048
+pg_stat_statements.max=10000
+pg_stat_statements.track=all
+max_connections=100
+shared_buffers=12GB
+effective_cache_size=36GB
+maintenance_work_mem=2GB
+checkpoint_completion_target=0.9
+wal_buffers=16MB
+default_statistics_target=100
+random_page_cost=1.1
+effective_io_concurrency=200
+work_mem=31457kB
+min_wal_size=2GB
+max_wal_size=8GB
+max_worker_processes=12
+max_parallel_workers_per_gather=4
+max_parallel_workers=12
+max_parallel_maintenance_workers=4
diff --git a/scripts/ci/python/3.6/Dockerfile b/scripts/ci/python/3.6/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..69b046c159281790efb9413e1ace50b18d5c52a0
--- /dev/null
+++ b/scripts/ci/python/3.6/Dockerfile
@@ -0,0 +1,46 @@
+FROM python:3.6.12-buster
+
+# Setup python environment.
+ENV LANG C.UTF-8
+ENV LC_ALL C.UTF-8
+ENV PYTHONDONTWRITEBYTECODE 1
+ENV PYTHONFAULTHANDLER 1
+
+# Install debian packages.
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+        curl \
+        ca-certificates \
+        gnupg \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install debian pgdg repository.
+RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
+RUN echo "deb http://apt.postgresql.org/pub/repos/apt buster-pgdg main" \
+        > /etc/apt/sources.list.d/pgdg.list
+RUN apt-get update
+# Install postgresql client programs for various postgresql versions.
+RUN apt-get install -y --no-install-recommends \
+        postgresql-client-10 \
+        postgresql-client-11 \
+        postgresql-client-12 \
+        postgresql-client-13 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Upgrade some crucial python packages.
+RUN pip install --upgrade pip setuptools wheel
+
+# Install python dependencies via pip.
+RUN pip install pipenv poetry
+
+ARG user
+ENV user ${user}
+
+## Add user ##
+RUN groupadd --gid 1000 ${user} \
+    && useradd --create-home --uid 1000 --gid ${user} ${user}
+USER ${user}
+WORKDIR /home/${user}
+RUN chown -R ${user}:${user} /home/${user}
+
+CMD [ "python3" ]
diff --git a/scripts/ci/python/3.6/dev.dockerfile b/scripts/ci/python/3.6/dev.dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..19fc00387378571659818eec7ab8eeac65d8eb19
--- /dev/null
+++ b/scripts/ci/python/3.6/dev.dockerfile
@@ -0,0 +1,51 @@
+FROM python:3.6.12-buster
+
+# For running python as non-root user, e.g. on devel machine.
+
+# Setup python environment.
+ENV LANG C.UTF-8
+ENV LC_ALL C.UTF-8
+ENV PYTHONDONTWRITEBYTECODE 1
+ENV PYTHONFAULTHANDLER 1
+
+# Install debian packages.
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+        curl \
+        ca-certificates \
+        gnupg \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install debian pgdg repository.
+RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
+RUN echo "deb http://apt.postgresql.org/pub/repos/apt buster-pgdg main" \
+        > /etc/apt/sources.list.d/pgdg.list
+RUN apt-get update
+# Install postgresql client programs for various postgresql versions.
+RUN apt-get install -y --no-install-recommends \
+        postgresql-client-10 \
+        postgresql-client-11 \
+        postgresql-client-12 \
+        postgresql-client-13 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Upgrade some crucial python packages.
+RUN pip install --upgrade pip setuptools wheel
+
+# Install python dependencies via pip.
+RUN pip install pipenv poetry
+
+ARG user
+ENV user ${user}
+
+## Add user ##
+RUN groupadd --gid 1000 ${user} \
+    && useradd --create-home --uid 1000 --gid ${user} ${user}
+
+RUN mkdir -p /home/${user}/src/hivemind
+RUN chown -R ${user}:${user} /home/${user}
+
+WORKDIR /home/${user}/src/hivemind
+USER ${user}
+
+CMD [ "python3" ]
diff --git a/scripts/ci/python/3.8/Dockerfile b/scripts/ci/python/3.8/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..9a7bf7893c6de9c23141e678ec6df0e0373fce2e
--- /dev/null
+++ b/scripts/ci/python/3.8/Dockerfile
@@ -0,0 +1,46 @@
+FROM python:3.8.3-buster
+
+# Setup python environment.
+ENV LANG C.UTF-8
+ENV LC_ALL C.UTF-8
+ENV PYTHONDONTWRITEBYTECODE 1
+ENV PYTHONFAULTHANDLER 1
+
+# Install debian packages.
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+        curl \
+        ca-certificates \
+        gnupg \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install debian pgdg repository.
+RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
+RUN echo "deb http://apt.postgresql.org/pub/repos/apt buster-pgdg main" \
+        > /etc/apt/sources.list.d/pgdg.list
+RUN apt-get update
+# Install postgresql client programs for various postgresql versions.
+RUN apt-get install -y --no-install-recommends \
+        postgresql-client-10 \
+        postgresql-client-11 \
+        postgresql-client-12 \
+        postgresql-client-13 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Upgrade some crucial python packages.
+RUN pip install --upgrade pip setuptools wheel
+
+# Install python dependencies via pip.
+RUN pip install pipenv poetry
+
+ARG user
+ENV user ${user}
+
+## Add user ##
+RUN groupadd --gid 1000 ${user} \
+    && useradd --create-home --uid 1000 --gid ${user} ${user}
+USER ${user}
+WORKDIR /home/${user}
+RUN chown -R ${user}:${user} /home/${user}
+
+CMD [ "python3" ]
diff --git a/scripts/ci/setup_env.py b/scripts/ci/setup_env.py
new file mode 100755
index 0000000000000000000000000000000000000000..8f24d82d5c36c0a9bc725d5b9fa7d9c426a922c9
--- /dev/null
+++ b/scripts/ci/setup_env.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+Tool for a Gitlab runner to read the environment from a project variable
+and set up the bash environment.
+When running on Gitlab CI you can do this:
+```
+eval "$(cat $MY_ENV_VARIABLE | ./scripts/ci/setup_env.py)"
+echo "RUNNER_ID is $RUNNER_ID"
+```
+In bash you can do this:
+```
+eval "$(cat ./.tmp/env.yaml | ./scripts/ci/setup_env.py)"
+echo "RUNNER_ID is $RUNNER_ID"
+```
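+
+The input is YAML describing the runners. A minimal sketch (host names,
+ports and credentials below are placeholders; the `runners`/`common` layout
+and the `host`/`postgres_*` keys follow what this script reads and
+re-exports as `RUNNER_*` variables):
+```
+runners:
+  "10":                        # runner ids are string keys
+    host: runner-10.example.com
+    postgres_host: localhost
+    postgres_port: 5432
+common:
+  postgres_admin_user: postgres
+```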
+"""
+
+import logging
+import sys
+import argparse
+import yaml
+
+FORMAT = '# %(asctime)s - %(name)s - %(levelname)s - %(message)s '
+logging.basicConfig(format=FORMAT)
+logger = logging.getLogger(__name__)
+
+
+def output(message, outfile, end='\n'):
+    """Print data to outfile"""
+    print(message, file=outfile, end=end)
+
+
+def read(infile):
+    """Read data from infile"""
+    if hasattr(infile, 'read'):
+        # data = json.loads(infile.read())
+        data = yaml.safe_load(infile.read())
+    else:
+        # data = json.loads(infile)
+        data = yaml.safe_load(infile)
+    return data
+
+
+def setup_env(current_runner_id, hive_sync_runner_id, infile, outfile, end, **kwargs):
+    """
+    Resolve and output the environment for bash in the pending CI job.
+    Assumption: all jobs in a pipeline must use the same database.
+    We need to point the current runner to the database used by the runner
+    that did hive sync (the first stage in the pipeline).
+    """
+
+    logger.debug('current_runner_id: %s', current_runner_id)
+    logger.debug('hive_sync_runner_id: %s', hive_sync_runner_id)
+
+    data = read(infile)
+    logger.debug('data: %s', data)
+
+    current_runner = data['runners'][str(current_runner_id)]
+    if hive_sync_runner_id == 0:
+        hive_sync_runner = current_runner
+    else:
+        hive_sync_runner = data['runners'][str(hive_sync_runner_id)]
+
+    if hive_sync_runner_id == 0:
+        # Do nothing, obviously. Current runner does hive sync itself.
+        logger.debug('case 1')
+        runner = current_runner
+    elif current_runner_id == hive_sync_runner_id:
+        # Do nothing, obviously. Current runner is the same, as runner
+        # that did hive sync.
+        logger.debug('case 2')
+        runner = current_runner
+    else:
+        if current_runner['host'] == hive_sync_runner['host']:
+            # We assume that all executors on the same machine
+            # use the same postgres server with the same credentials
+            # and unix socket connection configuration. So do nothing.
+            logger.debug('case 3')
+            runner = current_runner
+        else:
+            # Take postgres stuff from runner that did hive sync,
+            # but point current runner to postgres on the host of runner
+            # that did hive sync (exposed on network, we assume).
+            logger.debug('case 4')
+            runner = {}
+            for key, value in current_runner.items():
+                if key.startswith('postgres'):
+                    if key == 'postgres_host':
+                        runner[key] = hive_sync_runner['host']
+                    else:
+                        runner[key] = hive_sync_runner[key]
+                else:
+                    runner[key] = value
+
+    for key in runner:
+        output(
+            f'export RUNNER_{key.upper()}="{str(runner[key])}"',
+            outfile,
+            end,
+            )
+
+    for key in data['common']:
+        output(
+            f"export RUNNER_{key.upper()}=\"{str(data['common'][key])}\"",
+            outfile,
+            end,
+            )
+
+
+def parse_args():
+    """Parse command line arguments"""
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter
+        )
+    parser.add_argument(
+        'infile',
+        type=argparse.FileType('r'),
+        nargs='?',
+        default=sys.stdin,
+        help='Input file or pipe via STDIN'
+        )
+    parser.add_argument(
+        '-o', '--outfile',
+        type=argparse.FileType('w'),
+        default=sys.stdout,
+        help='Output file, STDOUT if not set'
+        )
+    parser.add_argument(
+        "-e", "--end",
+        dest='end',
+        default='\n',
+        help='String at the end of line in output'
+        )
+    parser.add_argument(
+        "-s", "--hive-sync-runner-id",
+        required=True,
+        type=int,
+        help='Id of the runner which did hive sync; 0 when the current runner did the hive sync itself'
+        )
+    parser.add_argument(
+        "-c", "--current-runner-id",
+        required=True,
+        type=int,
+        help='Id of current runner'
+        )
+    parser.add_argument(
+        '--log-level',
+        default='INFO',
+        dest='log_level',
+        choices=['debug', 'info', 'warning', 'error'],
+        help='Log level (string)',
+        )
+
+    result = parser.parse_args()
+
+    # configure logger and print config
+    root = logging.getLogger()
+    root.setLevel(result.log_level.upper())
+
+    return result
+
+
+def main():
+    """Main dispatcher function"""
+    flags = parse_args()
+    setup_env(**vars(flags))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/scripts/ci/start-api-benchmarks.sh b/scripts/ci/start-api-benchmarks.sh
new file mode 100755
index 0000000000000000000000000000000000000000..959940920dae3744898474e56be9fae064fecb53
--- /dev/null
+++ b/scripts/ci/start-api-benchmarks.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
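+# Usage: start-api-benchmarks.sh <hivemind_address> <hivemind_port> [iterations=5] [jobs=auto]
+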
+set -e
+
+pip install tox
+
+export HIVEMIND_ADDRESS=$1
+export HIVEMIND_PORT=$2
+ITERATIONS=${3:-5}
+JOBS=${4:-auto}
+export TAVERN_DISABLE_COMPARATOR=true
+
+echo Attempting to start benchmarks on hivemind instance listening on: $HIVEMIND_ADDRESS port: $HIVEMIND_PORT
+
+for (( i=0; i<$ITERATIONS; i++ ))
+do
+  echo About to run iteration $i
+  tox -e tavern-benchmark -- \
+      -W ignore::pytest.PytestDeprecationWarning \
+      -n $JOBS \
+      --junitxml=../../../../benchmarks-$i.xml
+  echo Done!
+done
diff --git a/scripts/ci/start-api-smoketest.sh b/scripts/ci/start-api-smoketest.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b3b4221ee29f5339d93b90ce9e3b15d14a29320d
--- /dev/null
+++ b/scripts/ci/start-api-smoketest.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
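+# Usage: start-api-smoketest.sh <hivemind_address> <hivemind_port> <test_group> <junit_xml_file> [jobs=auto]
+# (an empty test_group runs all tests)
+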
+set -e
+
+# Existence of file `tox-installed` means that a preceding script
+# has installed tox already.
+if [ ! -f "tox-installed" ]; then
+    pip install tox
+fi
+
+export HIVEMIND_ADDRESS=$1
+export HIVEMIND_PORT=$2
+TEST_GROUP=$3
+JUNITXML=$4
+JOBS=${5:-auto}
+export TAVERN_DIR="tests/tests_api/hivemind/tavern"
+
+echo "Starting tests on hivemind server running on ${HIVEMIND_ADDRESS}:${HIVEMIND_PORT}"
+
+echo "Selected test group (if empty all will be executed): $TEST_GROUP"
+
+tox -e tavern -- \
+    -W ignore::pytest.PytestDeprecationWarning \
+    -n $JOBS \
+    --junitxml=../../../../$JUNITXML $TEST_GROUP
diff --git a/scripts/ci/start_api_benchmark.py b/scripts/ci/start_api_benchmark.py
new file mode 100755
index 0000000000000000000000000000000000000000..cf726cc3b034d1eae333e07d02c41ade9a30cf32
--- /dev/null
+++ b/scripts/ci/start_api_benchmark.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python3
+import os
+import subprocess
+from json import load, dump
+from benchmark_generator import make_benchmark_test_file
+from json_report_parser import json_report_parser
+
+def get_test_directories(tests_root_dir):
+    ret = []
+    for name in os.listdir(tests_root_dir):
+        dir_path = os.path.join(tests_root_dir, name)
+        if os.path.isdir(dir_path):
+            ret.append(dir_path)
+    return ret
+
+def find_data_in_benchmarks(name, json_data):
+    for benchmark in json_data['benchmarks']:
+        if benchmark['name'] == name:
+            return (benchmark['stats']['min'], benchmark['stats']['max'], benchmark['stats']['mean'])
+    return (None, None, None)
+
+def join_benchmark_data(file_name, json_files):
+    from statistics import mean
+    jsons = []
+    for json_file in json_files:
+        with open(json_file, "r") as src:
+            jsons.append(load(src))
+    for benchmark in jsons[0]['benchmarks']:
+        bmin = []
+        bmax = []
+        bmean = []
+        for j in jsons:
+            data = find_data_in_benchmarks(benchmark['name'], j)
+            if data[0] is not None:
+                bmin.append(data[0])
+            if data[1] is not None:
+                bmax.append(data[1])
+            if data[2] is not None:
+                bmean.append(data[2])
+        benchmark['stats']['min'] = min(bmin)
+        benchmark['stats']['max'] = max(bmax)
+        benchmark['stats']['mean'] = mean(bmean)
+
+    with open("{}.json".format(file_name), "w") as out:
+        dump(jsons[0], out)
+
+if __name__ == "__main__":
+    import argparse
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("hivemind_address", type=str, help="Address of hivemind instance")
+    parser.add_argument("hivemind_port", type=int, help="Port of hivemind instance")
+    parser.add_argument("tests_root_dir", type=str, help="Path to tests root dir")
+    parser.add_argument("--benchmark-runs", type=int, default=3, help="How many benchmark runs")
+    parser.add_argument("--time-threshold", dest="time_threshold", type=float, default=1.0, help="Time threshold for test execution time, tests with execution time greater than threshold will be marked on red.")
+    args = parser.parse_args()
+
+    assert os.path.exists(args.tests_root_dir), "Directory does not exist"
+    assert args.benchmark_runs > 0, "Benchmark runs option has to be a positive number"
+
+    hivemind_url = "http://{}:{}".format(args.hivemind_address, args.hivemind_port)
+    test_directories = get_test_directories(args.tests_root_dir)
+
+    benchmarks_files = []
+    for test_directory in test_directories:
+        benchmark_file_name = "benchmark_" + test_directory.split("/")[-1] + ".py"
+        make_benchmark_test_file(benchmark_file_name, hivemind_url, test_directory)
+        benchmarks_files.append(benchmark_file_name)
+
+    benchmark_json_files = {}
+    for run in range(args.benchmark_runs):
+        for benchmark_file in benchmarks_files:
+            name, ext = os.path.splitext(benchmark_file)
+            json_file_name = "{}-{:03d}.json".format(name, run)
+            cmd = [
+              "pytest",
+              "--benchmark-max-time=0.000001",
+              "--benchmark-min-rounds=10",
+              "--benchmark-json={}".format(json_file_name),
+              benchmark_file
+            ]
+            if name in benchmark_json_files:
+                benchmark_json_files[name].append(json_file_name)
+            else:
+                benchmark_json_files[name] = [json_file_name]
+            ret = subprocess.run(cmd)
+            if ret.returncode != 0:
+                print("Error while running `{}`".format(' '.join(cmd)))
+                exit(1)
+
+    for name, json_files in benchmark_json_files.items():
+        join_benchmark_data(name, json_files)
+
+    failed = []
+    for test_directory in test_directories:
+        json_file_name = "benchmark_" + test_directory.split("/")[-1] + ".json"
+        ret = json_report_parser(test_directory, json_file_name, args.time_threshold)
+        if ret:
+            failed.extend(ret)
+
+    if failed:
+        from prettytable import PrettyTable
+        summary = PrettyTable()
+        print("########## Test failed: the following tests exceeded the {}ms threshold ##########".format(args.time_threshold * 1000))
+        summary.field_names = ['Test name', 'Mean time [ms]', 'Call parameters']
+        for entry in failed:
+            summary.add_row(entry)
+        print(summary)
+        exit(2)
+    exit(0)
+
diff --git a/scripts/ci/timer.sh b/scripts/ci/timer.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f8ebfb3504bdc68c3107e61a5779ad8e083e8683
--- /dev/null
+++ b/scripts/ci/timer.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
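+# Usage: timer.sh start|check
+# "start" stores the current time in .tmp/timer-start; "check" prints the elapsed time.
+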
+set -euo pipefail
+
+JOB=${1:-}
+
+start() {
+  mkdir -p ".tmp"
+  echo `date +%s` > ".tmp/timer-start"
+  echo "Timer: started at:" $(date -u +"%Y-%m-%dT%H:%M:%SZ")
+}
+
+check() {
+  echo "Timer: current time:" $(date -u +"%Y-%m-%dT%H:%M:%SZ")
+  start=$(cat ".tmp/timer-start" 2>/dev/null || echo 0)
+  end=`date +%s`
+  if [ "$start" -gt "0" ]; then
+    runtime=$((end-start))
+    echo "Timer: time elapsed: ${runtime} s"
+  fi
+}
+
+main() {
+  if [ "$JOB" = "start" ]; then
+    start
+  elif [ "$JOB" = "check" ]; then
+    check
+  else
+    echo "Invalid argument"
+    exit 1
+  fi
+}
+
+main
diff --git a/scripts/ci/wait-for-postgres.sh b/scripts/ci/wait-for-postgres.sh
new file mode 100755
index 0000000000000000000000000000000000000000..38a098b6d0e2d8cb0c6c52b7cdfb1b5c7b785d32
--- /dev/null
+++ b/scripts/ci/wait-for-postgres.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+
+# wait-for-postgres.sh
+# Use in docker-compose:
+# command: ["./wait-for-postgres.sh", "name-of-postgres-service", "python", "app.py"]
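+# Arguments: <host> <port>; when omitted they fall back to
+# $RUNNER_POSTGRES_HOST and $RUNNER_POSTGRES_PORT.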
+
+set -e
+
+LIMIT=10 #seconds
+
+HOST=$1
+PORT=$2
+
+if [ -z "$HOST" ]
+then
+    HOST="$RUNNER_POSTGRES_HOST"
+fi
+if [ -z "$PORT" ]
+then
+    PORT="$RUNNER_POSTGRES_PORT"
+fi
+
+wait_for_postgres() {
+    counter=0
+    echo "Waiting for postgres on ${HOST}:${PORT}."
+    while ! pg_isready \
+            --host $HOST \
+            --port $PORT \
+            --timeout=1 --quiet; do
+        counter=$((counter+1))
+        sleep 1
+        if [ $counter -eq $LIMIT ]; then
+            echo "Timeout reached, postgres is unavailable, exiting."
+            exit 1
+        fi
+    done
+}
+
+output_configuration() {
+
+    mkdir -p pg-stats
+    DIR=$PWD/pg-stats
+
+    echo "Postgres is up (discovered after ${counter}s)."
+    echo "-------------------------------------------------"
+    echo "Postgres version and configuration"
+    echo "-------------------------------------------------"
+    PGPASSWORD=$RUNNER_POSTGRES_ADMIN_USER_PASSWORD psql \
+            --username "$RUNNER_POSTGRES_ADMIN_USER" \
+            --host "$HOST" \
+            --port $PORT \
+            --dbname postgres <<EOF
+SELECT version();
+-- select name, setting, unit from pg_settings;
+-- show all;
+\copy (select name, setting, unit from pg_settings) to '$DIR/pg_settings_on_start.csv' WITH CSV HEADER
+\q
+EOF
+    echo "-------------------------------------------------"
+
+}
+
+wait_for_postgres
+output_configuration
diff --git a/scripts/ci_start_api_benchmarks.sh b/scripts/ci_start_api_benchmarks.sh
new file mode 100755
index 0000000000000000000000000000000000000000..361629b8dbe9e714e295cc4b7a2ab695445663af
--- /dev/null
+++ b/scripts/ci_start_api_benchmarks.sh
@@ -0,0 +1,20 @@
+#!/bin/bash 
+
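+# Usage: ci_start_api_benchmarks.sh <hivemind_address> <hivemind_port> <iterations>
+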
+set -e
+pip3 install tox --user
+
+export HIVEMIND_ADDRESS=$1
+export HIVEMIND_PORT=$2
+export TAVERN_DISABLE_COMPARATOR=true
+
+echo Attempting to start benchmarks on hivemind instance listening on: $HIVEMIND_ADDRESS port: $HIVEMIND_PORT
+
+ITERATIONS=$3
+
+for (( i=0; i<$ITERATIONS; i++ ))
+do
+  echo About to run iteration $i
+  tox -e tavern-benchmark -- -W ignore::pytest.PytestDeprecationWarning -n auto --junitxml=../../../../benchmarks-$i.xml 
+  echo Done!
+done
+./scripts/xml_report_parser.py . ./tests/tests_api/hivemind/tavern
diff --git a/scripts/ci_start_api_smoketest.sh b/scripts/ci_start_api_smoketest.sh
new file mode 100755
index 0000000000000000000000000000000000000000..fb27185e9efc5be4fa2dedc1cf411bca841e44c1
--- /dev/null
+++ b/scripts/ci_start_api_smoketest.sh
@@ -0,0 +1,14 @@
+#!/bin/bash 
+
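+# Usage: ci_start_api_smoketest.sh <hivemind_address> <hivemind_port> <test_group> <junit_xml_file>
+# (an empty test_group runs all tests)
+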
+set -e
+pip3 install tox --user
+
+export HIVEMIND_ADDRESS=$1
+export HIVEMIND_PORT=$2
+export TAVERN_DIR="tests/tests_api/hivemind/tavern"
+
+echo Attempting to start tests on hivemind instance listening on: $HIVEMIND_ADDRESS port: $HIVEMIND_PORT
+
+echo "Selected test group (if empty all will be executed): $3"
+
+tox -e tavern -- -W ignore::pytest.PytestDeprecationWarning -n auto --junitxml=../../../../$4 $3
diff --git a/scripts/ci_start_server.sh b/scripts/ci_start_server.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d936772ae725491632f7d33eba607cacc5a53bfb
--- /dev/null
+++ b/scripts/ci_start_server.sh
@@ -0,0 +1,58 @@
+#!/bin/bash 
+
+set -xe
+
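+# Usage: ci_start_server.sh <db_name> <postgresql_connection_string> <hived_url> <http_port>
+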
+HIVEMIND_DB_NAME=$1
+HIVEMIND_POSTGRESQL_CONNECTION_STRING=$2
+HIVEMIND_SOURCE_HIVED_URL=$3
+HIVEMIND_HTTP_PORT=$4
+
+PYTHONUSERBASE=./local-site
+
+DB_NAME=${HIVEMIND_DB_NAME//-/_}
+DB_NAME=${DB_NAME//\[/_}
+DB_NAME=${DB_NAME//]/_}
+
+DB_URL=$HIVEMIND_POSTGRESQL_CONNECTION_STRING/$DB_NAME
+
+# Reuse DB_NAME as name of symbolic link pointing local hive "binary".
+HIVE_NAME=$DB_NAME
+
+SAVED_PID=0
+
+if [ -f hive_server.pid ]; then
+  SAVED_PID=`cat hive_server.pid`
+  kill -SIGINT $SAVED_PID || true;
+  sleep 5
+  kill -9 $SAVED_PID || true;
+
+  rm hive_server.pid;
+fi
+
+fuser $HIVEMIND_HTTP_PORT/tcp -k -INT || true
+sleep 5
+
+fuser $HIVEMIND_HTTP_PORT/tcp -k -KILL || true
+sleep 5
+
+ls -l dist/*
+rm -rf ./local-site
+mkdir -p `python3 -m site --user-site`
+python3 setup.py install --user --force
+ln -sf ./local-site/bin/hive $HIVE_NAME
+./$HIVE_NAME -h
+
+rm -rf hive_server.log
+
+echo Attempting to start hive server listening on $HIVEMIND_HTTP_PORT port...
+screen -L -Logfile hive_server.log -dmS $HIVE_NAME ./$HIVE_NAME server --pid-file hive_server.pid --http-server-port $HIVEMIND_HTTP_PORT --steemd-url "$HIVEMIND_SOURCE_HIVED_URL" --database-url $DB_URL
+for i in `seq 1 10`; do if [ -f hive_server.pid ]; then break; else sleep 1; fi;  done
+
+SAVED_PID=`cat hive_server.pid`
+LISTENING_PID=$(fuser $HIVEMIND_HTTP_PORT/tcp 2>/dev/null)
+echo "Retrieved hive pid is: $SAVED_PID"
+echo "Listening hive pid is: $LISTENING_PID"
+
+cat hive_server.log 
+if [ "$SAVED_PID" != "$LISTENING_PID" ]; then echo "Saved pid: $SAVED_PID vs listening pid: $LISTENING_PID mismatch..."; fi
+
diff --git a/scripts/ci_stop_server.sh b/scripts/ci_stop_server.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c6958d4d24a4ce5dc62d927665725a47d876c3f6
--- /dev/null
+++ b/scripts/ci_stop_server.sh
@@ -0,0 +1,16 @@
+#!/bin/bash 
+
+set -e
+
+# Usage ci_stop_server.sh pid_file_name
+
+if [ -f $1 ]; then
+  PID=`cat $1`;
+  kill -SIGINT $PID || true;
+  sleep 5
+  kill -9 $PID || true;
+else
+  echo Specified pid file: $1 does not exist.;
+fi
+
+
diff --git a/scripts/ci_sync.sh b/scripts/ci_sync.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0950cdd666794d4eef103d03b833c0c5c319d77d
--- /dev/null
+++ b/scripts/ci_sync.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+set -e
+set -o pipefail
+
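+# Usage: ci_sync.sh <db_name> <postgresql_connection_string> <hived_url> <max_block> <http_port> [enable_db_monitoring=yes]
+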
+HIVEMIND_DB_NAME=$1
+HIVEMIND_POSTGRESQL_CONNECTION_STRING=$2
+HIVEMIND_SOURCE_HIVED_URL=$3
+HIVEMIND_MAX_BLOCK=$4
+HIVEMIND_HTTP_PORT=$5
+HIVEMIND_ENABLE_DB_MONITORING=${6:-yes}
+
+PYTHONUSERBASE=./local-site
+
+DB_NAME=${HIVEMIND_DB_NAME//-/_}
+DB_NAME=${DB_NAME//\[/_}
+DB_NAME=${DB_NAME//]/_}
+DB_URL=$HIVEMIND_POSTGRESQL_CONNECTION_STRING/$DB_NAME
+echo Corrected db name $DB_NAME
+echo Corrected db url $DB_URL
+
+# Reuse DB_NAME as name of symbolic link pointing local hive "binary".
+HIVE_NAME=$DB_NAME
+
+if [ -f hive_sync.pid ]; then
+  kill -SIGINT `cat hive_sync.pid` || true;
+  rm hive_sync.pid;
+fi
+
+kill -SIGINT `pgrep -f "$HIVE_NAME sync"` || true;
+sleep 5
+kill -9 `pgrep -f "$HIVE_NAME sync"` || true;
+
+kill -SIGINT `pgrep -f "$HIVE_NAME server"` || true;
+sleep 5
+kill -9 `pgrep -f "$HIVE_NAME server"` || true;
+
+fuser $HIVEMIND_HTTP_PORT/tcp -k -INT || true
+sleep 5
+
+fuser $HIVEMIND_HTTP_PORT/tcp -k -KILL || true
+sleep 5
+
+ls -l dist/*
+rm -rf ./local-site
+mkdir -p `python3 -m site --user-site`
+python3 setup.py install --user --force
+ln -sf ./local-site/bin/hive $HIVE_NAME
+./$HIVE_NAME -h
+
+echo Attempting to recreate database $DB_NAME
+psql -U $POSTGRES_USER -h localhost -d postgres -c "DROP DATABASE IF EXISTS $DB_NAME;"
+if [ "$HIVEMIND_ENABLE_DB_MONITORING" = "yes" ]; then
+  psql -U $POSTGRES_USER -h localhost -d postgres -c "CREATE DATABASE $DB_NAME TEMPLATE template_monitoring;"
+else
+  psql -U $POSTGRES_USER -h localhost -d postgres -c "CREATE DATABASE $DB_NAME"
+fi
+
+echo Attempting to start hive sync using hived node: $HIVEMIND_SOURCE_HIVED_URL . Max sync block is: $HIVEMIND_MAX_BLOCK
+echo Attempting to access database $DB_URL
+./$HIVE_NAME sync --pid-file hive_sync.pid --test-max-block=$HIVEMIND_MAX_BLOCK --test-profile=False --steemd-url "$HIVEMIND_SOURCE_HIVED_URL" --prometheus-port 11011 \
+  --database-url $DB_URL  --mock-block-data-path mock_data/block_data/follow_op/mock_block_data_follow.json mock_data/block_data/community_op/mock_block_data_community.json mock_data/block_data/reblog_op/mock_block_data_reblog.json \
+  --community-start-block 4999998 2>&1 | tee -i hivemind-sync.log
+rm hive_sync.pid
diff --git a/scripts/db-monitoring/docker-compose.yml b/scripts/db-monitoring/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cc446b3399fe36eaa8f173c8dba52c426c372596
--- /dev/null
+++ b/scripts/db-monitoring/docker-compose.yml
@@ -0,0 +1,62 @@
+version: '3'
+
+services:
+
+  pgwatch2:
+    # Docs: https://pgwatch2.readthedocs.io/en/latest/index.html
+    image: cybertec/pgwatch2-nonroot:1.8.0
+    restart: unless-stopped
+    ports:
+      # Grafana dashboarding
+      - "${PGWATCH2_GRAFANA_PUBLISHED_PORT}:3000"
+      # Management Web UI (monitored hosts, metrics, metrics configurations)
+      - "${PGWATCH2_WEBUI_PUBLISHED_PORT}:8080"
+      # Gatherer healthcheck / statistics on number of gathered metrics (JSON)
+      - "${PGWATCH2_DAEMON_PUBLISHED_PORT}:8081"
+      # Postgres configuration (or metrics storage DB, when using the cybertec/pgwatch2-postgres image)
+      - "${PGWATCH2_POSTGRES_PUBLISHED_PORT}:5432"
+      # InfluxDB API (when using the InfluxDB version)
+      - "${PGWATCH2_INFLUXDB_API_PUBLISHED_PORT}:8086"
+      # InfluxDB Backup port (when using the InfluxDB version)
+      - "${PGWATCH2_INFLUXDB_BACKUP_PUBLISHED_PORT}:8088"
+    volumes:
+      - pgwatch2-postgresql:/var/lib/postgresql
+      - pgwatch2-grafana:/var/lib/grafana
+      - pgwatch2-influxdb:/var/lib/influxdb
+      - pgwatch2-pgwatch2:/pgwatch2/persistent-config
+
+  pghero:
+    # Docs: https://github.com/ankane/pghero
+    image: ankane/pghero:v2.7.2
+    environment:
+      DATABASE_URL: ${PGHERO_DATABASE_URL}
+    restart: unless-stopped
+    ports:
+      - "${PGHERO_PUBLISHED_PORT}:8080"
+    volumes:
+      - $PWD/scripts/db-monitoring/pghero.yml:/app/config/pghero.yml
+
+  pgadmin4:
+    # Docs: https://www.pgadmin.org/docs/pgadmin4/latest/container_deployment.html
+    image: dpage/pgadmin4:4.26
+    environment:
+      - PGADMIN_DEFAULT_EMAIL=${PGADMIN_DEFAULT_EMAIL}
+      - PGADMIN_DEFAULT_PASSWORD=${PGADMIN_DEFAULT_PASSWORD}
+      - PGADMIN_LISTEN_ADDRESS=${PGADMIN_LISTEN_ADDRESS}
+      - PGADMIN_LISTEN_PORT=${PGADMIN_LISTEN_PORT}
+    restart: unless-stopped
+    ports:
+      - "${PGADMIN_PUBLISHED_PORT}:${PGADMIN_LISTEN_PORT}"
+    volumes:
+      - pgadmin4-pgadmin4:/pgadmin4
+      - pgadmin4-certs:/certs
+      - pgadmin4-lib:/var/lib/pgadmin
+
+volumes:
+  pgwatch2-postgresql:
+  pgwatch2-grafana:
+  pgwatch2-influxdb:
+  pgwatch2-pgwatch2:
+  pgadmin4-pgadmin4:
+  pgadmin4-certs:
+  pgadmin4-lib:
diff --git a/scripts/db-monitoring/pghero_example.yml b/scripts/db-monitoring/pghero_example.yml
new file mode 100644
index 0000000000000000000000000000000000000000..66ebbe40c5b96f02655693f9ee3104baa9f8f286
--- /dev/null
+++ b/scripts/db-monitoring/pghero_example.yml
@@ -0,0 +1,7 @@
+databases:
+  pghero:
+    url: postgres://pghero:pghero@example-1.com:5432/pghero
+  hive_test:
+    url: postgres://pghero:pghero@example-2.com:5432/hive_test
+  bamboo:
+    url: postgres://pghero:pghero@example-3.com:5432/bamboo
diff --git a/scripts/db-monitoring/readme-monitoring.md b/scripts/db-monitoring/readme-monitoring.md
new file mode 100644
index 0000000000000000000000000000000000000000..93c0682180a82726e8b48eb95cf26f63e7e9efa9
--- /dev/null
+++ b/scripts/db-monitoring/readme-monitoring.md
@@ -0,0 +1,92 @@
+# Postgresql monitoring
+
+Tutorial for Postgres version 10 on Ubuntu 18.04, assuming the default
+configuration. We'll set up monitoring with
+[pgwatch2](https://github.com/cybertec-postgresql/pgwatch2)
+and [pghero](https://github.com/ankane/pghero). If you don't need both
+of these tools, modify this tutorial accordingly.
+
+1. Install required apt packages:
+```
+# Should be installed on Ubuntu by default when you have Postgresql
+# installed. Required by both pgwatch2 and pghero.
+sudo apt-get install postgresql-contrib
+
+# Only for pgwatch2, if you need to monitor the host's cpu load, IO
+# and memory usage inside the pgwatch2 instance.
+sudo apt-get install postgresql-plpython3 python3-psutil
+
+# Only for pgwatch2, if you need recommendations about
+# monitored queries. Note: you should install the official Postgresql
+# ubuntu [pgdg](https://www.postgresql.org/about/news/pgdg-apt-repository-for-debianubuntu-1432/)
+# repository to get the apt package postgresql-10-pg-qualstats.
+sudo apt-get install postgresql-10-pg-qualstats
+```
+
+2. Install the custom postgresql configuration file. Be careful with the line
+setting `shared_preload_libraries` (it can override your existing
+settings). You can also append the contents of the file
+`scripts/db-monitoring/setup/postgresql_monitoring.conf` to the bottom
+of your file `/etc/postgresql/10/main/postgresql.conf`.
+```
+sudo cp scripts/db-monitoring/setup/postgresql_monitoring.conf /etc/postgresql/10/main/conf.d/90-monitoring.conf
+```
+**Restart postgresql.**
+
+3. Create roles `pgwatch2` and `pghero` (unprivileged roles used only
+for monitoring) and the template database `template_monitoring`
+in every postgresql instance that you want to monitor
+(a postgres superuser is needed here):
+
+```
+cd scripts/db-monitoring/setup
+PSQL_OPTIONS="-p 5432 -U postgres -h 127.0.0.1" ./setup_monitoring.sh
+```
+
+Note that the above script can also create the database `pghero` for gathering
+historical stats data (when the `create_db_pghero` flag is set to `yes`).
+
+Remember that all monitored databases should replicate the structure
+and objects of the template `template_monitoring`, so you should create them with
+the command:
+```
+create database some_db template template_monitoring
+```
+
+For an already existing database, which you can't recreate, you should
+install the needed objects into it by running the command below (the
+positional flags are explained right after it):
+```
+cd scripts/db-monitoring/setup
+PSQL_OPTIONS="-p 5432 -U postgres -h 127.0.0.1" \
+    ./setup_monitoring.sh some_existing_db_name yes yes no no
+```
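+
+The positional arguments of `setup_monitoring.sh` are, in order (defaults
+taken from the top of that script):
+```
+./setup_monitoring.sh [db_name] [setup_pgwatch2] [setup_pghero] [create_template] [create_db_pghero]
+# defaults:            template_monitoring  yes  yes  yes  no
+```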
+
+4. Create the `.env` file and the configuration file for `pghero`
+(edit both to your needs):
+```
+cp scripts/db-monitoring/docker/.env_example scripts/db-monitoring/.env
+cp scripts/db-monitoring/pghero_example.yml \
+    scripts/db-monitoring/pghero.yml
+```
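+
+For reference, these are the variables `docker-compose.yml` reads from `.env`;
+the values below are only placeholders, adjust them to your setup:
+```
+PGWATCH2_GRAFANA_PUBLISHED_PORT=30000
+PGWATCH2_WEBUI_PUBLISHED_PORT=8080
+PGWATCH2_DAEMON_PUBLISHED_PORT=8081
+PGWATCH2_POSTGRES_PUBLISHED_PORT=5433
+PGWATCH2_INFLUXDB_API_PUBLISHED_PORT=8086
+PGWATCH2_INFLUXDB_BACKUP_PUBLISHED_PORT=8088
+PGHERO_DATABASE_URL=postgres://pghero:pghero@127.0.0.1:5432/pghero
+PGHERO_PUBLISHED_PORT=8085
+PGADMIN_DEFAULT_EMAIL=admin@example.com
+PGADMIN_DEFAULT_PASSWORD=changeme
+PGADMIN_LISTEN_ADDRESS=0.0.0.0
+PGADMIN_LISTEN_PORT=5050
+PGADMIN_PUBLISHED_PORT=5050
+```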
+
+5. Run services `pgwatch2` and `pghero` in docker containers:
+```
+cd scripts/db-monitoring
+docker-compose up -d
+```
+
+6. Register the databases to be monitored by `pgwatch2`
+at http://ci-server.domain:8080. It's recommended to set up
+[postgres-continuous-discovery](https://pgwatch2.readthedocs.io/en/latest/preparing_databases.html#different-db-types-explained).
+Use the unprivileged user `pgwatch2` created earlier.
+
+7. Go to http://ci-server.domain:30000/ to see the dashboards produced by
+`pgwatch2`.
+
+8. Go to http://ci-server.domain:8085/ to see the dashboard produced by
+`pghero`.
+
+9. Optionally install the cron tasks from the file
+`scripts/db-monitoring/setup/pghero_cron_jobs.txt`
+to collect historical data for your pghero instance (on the host
+which runs the pghero docker container).
\ No newline at end of file
diff --git a/scripts/db-monitoring/setup/pghero_cron_jobs.txt b/scripts/db-monitoring/setup/pghero_cron_jobs.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f18ad4d7d7c1f79a02ef98adee290b8c12f4da84
--- /dev/null
+++ b/scripts/db-monitoring/setup/pghero_cron_jobs.txt
@@ -0,0 +1,19 @@
+# Cron tasks for pghero historical data collector.
+# Install with `crontab -e`
+
+# Explanation
+# postgres://pghero:pghero@hive-4.pl.syncad.com:5432/pghero
+# is a connection string to the database in which pghero collects
+# historical data.
+
+# dbmonitoring_pghero_1 is a docker container name with running pghero
+# instance
+
+# Pghero collect query stats.
+*/5 * * * * docker exec -e DATABASE_URL=postgres://pghero:pghero@hive-4.pl.syncad.com:5432/pghero dbmonitoring_pghero_1 bin/rake pghero:capture_query_stats > /dev/null 2>&1
+
+# Pghero collect disk space stats.
+*/5 * * * * docker exec -e DATABASE_URL=postgres://pghero:pghero@hive-4.pl.syncad.com:5432/pghero dbmonitoring_pghero_1 bin/rake pghero:capture_space_stats > /dev/null 2>&1
+
+# Pghero clean up old query stats (monthly).
+@monthly docker exec -e DATABASE_URL=postgres://pghero:pghero@hive-4.pl.syncad.com:5432/pghero dbmonitoring_pghero_1 bin/rake pghero:clean_query_stats > /dev/null 2>&1
diff --git a/scripts/db-monitoring/setup/postgresql_monitoring.conf b/scripts/db-monitoring/setup/postgresql_monitoring.conf
new file mode 100644
index 0000000000000000000000000000000000000000..9989bd4bbaa44c8f10aea2239b97b60eaf8689af
--- /dev/null
+++ b/scripts/db-monitoring/setup/postgresql_monitoring.conf
@@ -0,0 +1,45 @@
+# Custom postgresql conf file for monitoring.
+# Put it into e.g. `/etc/postgresql/10/main/conf.d/90-monitoring.conf`
+
+shared_preload_libraries = 'pg_stat_statements,pg_qualstats'
+
+## pg_stat_statements
+## See https://www.postgresql.org/docs/10/pgstatstatements.html
+
+track_functions = pl
+track_io_timing = on
+track_activity_query_size = 2048
+
+pg_stat_statements.max = 10000
+pg_stat_statements.track = all
+
+
+## pg_qualstats
+## See https://github.com/powa-team/pg_qualstats
+## See https://powa.readthedocs.io/en/latest/components/stats_extensions/pg_qualstats.html
+
+# pg_qualstats.enabled (boolean, default true): whether or not pg_qualstats
+# should be enabled
+
+# pg_qualstats.track_constants (boolean, default true): whether or not
+# pg_qualstats should keep track of each constant value individually.
+# Disabling this GUC will considerably reduce the number of entries
+# necessary to keep track of predicates.
+
+# pg_qualstats.max: the maximum number of predicates and query texts tracked
+# (defaults to 1000)
+
+# pg_qualstats.resolve_oids (boolean, default false): whether or not
+# pg_qualstats should resolve oids at query time, or just store the oids.
+# Enabling this parameter makes the data analysis much easier, since
+# a connection to the database where the query was executed won't be necessary,
+# but it will eat much more space (624 bytes per entry instead of 176).
+# Additionally, this will require some catalog lookups, which aren't free.
+
+# pg_qualstats.track_pg_catalog (boolean, default false): whether or not
+# pg_qualstats should compute predicates on objects in the pg_catalog schema.
+
+# pg_qualstats.sample_rate (double, default -1): the fraction of queries that
+# should be sampled. For example, 0.1 means that only one out of ten queries
+# will be sampled. The default (-1) means automatic, and results in a value
+# of 1 / max_connections, so that statistically, concurrency issues will be rare.
diff --git a/scripts/db-monitoring/setup/setup_monitoring.sh b/scripts/db-monitoring/setup/setup_monitoring.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a1cf6af6e645d970e85a29a779d516b62b0e3531
--- /dev/null
+++ b/scripts/db-monitoring/setup/setup_monitoring.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+# Create stuff for monitoring.
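+#
+# Usage: setup_monitoring.sh [db_name] [setup_pgwatch2] [setup_pghero] [create_template] [create_db_pghero]
+# Defaults: template_monitoring yes yes yes no
+# Connection options for psql can be passed via the PSQL_OPTIONS environment variable.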
+
+set -e
+
+DB_NAME=${1:-template_monitoring}
+SETUP_MONITORING_PGWATCH2=${2:-yes}
+SETUP_MONITORING_PGHERO=${3:-yes}
+CREATE_TEMPLATE=${4:-yes}
+CREATE_DB_PGHERO=${5:-no}
+SQL_SCRIPTS_PATH=$PWD/sql-monitoring
+
+if [ -z "$PSQL_OPTIONS" ]; then
+    # PSQL_OPTIONS="-p 5432 -U postgres -h 127.0.0.1"
+    PSQL_OPTIONS=""
+fi
+
+setup_monitoring_pgwatch2() {
+    echo "Creating role and stuff for pgwatch2"
+    psql $PSQL_OPTIONS -f $SQL_SCRIPTS_PATH/20_create_role_pgwatch2.sql
+    psql $PSQL_OPTIONS -d $DB_NAME -f $SQL_SCRIPTS_PATH/30_setup_monitoring_pgwatch2.sql
+}
+
+setup_monitoring_pghero() {
+    echo "Creating role and stuff for pghero"
+    psql $PSQL_OPTIONS -f $SQL_SCRIPTS_PATH/21_create_role_pghero.sql
+    psql $PSQL_OPTIONS -d $DB_NAME -f $SQL_SCRIPTS_PATH/31_setup_monitoring_pghero.sql
+}
+
+create_db_pghero() {
+    echo "Creating database pghero for collecting historical stats data"
+    psql $PSQL_OPTIONS -f $SQL_SCRIPTS_PATH/40_create_database_pghero.sql
+    psql postgresql://pghero:pghero@127.0.0.1:5432/pghero -f $SQL_SCRIPTS_PATH/41_create_tables_pghero.sql
+}
+
+create_template() {
+    echo "Creating database $DB_NAME"
+    psql $PSQL_OPTIONS -f $SQL_SCRIPTS_PATH/10_create_template.sql --set=db_name=$DB_NAME
+}
+
+lock_template() {
+    echo "Locking connections to database $DB_NAME"
+    psql $PSQL_OPTIONS -f $SQL_SCRIPTS_PATH/50_setup_template.sql --set=db_name=$DB_NAME
+}
+
+main() {
+
+    # Run flow.
+
+    if [ "$CREATE_TEMPLATE" = "yes" ]; then
+        create_template
+    fi
+
+    if [ "$SETUP_MONITORING_PGWATCH2" = "yes" ]; then
+        setup_monitoring_pgwatch2
+    fi
+
+    if [ "$SETUP_MONITORING_PGHERO" = "yes" ]; then
+        setup_monitoring_pghero
+    fi
+
+    if [ "$CREATE_DB_PGHERO" = "yes" ]; then
+        create_db_pghero
+    fi
+
+    if [ "$CREATE_TEMPLATE" = "yes" ]; then
+        lock_template
+    fi
+
+}
+
+main
diff --git a/scripts/db-monitoring/setup/sql-monitoring/10_create_template.sql b/scripts/db-monitoring/setup/sql-monitoring/10_create_template.sql
new file mode 100644
index 0000000000000000000000000000000000000000..2e66e6f426862704e0264dc543313156432efad0
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/10_create_template.sql
@@ -0,0 +1,23 @@
+-- Create database
+
+-- Example run:
+-- psql -p 5432 -U postgres -h 127.0.0.1 -f ./create_template.sql --set=db_name=template_monitoring
+
+SET client_encoding = 'UTF8';
+SET client_min_messages = 'warning';
+
+-- Handle default values for variables.
+\set db_name ':db_name'
+-- now db_name is set to the string ':db_name' if was not already set.
+-- Checking it using a CASE statement:
+SELECT CASE
+  WHEN :'db_name'= ':db_name'
+  THEN 'template_monitoring'
+  ELSE :'db_name'
+END AS "db_name"
+\gset
+
+\echo Creating database :db_name
+
+CREATE DATABASE :db_name;
+COMMENT ON DATABASE :db_name IS 'Template for monitoring';
diff --git a/scripts/db-monitoring/setup/sql-monitoring/20_create_role_pgwatch2.sql b/scripts/db-monitoring/setup/sql-monitoring/20_create_role_pgwatch2.sql
new file mode 100644
index 0000000000000000000000000000000000000000..af1a12c0aa7393f4e7d83712cdb4495d18deb3e3
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/20_create_role_pgwatch2.sql
@@ -0,0 +1,27 @@
+-- Create database
+
+-- Example run:
+-- psql -p 5432 -U postgres -h 127.0.0.1 -f ./create_role_pgwatch2.sql
+
+SET client_encoding = 'UTF8';
+SET client_min_messages = 'warning';
+
+\echo Creating role pgwatch2
+
+DO
+$do$
+BEGIN
+    IF EXISTS (SELECT * FROM pg_user WHERE pg_user.usename = 'pgwatch2') THEN
+        raise warning 'Role % already exists', 'pgwatch2';
+    ELSE
+        -- NB! For critical databases it might make sense to ensure that the user account
+        -- used for monitoring can only open a limited number of connections
+        -- (there are according checks in code, but multiple instances might be launched)
+        CREATE ROLE pgwatch2 WITH LOGIN PASSWORD 'pgwatch2';
+        COMMENT ON ROLE pgwatch2 IS
+            'Role for monitoring https://github.com/cybertec-postgresql/pgwatch2';
+        ALTER ROLE pgwatch2 CONNECTION LIMIT 10;
+        GRANT pg_monitor TO pgwatch2;
+   END IF;
+END
+$do$;
diff --git a/scripts/db-monitoring/setup/sql-monitoring/21_create_role_pghero.sql b/scripts/db-monitoring/setup/sql-monitoring/21_create_role_pghero.sql
new file mode 100644
index 0000000000000000000000000000000000000000..ebb1adc628580f21ac99fcc30d5a3fcd41f93d88
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/21_create_role_pghero.sql
@@ -0,0 +1,25 @@
+-- Create database
+
+-- Example run:
+-- psql -p 5432 -U postgres -h 127.0.0.1 -f ./create_role_pghero.sql
+
+SET client_encoding = 'UTF8';
+SET client_min_messages = 'warning';
+
+\echo Creating role pghero
+
+DO
+$do$
+BEGIN
+    IF EXISTS (SELECT * FROM pg_user WHERE pg_user.usename = 'pghero') THEN
+        raise warning 'Role % already exists', 'pghero';
+    ELSE
+        CREATE ROLE pghero WITH LOGIN PASSWORD 'pghero';
+        COMMENT ON ROLE pghero IS
+            'Role for monitoring https://github.com/ankane/pghero/';
+        ALTER ROLE pghero CONNECTION LIMIT 10;
+        ALTER ROLE pghero SET search_path = pghero, pg_catalog, public;
+        GRANT pg_monitor TO pghero;
+   END IF;
+END
+$do$;
diff --git a/scripts/db-monitoring/setup/sql-monitoring/30_setup_monitoring_pgwatch2.sql b/scripts/db-monitoring/setup/sql-monitoring/30_setup_monitoring_pgwatch2.sql
new file mode 100644
index 0000000000000000000000000000000000000000..c76ad6f7c067999eae5a1088c697810c5d9aa371
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/30_setup_monitoring_pgwatch2.sql
@@ -0,0 +1,401 @@
+-- Configure database for monitoring by unprivileged user `pgwatch2`
+-- using program https://github.com/cybertec-postgresql/pgwatch2/
+
+-- Example run:
+-- psql -p 5432 -U postgres -h 127.0.0.1 -d template_monitoring -f ./setup_monitoring_pgwatch2.sql
+
+SET client_encoding = 'UTF8';
+SET client_min_messages = 'warning';
+
+
+\echo Installing monitoring stuff for pgwatch2
+
+BEGIN;
+
+CREATE SCHEMA IF NOT EXISTS pgwatch2;
+COMMENT ON SCHEMA pgwatch2 IS
+    'Schema contains objects for monitoring https://github.com/cybertec-postgresql/pgwatch2';
+
+
+CREATE EXTENSION IF NOT EXISTS plpython3u WITH SCHEMA pg_catalog;
+COMMENT ON EXTENSION plpython3u IS 'PL/Python3U untrusted procedural language';
+
+CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA public;
+COMMENT ON EXTENSION pg_stat_statements
+    IS 'Track execution statistics of all SQL statements executed';
+
+CREATE EXTENSION IF NOT EXISTS pg_qualstats;
+COMMENT ON EXTENSION pg_qualstats
+    IS 'Statistics on predicates found in WHERE statements and JOIN clauses.';
+
+CREATE FUNCTION pgwatch2.get_load_average(OUT load_1min double precision,
+        OUT load_5min double precision, OUT load_15min double precision)
+        RETURNS record
+    LANGUAGE plpython3u SECURITY DEFINER
+    AS $$
+from os import getloadavg
+la = getloadavg()
+return [la[0], la[1], la[2]]
+$$;
+
+
+CREATE FUNCTION pgwatch2.get_psutil_cpu(OUT cpu_utilization double precision,
+        OUT load_1m_norm double precision, OUT load_1m double precision,
+        OUT load_5m_norm double precision, OUT load_5m double precision,
+        OUT "user" double precision, OUT system double precision,
+        OUT idle double precision, OUT iowait double precision,
+        OUT irqs double precision, OUT other double precision) RETURNS record
+    LANGUAGE plpython3u SECURITY DEFINER
+    AS $$
+
+from os import getloadavg
+from psutil import cpu_times_percent, cpu_percent, cpu_count
+from threading import Thread
+
+class GetCpuPercentThread(Thread):
+    def __init__(self, interval_seconds):
+        self.interval_seconds = interval_seconds
+        self.cpu_utilization_info = None
+        super(GetCpuPercentThread, self).__init__()
+
+    def run(self):
+        self.cpu_utilization_info = cpu_percent(self.interval_seconds)
+
+t = GetCpuPercentThread(0.5)
+t.start()
+
+ct = cpu_times_percent(0.5)
+la = getloadavg()
+
+t.join()
+
+return t.cpu_utilization_info, la[0] / cpu_count(), la[0], \
+    la[1] / cpu_count(), la[1], ct.user, ct.system, ct.idle, ct.iowait, \
+    ct.irq + ct.softirq, ct.steal + ct.guest + ct.guest_nice
+
+$$;
+
+
+CREATE FUNCTION pgwatch2.get_psutil_disk(OUT dir_or_tablespace text,
+        OUT path text, OUT total double precision, OUT used double precision,
+        OUT free double precision, OUT percent double precision)
+        RETURNS SETOF record
+    LANGUAGE plpython3u SECURITY DEFINER
+    AS $$
+
+from os import stat
+from os.path import join, exists
+from psutil import disk_usage
+ret_list = []
+
+# data_directory
+sqlstring = """select
+    current_setting('data_directory') as dd,
+    current_setting('log_directory') as ld,
+    current_setting('server_version_num')::int as pgver"""
+r = plpy.execute(sqlstring)
+dd = r[0]['dd']
+ld = r[0]['ld']
+du_dd = disk_usage(dd)
+ret_list.append(['data_directory', dd, du_dd.total, du_dd.used, du_dd.free,
+    du_dd.percent])
+
+dd_stat = stat(dd)
+# log_directory
+if ld:
+    if not ld.startswith('/'):
+        ld_path = join(dd, ld)
+    else:
+        ld_path = ld
+    if exists(ld_path):
+        log_stat = stat(ld_path)
+        if log_stat.st_dev == dd_stat.st_dev:
+            pass # no new info, same device
+        else:
+            du = disk_usage(ld_path)
+            ret_list.append(['log_directory', ld_path, du.total, du.used,
+                du.free, du.percent])
+
+# WAL / XLOG directory
+# plpy.notice('pg_wal' if r[0]['pgver'] >= 100000 else 'pg_xlog', r[0]['pgver'])
+joined_path_wal = join(r[0]['dd'], 'pg_wal' if r[0]['pgver'] >= 100000 else 'pg_xlog')
+wal_stat = stat(joined_path_wal)
+if wal_stat.st_dev == dd_stat.st_dev:
+    pass # no new info, same device
+else:
+    du = disk_usage(joined_path_wal)
+    ret_list.append(['pg_wal', joined_path_wal, du.total, du.used, du.free,
+        du.percent])
+
+# add user created tablespaces if any
+sql_tablespaces = """
+    select spcname as name, pg_catalog.pg_tablespace_location(oid) as location
+    from pg_catalog.pg_tablespace where not spcname like any(array[E'pg\\_%'])"""
+for row in plpy.cursor(sql_tablespaces):
+    du = disk_usage(row['location'])
+    ret_list.append([row['name'], row['location'], du.total, du.used, du.free,
+        du.percent])
+return ret_list
+
+$$;
+
+
+CREATE FUNCTION pgwatch2.get_psutil_disk_io_total(
+        OUT read_count double precision, OUT write_count double precision,
+        OUT read_bytes double precision, OUT write_bytes double precision)
+        RETURNS record
+    LANGUAGE plpython3u SECURITY DEFINER
+    AS $$
+from psutil import disk_io_counters
+dc = disk_io_counters(perdisk=False)
+return dc.read_count, dc.write_count, dc.read_bytes, dc.write_bytes
+$$;
+
+
+CREATE FUNCTION pgwatch2.get_psutil_mem(OUT total double precision,
+        OUT used double precision, OUT free double precision,
+        OUT buff_cache double precision, OUT available double precision,
+        OUT percent double precision, OUT swap_total double precision,
+        OUT swap_used double precision, OUT swap_free double precision,
+        OUT swap_percent double precision) RETURNS record
+    LANGUAGE plpython3u SECURITY DEFINER
+    AS $$
+from psutil import virtual_memory, swap_memory
+vm = virtual_memory()
+sw = swap_memory()
+return vm.total, vm.used, vm.free, vm.buffers + vm.cached, vm.available, \
+    vm.percent, sw.total, sw.used, sw.free, sw.percent
+$$;
+
+
+CREATE FUNCTION pgwatch2.get_stat_activity() RETURNS SETOF pg_stat_activity
+    LANGUAGE sql SECURITY DEFINER
+    AS $$
+  select * from pg_stat_activity
+    where datname = current_database() and pid != pg_backend_pid()
+$$;
+
+
+CREATE FUNCTION pgwatch2.get_stat_replication()
+        RETURNS SETOF pg_stat_replication
+    LANGUAGE sql SECURITY DEFINER
+    AS $$
+  select * from pg_stat_replication
+$$;
+
+
+CREATE FUNCTION pgwatch2.get_stat_statements()
+        RETURNS SETOF public.pg_stat_statements
+    LANGUAGE sql SECURITY DEFINER
+    AS $$
+  select
+    s.*
+  from
+    pg_stat_statements s
+    join
+    pg_database d
+      on d.oid = s.dbid and d.datname = current_database()
+$$;
+
+
+CREATE FUNCTION pgwatch2.get_table_bloat_approx_sql(OUT full_table_name text,
+        OUT approx_bloat_percent double precision,
+        OUT approx_bloat_bytes double precision,
+        OUT fillfactor integer) RETURNS SETOF record
+    LANGUAGE sql SECURITY DEFINER
+    AS $$
+
+SELECT
+    quote_ident(schemaname) || '.' || quote_ident(tblname) as full_table_name,
+    bloat_ratio as approx_bloat_percent,
+    bloat_size as approx_bloat_bytes,
+    fillfactor
+FROM
+    (
+        /* WARNING: executed with a non-superuser role, the query inspect only tables you are granted to read.
+         * This query is compatible with PostgreSQL 9.0 and more
+         */
+        SELECT
+            current_database(),
+            schemaname,
+            tblname,
+            bs * tblpages AS real_size,
+            (tblpages - est_tblpages) * bs AS extra_size,
+            CASE
+                WHEN tblpages - est_tblpages > 0
+                    THEN 100 * (tblpages - est_tblpages) / tblpages :: float
+                ELSE 0
+            END AS extra_ratio,
+            fillfactor,
+            CASE
+                WHEN tblpages - est_tblpages_ff > 0
+                    THEN (tblpages - est_tblpages_ff) * bs
+                ELSE 0
+            END AS bloat_size,
+            CASE
+                WHEN tblpages - est_tblpages_ff > 0
+                    THEN 100 * (tblpages - est_tblpages_ff) / tblpages :: float
+                ELSE 0
+            END AS bloat_ratio,
+            is_na -- , (pst).free_percent + (pst).dead_tuple_percent AS real_frag
+        FROM
+            (
+                SELECT
+                    ceil(reltuples / ((bs - page_hdr) / tpl_size))
+                        + ceil(toasttuples / 4) AS est_tblpages,
+                    ceil(
+                        reltuples / ((bs - page_hdr) * fillfactor
+                            / (tpl_size * 100))
+                    ) + ceil(toasttuples / 4) AS est_tblpages_ff,
+                    tblpages,
+                    fillfactor,
+                    bs,
+                    tblid,
+                    schemaname,
+                    tblname,
+                    heappages,
+                    toastpages,
+                    is_na -- , stattuple.pgstattuple(tblid) AS pst
+                FROM
+                    (
+                        SELECT
+                            (
+                                4 + tpl_hdr_size + tpl_data_size + (2 * ma) - CASE
+                                    WHEN tpl_hdr_size % ma = 0 THEN ma
+                                    ELSE tpl_hdr_size % ma
+                                END - CASE
+                                    WHEN ceil(tpl_data_size) :: int % ma = 0 THEN ma
+                                    ELSE ceil(tpl_data_size) :: int % ma
+                                END
+                            ) AS tpl_size,
+                            bs - page_hdr AS size_per_block,
+                            (heappages + toastpages) AS tblpages,
+                            heappages,
+                            toastpages,
+                            reltuples,
+                            toasttuples,
+                            bs,
+                            page_hdr,
+                            tblid,
+                            schemaname,
+                            tblname,
+                            fillfactor,
+                            is_na
+                        FROM
+                            (
+                                SELECT
+                                    tbl.oid AS tblid,
+                                    ns.nspname AS schemaname,
+                                    tbl.relname AS tblname,
+                                    tbl.reltuples,
+                                    tbl.relpages AS heappages,
+                                    coalesce(toast.relpages, 0) AS toastpages,
+                                    coalesce(toast.reltuples, 0) AS toasttuples,
+                                    coalesce(
+                                        substring(
+                                            array_to_string(tbl.reloptions, ' ')
+                                            FROM
+                                                'fillfactor=([0-9]+)'
+                                        ) :: smallint,
+                                        100
+                                    ) AS fillfactor,
+                                    current_setting('block_size') :: numeric AS bs,
+                                    CASE
+                                        WHEN version() ~ 'mingw32'
+                                            OR version() ~ '64-bit|x86_64|ppc64|ia64|amd64'
+                                        THEN 8
+                                        ELSE 4
+                                    END AS ma,
+                                    24 AS page_hdr,
+                                    23 + CASE
+                                        WHEN MAX(coalesce(null_frac, 0)) > 0
+                                        THEN (7 + count(*)) / 8
+                                        ELSE 0 :: int
+                                    END + CASE
+                                        WHEN tbl.relhasoids THEN 4
+                                        ELSE 0
+                                    END AS tpl_hdr_size,
+                                    sum(
+                                        (1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)
+                                    ) AS tpl_data_size,
+                                    bool_or(att.atttypid = 'pg_catalog.name' :: regtype)
+                                    OR count(att.attname) <> count(s.attname) AS is_na
+                                FROM
+                                    pg_attribute AS att
+                                    JOIN pg_class AS tbl ON att.attrelid = tbl.oid
+                                    JOIN pg_namespace AS ns ON ns.oid = tbl.relnamespace
+                                    LEFT JOIN pg_stats AS s ON s.schemaname = ns.nspname
+                                    AND s.tablename = tbl.relname
+                                    AND s.inherited = false
+                                    AND s.attname = att.attname
+                                    LEFT JOIN pg_class AS toast ON tbl.reltoastrelid = toast.oid
+                                WHERE
+                                    att.attnum > 0
+                                    AND NOT att.attisdropped
+                                    AND tbl.relkind IN ('r', 'm')
+                                    AND ns.nspname != 'information_schema'
+                                GROUP BY
+                                    1,
+                                    2,
+                                    3,
+                                    4,
+                                    5,
+                                    6,
+                                    7,
+                                    8,
+                                    9,
+                                    10,
+                                    tbl.relhasoids
+                                ORDER BY
+                                    2,
+                                    3
+                            ) AS s
+                    ) AS s2
+            ) AS s3 -- WHERE NOT is_na
+    ) s4
+$$;
+
+
+CREATE FUNCTION pgwatch2.get_wal_size() RETURNS bigint
+    LANGUAGE sql SECURITY DEFINER
+    AS $$
+select (sum((pg_stat_file('pg_wal/' || name)).size))::int8 from pg_ls_waldir()
+$$;
+
+
+GRANT USAGE ON SCHEMA pgwatch2 TO pg_monitor;
+
+GRANT EXECUTE ON FUNCTION pgwatch2.get_load_average(
+    OUT load_1min double precision, OUT load_5min double precision,
+    OUT load_15min double precision) TO pg_monitor;
+
+GRANT EXECUTE ON FUNCTION pgwatch2.get_psutil_cpu(
+    OUT cpu_utilization double precision, OUT load_1m_norm double precision,
+    OUT load_1m double precision, OUT load_5m_norm double precision,
+    OUT load_5m double precision, OUT "user" double precision,
+    OUT system double precision, OUT idle double precision,
+    OUT iowait double precision, OUT irqs double precision,
+    OUT other double precision) TO pg_monitor;
+
+GRANT EXECUTE ON FUNCTION pgwatch2.get_psutil_disk(OUT dir_or_tablespace text,
+    OUT path text, OUT total double precision, OUT used double precision,
+    OUT free double precision, OUT percent double precision) TO pg_monitor;
+
+GRANT EXECUTE ON FUNCTION pgwatch2.get_psutil_disk_io_total(
+    OUT read_count double precision, OUT write_count double precision,
+    OUT read_bytes double precision, OUT write_bytes double precision)
+    TO pg_monitor;
+
+GRANT EXECUTE ON FUNCTION pgwatch2.get_psutil_mem(OUT total double precision,
+    OUT used double precision, OUT free double precision,
+    OUT buff_cache double precision, OUT available double precision,
+    OUT percent double precision, OUT swap_total double precision,
+    OUT swap_used double precision, OUT swap_free double precision,
+    OUT swap_percent double precision) TO pg_monitor;
+
+GRANT EXECUTE ON FUNCTION pgwatch2.get_wal_size() TO pg_monitor;
+
+GRANT SELECT ON TABLE pg_catalog.pg_subscription TO pg_monitor;
+
+
+COMMIT;
diff --git a/scripts/db-monitoring/setup/sql-monitoring/31_setup_monitoring_pghero.sql b/scripts/db-monitoring/setup/sql-monitoring/31_setup_monitoring_pghero.sql
new file mode 100644
index 0000000000000000000000000000000000000000..deba4b36466e64c8e3f2c85015c4723cb4b09de8
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/31_setup_monitoring_pghero.sql
@@ -0,0 +1,72 @@
+-- Configure database for monitoring by an unprivileged user
+-- using program https://github.com/ankane/pghero/
+
+-- Example run:
+-- psql -p 5432 -U postgres -h 127.0.0.1 -d template_monitoring -f ./setup_monitoring_pghero.sql
+
+SET client_encoding = 'UTF8';
+SET client_min_messages = 'warning';
+
+
+\echo Installing monitoring stuff for pghero
+
+BEGIN;
+
+CREATE SCHEMA IF NOT EXISTS pghero;
+COMMENT ON SCHEMA pghero IS
+    'Schema contains objects for monitoring https://github.com/ankane/pghero/';
+
+CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA public;
+COMMENT ON EXTENSION pg_stat_statements
+    IS 'Track execution statistics of all SQL statements executed';
+
+-- view queries
+CREATE OR REPLACE FUNCTION pghero.pg_stat_activity() RETURNS SETOF pg_stat_activity AS
+$$
+  SELECT * FROM pg_catalog.pg_stat_activity;
+$$ LANGUAGE sql VOLATILE SECURITY DEFINER;
+
+CREATE OR REPLACE VIEW pghero.pg_stat_activity AS SELECT * FROM pghero.pg_stat_activity();
+
+-- kill queries
+CREATE OR REPLACE FUNCTION pghero.pg_terminate_backend(pid int) RETURNS boolean AS
+$$
+  SELECT * FROM pg_catalog.pg_terminate_backend(pid);
+$$ LANGUAGE sql VOLATILE SECURITY DEFINER;
+
+-- query stats
+CREATE OR REPLACE FUNCTION pghero.pg_stat_statements() RETURNS SETOF pg_stat_statements AS
+$$
+  SELECT * FROM public.pg_stat_statements;
+$$ LANGUAGE sql VOLATILE SECURITY DEFINER;
+
+CREATE OR REPLACE VIEW pghero.pg_stat_statements AS SELECT * FROM pghero.pg_stat_statements();
+
+-- query stats reset
+CREATE OR REPLACE FUNCTION pghero.pg_stat_statements_reset() RETURNS void AS
+$$
+  SELECT public.pg_stat_statements_reset();
+$$ LANGUAGE sql VOLATILE SECURITY DEFINER;
+
+-- improved query stats reset for Postgres 12+ (uncomment on 12 or newer; the three-argument reset is not available on earlier versions)
+-- CREATE OR REPLACE FUNCTION pghero.pg_stat_statements_reset(userid oid, dbid oid, queryid bigint) RETURNS void AS
+-- $$
+--   SELECT public.pg_stat_statements_reset(userid, dbid, queryid);
+-- $$ LANGUAGE sql VOLATILE SECURITY DEFINER;
+
+-- suggested indexes
+CREATE OR REPLACE FUNCTION pghero.pg_stats() RETURNS
+TABLE(schemaname name, tablename name, attname name, null_frac real, avg_width integer, n_distinct real) AS
+$$
+  SELECT schemaname, tablename, attname, null_frac, avg_width, n_distinct FROM pg_catalog.pg_stats;
+$$ LANGUAGE sql VOLATILE SECURITY DEFINER;
+
+CREATE OR REPLACE VIEW pghero.pg_stats AS SELECT * FROM pghero.pg_stats();
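+
+-- Usage sketch (illustrative only): a monitoring role holding the privileges granted
+-- below can read activity and statement stats through the wrapper views without
+-- superuser rights, e.g.:
+--   SELECT pid, state, query FROM pghero.pg_stat_activity;
+--   SELECT calls, query FROM pghero.pg_stat_statements;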
+
+GRANT USAGE ON SCHEMA pghero TO pg_monitor;
+
+GRANT SELECT ON ALL TABLES IN SCHEMA pghero TO pg_monitor;
+
+GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA pghero TO pg_monitor;
+
+COMMIT;
diff --git a/scripts/db-monitoring/setup/sql-monitoring/40_create_database_pghero.sql b/scripts/db-monitoring/setup/sql-monitoring/40_create_database_pghero.sql
new file mode 100644
index 0000000000000000000000000000000000000000..f5d8c428c8d4d633ea9cdbd3c8f455425dbffd74
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/40_create_database_pghero.sql
@@ -0,0 +1,13 @@
+-- Create database
+
+-- Example run:
+-- psql -p 5432 -U postgres -h 127.0.0.1 -f ./create_database_pghero.sql
+
+SET client_encoding = 'UTF8';
+SET client_min_messages = 'warning';
+
+\echo Creating database pghero
+
+CREATE DATABASE pghero OWNER pghero;
+COMMENT ON DATABASE pghero
+    IS 'Historical data for monitoring https://github.com/ankane/pghero/';
\ No newline at end of file
diff --git a/scripts/db-monitoring/setup/sql-monitoring/41_create_tables_pghero.sql b/scripts/db-monitoring/setup/sql-monitoring/41_create_tables_pghero.sql
new file mode 100644
index 0000000000000000000000000000000000000000..7bb2193a918a49868e979dfb2b1201ba3b918026
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/41_create_tables_pghero.sql
@@ -0,0 +1,41 @@
+-- Create tables
+
+-- Creates tables for gathering historical stats data. They are needed in the
+-- pghero database only.
+-- Example run:
+-- psql postgresql://pghero:pghero@127.0.0.1:5432/pghero -f ./create_tables_pghero.sql
+
+SET client_encoding = 'UTF8';
+SET client_min_messages = 'warning';
+
+\echo Creating tables in database pghero
+
+\c pghero pghero
+
+BEGIN;
+
+CREATE SCHEMA pghero;
+
+CREATE TABLE "pghero"."pghero_query_stats" (
+  "id" bigserial primary key,
+  "database" text,
+  "user" text,
+  "query" text,
+  "query_hash" bigint,
+  "total_time" float,
+  "calls" bigint,
+  "captured_at" timestamp
+);
+CREATE INDEX ON "pghero"."pghero_query_stats" ("database", "captured_at");
+
+CREATE TABLE "pghero_space_stats" (
+  "id" bigserial primary key,
+  "database" text,
+  "schema" text,
+  "relation" text,
+  "size" bigint,
+  "captured_at" timestamp
+);
+CREATE INDEX ON "pghero_space_stats" ("database", "captured_at");
+
+COMMIT;
diff --git a/scripts/db-monitoring/setup/sql-monitoring/50_setup_template.sql b/scripts/db-monitoring/setup/sql-monitoring/50_setup_template.sql
new file mode 100644
index 0000000000000000000000000000000000000000..56a7d87906922d0b022084b8554e4530ef39e338
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/50_setup_template.sql
@@ -0,0 +1,24 @@
+-- Mark database as a template
+
+-- Example run:
+-- psql -p 5432 -U postgres -h 127.0.0.1 -f ./setup_template.sql --set=db_name=template_monitoring
+
+SET client_encoding = 'UTF8';
+SET client_min_messages = 'warning';
+
+-- Handle default values for variables.
+\set db_name ':db_name'
+-- now db_name is set to the string ':db_name' if it was not already set.
+-- Checking it using a CASE statement:
+SELECT CASE
+  WHEN :'db_name' = ':db_name'
+  THEN 'template_monitoring'
+  ELSE :'db_name'
+END AS "db_name"
+\gset
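+
+-- With the default handling above, both invocations work (the database name in the
+-- second call is only an example):
+--   psql -p 5432 -U postgres -h 127.0.0.1 -f ./setup_template.sql
+--   psql -p 5432 -U postgres -h 127.0.0.1 -f ./setup_template.sql --set=db_name=my_monitoring_db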
+
+update pg_database
+    set
+        datistemplate = true,
+        datallowconn = false
+    where datname = :'db_name';
diff --git a/scripts/db-monitoring/setup/sql-monitoring/ddl_deps.sql b/scripts/db-monitoring/setup/sql-monitoring/ddl_deps.sql
new file mode 100644
index 0000000000000000000000000000000000000000..991046901337a2134df7452fe3112a45ea1c1832
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/ddl_deps.sql
@@ -0,0 +1,208 @@
+/**
+Easy way to drop and recreate table or view dependencies, when you need to alter
+something in them.
+See http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/.
+Enhanced by Wojciech Barcik wbarcik@syncad.com (handling of rules).
+*/
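+
+-- Minimal usage sketch (object names below are only illustrative):
+--   SELECT deps_save_and_drop_dependencies('public', 'my_view');
+--   -- ...alter the table or view that my_view depends on here...
+--   SELECT deps_restore_dependencies('public', 'my_view');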
+
+
+-- SEQUENCE: deps_saved_ddl_deps_id_seq
+
+-- DROP SEQUENCE deps_saved_ddl_deps_id_seq;
+
+CREATE SEQUENCE deps_saved_ddl_deps_id_seq
+    INCREMENT 1
+    START 1
+    MINVALUE 1
+    MAXVALUE 9223372036854775807
+    CACHE 1;
+
+
+-- Table: deps_saved_ddl
+
+-- DROP TABLE deps_saved_ddl;
+
+CREATE TABLE deps_saved_ddl
+(
+    deps_id integer NOT NULL DEFAULT nextval('deps_saved_ddl_deps_id_seq'::regclass),
+    deps_view_schema character varying(255) COLLATE pg_catalog."default",
+    deps_view_name character varying(255) COLLATE pg_catalog."default",
+    deps_ddl_to_run text COLLATE pg_catalog."default",
+    CONSTRAINT deps_saved_ddl_pkey PRIMARY KEY (deps_id)
+)
+WITH (
+    OIDS = FALSE
+)
+TABLESPACE pg_default;
+
+
+-- create table deps_saved_ddl
+-- (
+--     deps_id serial primary key,
+--     deps_view_schema varchar(255),
+--     deps_view_name varchar(255),
+--     deps_ddl_to_run text
+-- );
+
+
+-- FUNCTION: deps_save_and_drop_dependencies(character varying, character varying, boolean)
+
+-- DROP FUNCTION deps_save_and_drop_dependencies(character varying, character varying, boolean);
+
+CREATE OR REPLACE FUNCTION deps_save_and_drop_dependencies(
+    p_view_schema character varying,
+    p_view_name character varying,
+    drop_relation boolean DEFAULT true
+  )
+  RETURNS void
+  LANGUAGE 'plpgsql'
+  COST 100
+  VOLATILE
+AS $BODY$
+/**
+From http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/
+@wojtek added DDL for rules.
+
+Drops dependencies of view, but saves them into table `deps_saved_ddl`, for
+future restoration. Use function `deps_restore_dependencies` to restore
+dependencies dropped by this function.
+*/
+declare
+  v_curr record;
+begin
+for v_curr in
+(
+  select obj_schema, obj_name, obj_type from
+  (
+  with recursive recursive_deps(obj_schema, obj_name, obj_type, depth) as
+  (
+    select p_view_schema, p_view_name, null::varchar, 0
+    union
+    select dep_schema::varchar, dep_name::varchar, dep_type::varchar,
+        recursive_deps.depth + 1 from
+    (
+      select ref_nsp.nspname ref_schema, ref_cl.relname ref_name,
+          rwr_cl.relkind dep_type, rwr_nsp.nspname dep_schema,
+          rwr_cl.relname dep_name
+      from pg_depend dep
+      join pg_class ref_cl on dep.refobjid = ref_cl.oid
+      join pg_namespace ref_nsp on ref_cl.relnamespace = ref_nsp.oid
+      join pg_rewrite rwr on dep.objid = rwr.oid
+      join pg_class rwr_cl on rwr.ev_class = rwr_cl.oid
+      join pg_namespace rwr_nsp on rwr_cl.relnamespace = rwr_nsp.oid
+      where dep.deptype = 'n'
+      and dep.classid = 'pg_rewrite'::regclass
+    ) deps
+    join recursive_deps on deps.ref_schema = recursive_deps.obj_schema
+        and deps.ref_name = recursive_deps.obj_name
+    where (deps.ref_schema != deps.dep_schema or deps.ref_name != deps.dep_name)
+  )
+  select obj_schema, obj_name, obj_type, depth
+  from recursive_deps
+  where depth > 0
+  ) t
+  group by obj_schema, obj_name, obj_type
+  order by max(depth) desc
+) loop
+
+  insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+  select p_view_schema, p_view_name, 'COMMENT ON ' ||
+  case
+    when c.relkind = 'v' then 'VIEW'
+    when c.relkind = 'm' then 'MATERIALIZED VIEW'
+  else ''
+  end
+  || ' ' || n.nspname || '.' || c.relname || ' IS '''
+      || replace(d.description, '''', '''''') || ''';'
+  from pg_class c
+  join pg_namespace n on n.oid = c.relnamespace
+  join pg_description d on d.objoid = c.oid and d.objsubid = 0
+  where n.nspname = v_curr.obj_schema and c.relname = v_curr.obj_name
+      and d.description is not null;
+
+  insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+  select p_view_schema, p_view_name, 'COMMENT ON COLUMN ' || n.nspname || '.'
+      || c.relname || '.' || a.attname || ' IS '''
+      || replace(d.description, '''', '''''') || ''';'
+  from pg_class c
+  join pg_attribute a on c.oid = a.attrelid
+  join pg_namespace n on n.oid = c.relnamespace
+  join pg_description d on d.objoid = c.oid and d.objsubid = a.attnum
+  where n.nspname = v_curr.obj_schema and c.relname = v_curr.obj_name
+      and d.description is not null;
+
+  insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+  select p_view_schema, p_view_name, 'GRANT ' || privilege_type || ' ON '
+      || table_schema || '.' || table_name || ' TO ' || grantee
+  from information_schema.role_table_grants
+  where table_schema = v_curr.obj_schema and table_name = v_curr.obj_name;
+
+  if v_curr.obj_type = 'v' then
+
+    insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+    select p_view_schema, p_view_name, definition
+    from pg_catalog.pg_rules
+    where schemaname = v_curr.obj_schema and tablename = v_curr.obj_name;
+
+    insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+    select p_view_schema, p_view_name, 'CREATE VIEW '
+        || v_curr.obj_schema || '.' || v_curr.obj_name || ' AS ' || view_definition
+    from information_schema.views
+    where table_schema = v_curr.obj_schema and table_name = v_curr.obj_name;
+
+  elsif v_curr.obj_type = 'm' then
+    insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+    select p_view_schema, p_view_name, 'CREATE MATERIALIZED VIEW '
+        || v_curr.obj_schema || '.' || v_curr.obj_name || ' AS ' || definition
+    from pg_matviews
+    where schemaname = v_curr.obj_schema and matviewname = v_curr.obj_name;
+  end if;
+
+  if drop_relation = true then
+    execute 'DROP ' ||
+    case
+      when v_curr.obj_type = 'v' then 'VIEW'
+      when v_curr.obj_type = 'm' then 'MATERIALIZED VIEW'
+    end
+    || ' ' || v_curr.obj_schema || '.' || v_curr.obj_name;
+  end if;
+
+end loop;
+end;
+$BODY$;
+
+
+-- FUNCTION: deps_restore_dependencies(character varying, character varying)
+
+-- DROP FUNCTION deps_restore_dependencies(character varying, character varying);
+
+CREATE OR REPLACE FUNCTION deps_restore_dependencies(
+    p_view_schema character varying,
+    p_view_name character varying
+  )
+  RETURNS void
+  LANGUAGE 'plpgsql'
+  COST 100
+  VOLATILE
+AS $BODY$
+/**
+From http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/
+
+Restores dependencies dropped by function `deps_save_and_drop_dependencies`.
+*/
+declare
+  v_curr record;
+begin
+for v_curr in
+(
+  select deps_ddl_to_run
+  from deps_saved_ddl
+  where deps_view_schema = p_view_schema and deps_view_name = p_view_name
+  order by deps_id desc
+) loop
+  execute v_curr.deps_ddl_to_run;
+end loop;
+delete from deps_saved_ddl
+where deps_view_schema = p_view_schema and deps_view_name = p_view_name;
+end;
+$BODY$;
diff --git a/scripts/operation_extractor.py b/scripts/operation_extractor.py
new file mode 100755
index 0000000000000000000000000000000000000000..f7821c7e984cc9558ffc7c475028cb2f4e14f328
--- /dev/null
+++ b/scripts/operation_extractor.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+"""
+This script will scan blocks from range `from_block` to `to_block` if it finds operation defined in `operations` it will
+save such block to a `output_file`. Blocks not containing any operations from list `operations` will be saved empty.
+
+There is an option to save only operations data, without blocks data: use `--dump-ops-only`
+
+You can pass multiple operations.
+
+Example:
+./operation_extractor.py https://api.hive.blog 20000000 25000000 dump.json custom_json_operation --dump-ops-only True
+
+"""
+
+from json import dumps
+from hive.steem.client import SteemClient
+
+if __name__ == "__main__":
+    import argparse
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("hived_url", type=str, help="Url address of hived instance")
+    parser.add_argument("from_block", type=int, help="Scan from block")
+    parser.add_argument("to_block", type=int, help="Scan to block")
+    parser.add_argument("output_file", type=str, help="Prepared blocks will be saved in this file")
+    parser.add_argument("operations", type=str, nargs='+', help="Save selected operations")
+    parser.add_argument("--dump-ops-only", type=bool, default=False, help="Dump only selected ops, without block data")
+
+    args = parser.parse_args()
+
+    client = SteemClient({"default":args.hived_url})
+    from_block = args.from_block
+    with open(args.output_file, "w") as output_file:
+        if not args.dump_ops_only:
+            output_file.write("{\n")
+        while from_block < args.to_block:
+            to_block = from_block + 1000
+            if to_block >= args.to_block:
+                to_block = args.to_block + 1
+            print("Processing range from: ", from_block, " to: ", to_block)
+            blocks = client.get_blocks_range(from_block, to_block)
+            for block in blocks:
+                block_num = int(block['block_id'][:8], base=16)
+                block_data = dict(block)
+                for idx in range(len(block_data['transactions'])):
+                    block_data['transactions'][idx]['operations'] = [op for op in block_data['transactions'][idx]['operations'] if op['type'] in args.operations]
+                    if args.dump_ops_only and block_data['transactions'][idx]['operations']:
+                        output_file.write("{}\n".format(dumps(block_data['transactions'][idx]['operations'])))
+                if not args.dump_ops_only:
+                    output_file.write('"{}":{},\n'.format(block_num, dumps(block_data)))
+            from_block = to_block
+        if not args.dump_ops_only:
+            output_file.write("}\n")
diff --git a/scripts/run_full_sync_tests.sh b/scripts/run_full_sync_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6ee55b47734cd6fc9019636cb1e3aff2ebab7d2f
--- /dev/null
+++ b/scripts/run_full_sync_tests.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# script to run tavern tests for full sync hivemind node
+
+export TAVERN_DIR="tests/tests_api/hivemind/tavern_full_sync"
+
+SCRIPT=$(readlink -f "$0")
+
+$(dirname "$SCRIPT")/run_tests.sh "$@"
diff --git a/scripts/run_tests.sh b/scripts/run_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..edb8ac7a5a3d4e0b69192a571e4e67c41761b7cd
--- /dev/null
+++ b/scripts/run_tests.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+# this script will run tavern test versus hivemind instance at address and port
+# specified in command line args.
+
+# basic call is: ./run_tests.sh address port
+# example: ./run_tests.sh 127.0.0.1 8080
+
+# additionally one can pass parameters to the underlying pytest framework
+# example: ./run_tests.sh 127.0.0.1 8080 -m failing
+# above will run only tests marked as failing
+
+# you can also specify tests from given file:
+# example: ./run_tests.sh localhost 8080 test_database_api_patterns.tavern.yaml
+
+# or combine all options
+# ./run_tests.sh localhost 8080 test_database_api_patterns.tavern.yaml -m failing
+
+function display_usage {
+  echo "Usage: $0 hivemind_address hivemind_port [test options]"
+}
+
+function check_port {
+  re='^[0-9]+$'
+  if ! [[ $1 =~ $re ]] ; then
+    echo "Error: Port is not a number" >&2
+    exit 1
+  fi
+}
+
+function check_address {
+  if [[ $1 == "localhost" ]] ; then
+    return
+  fi
+
+  re='^(http(s?):\/\/)?((((www\.)?)+[a-zA-Z0-9\.\-\_]+(\.[a-zA-Z]{2,6})+)|(\b((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b))(\/[a-zA-Z0-9\_\-\s\.\/\?\%\#\&\=]*)?$'
+  if ! [[ $1 =~ $re ]] ; then
+    echo "Error: Address is not valid url or ip address" >&2
+    exit 1
+  fi
+}
+
+if [ $# -lt 2 ]
+then 
+  display_usage
+  exit 1
+fi
+
+if [[ "$1" == "--help" || "$1" == "-h" ]]
+then
+  display_usage
+  exit 0
+fi
+
+#check_address $1
+#check_port $2
+
+#cd ..
+
+set -e
+
+pip3 install tox --user
+
+export HIVEMIND_ADDRESS=$1
+export HIVEMIND_PORT=$2
+if [ -z "$TAVERN_DIR" ]
+then
+  export TAVERN_DIR="tests/tests_api/hivemind/tavern"
+fi
+echo "Attempting to start tests on hivemind instance listening on: $HIVEMIND_ADDRESS port: $HIVEMIND_PORT"
+
+echo "Additional test options: ${@:3}"
+
+tox -e tavern -- -W ignore::pytest.PytestDeprecationWarning -n auto -v -p no:logging "${@:3}"
\ No newline at end of file
diff --git a/scripts/xml_report_parser.py b/scripts/xml_report_parser.py
new file mode 100755
index 0000000000000000000000000000000000000000..de29772234ef88871ed12a3e733e304635bc5c0b
--- /dev/null
+++ b/scripts/xml_report_parser.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python3
+import os
+
+from xml.dom import minidom
+
+def process_file_name(file_name, tavern_root_dir):
+    tavern_root_dir_dot = tavern_root_dir.replace("/", ".")
+    file_name_dot = file_name.replace("/", ".")
+    return file_name_dot.replace(tavern_root_dir_dot, "").lstrip(".")
+
+def get_requests_from_yaml(tavern_root_dir):
+    from fnmatch import fnmatch
+    import yaml
+    from json import dumps
+    ret = {}
+    pattern = "*.tavern.yaml"
+    for path, subdirs, files in os.walk(tavern_root_dir):
+        for name in files:
+            if fnmatch(name, pattern):
+                test_file = os.path.join(path, name)
+                yaml_document = None
+                with open(test_file, "r") as yaml_file:
+                    yaml_document = yaml.load(yaml_file, Loader=yaml.BaseLoader)
+                if "stages" in yaml_document:
+                    if "request" in yaml_document["stages"][0]:
+                        json_parameters = yaml_document["stages"][0]["request"].get("json", None)
+                        assert json_parameters is not None, "Unable to find json parameters in request"
+                        ret[process_file_name(test_file, tavern_root_dir)] = dumps(json_parameters)
+    return ret
+
+def parse_xml_files(root_dir):
+    ret = {}
+    print("Scanning path: {}".format(root_dir))
+    for name in os.listdir(root_dir):
+        file_path = os.path.join(root_dir, name)
+        if os.path.isfile(file_path) and name.startswith("benchmarks") and file_path.endswith(".xml"):
+            print("Processing file: {}".format(file_path))
+            xmldoc = minidom.parse(file_path)
+            test_cases = xmldoc.getElementsByTagName('testcase')
+            for test_case in test_cases:
+                test_name = test_case.attributes['classname'].value
+                test_time = float(test_case.attributes['time'].value)
+                if test_name in ret:
+                    ret[test_name].append(test_time)
+                else:
+                    ret[test_name] = [test_time]
+    return ret
+
+if __name__ == "__main__":
+    import argparse
+    from statistics import mean
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("xml_report_dir", type=str, help="Path to benchmark xml reports")
+    parser.add_argument("tavern_root_dir", type=str, help="Path to tavern tests root dir")
+    parser.add_argument("--time-threshold", dest="time_threshold", type=float, default=1.0, help="Time threshold for test execution time, tests with execution time greater than threshold will be marked on red.")
+    args = parser.parse_args()
+
+    assert os.path.exists(args.xml_report_dir), "Please provide valid xml report path"
+    assert os.path.exists(args.tavern_root_dir), "Please provide valid tavern path"
+
+    report_data = parse_xml_files(args.xml_report_dir)
+    request_data = get_requests_from_yaml(args.tavern_root_dir)
+
+    html_file = "tavern_benchmarks_report.html"
+    above_threshold = []
+    with open(html_file, "w") as ofile:
+        ofile.write("<html>\n")
+        ofile.write("  <head>\n")
+        ofile.write("    <style>\n")
+        ofile.write("      table, th, td {\n")
+        ofile.write("        border: 1px solid black;\n")
+        ofile.write("        border-collapse: collapse;\n")
+        ofile.write("      }\n")
+        ofile.write("      th, td {\n")
+        ofile.write("        padding: 15px;\n")
+        ofile.write("      }\n")
+        ofile.write("    </style>\n")
+        ofile.write("  </head>\n")
+        ofile.write("  <body>\n")
+        ofile.write("    <table>\n")
+        ofile.write("      <tr><th>Test name</th><th>Min time [s]</th><th>Max time [s]</th><th>Mean time [s]</th></tr>\n")
+        for name, data in report_data.items():
+            dmin = min(data)
+            dmax = max(data)
+            dmean = mean(data)
+            if dmean > args.time_threshold:
+                ofile.write("      <tr><td>{}<br/>Parameters: {}</td><td>{:.4f}</td><td>{:.4f}</td><td bgcolor=\"red\">{:.4f}</td></tr>\n".format(name, request_data[name], dmin, dmax, dmean))
+                above_threshold.append((name, "{:.4f}".format(dmean), request_data[name]))
+            else:
+                ofile.write("      <tr><td>{}</td><td>{:.4f}</td><td>{:.4f}</td><td>{:.4f}</td></tr>\n".format(name, dmin, dmax, dmean))
+        ofile.write("    </table>\n")
+        ofile.write("  </body>\n")
+        ofile.write("</html>\n")
+
+    if above_threshold:
+        from prettytable import PrettyTable
+        summary = PrettyTable()
+        print("########## Test failed with the following tests above the {}s threshold ##########".format(args.time_threshold))
+        summary.field_names = ['Test name', 'Mean time [s]', 'Call parameters']
+        for entry in above_threshold:
+            summary.add_row(entry)
+        print(summary)
+        exit(2)
+    exit(0)
diff --git a/setup.py b/setup.py
index f2b17ae7c823900126dc388e2a41672e71a90b9c..d053affb17025b6377b64f937a35a926e2cd5f19 100644
--- a/setup.py
+++ b/setup.py
@@ -1,51 +1,109 @@
 # coding=utf-8
 import sys
+import os
 
 from setuptools import find_packages
 from setuptools import setup
 
 assert sys.version_info[0] == 3 and sys.version_info[1] >= 6, "hive requires Python 3.6 or newer"
 
-tests_require = [
-    'pytest',
-    'pytest-cov',
-    'pytest-pylint',
-    'pytest-asyncio',
-    'pytest-console-scripts',
-    'git-pylint-commit-hook',
-    'pep8',
-    'yapf',
-]
-
-# yapf: disable
-setup(
-    name='hivemind',
-    version='0.0.1',
-    description='Developer-friendly microservice powering social networks on the Steem blockchain.',
-    long_description=open('README.md').read(),
-    packages=find_packages(exclude=['scripts']),
-    setup_requires=['pytest-runner'],
-    tests_require=tests_require,
-    install_requires=[
-        #'aiopg==0.16.0',
-        'aiopg @ https://github.com/aio-libs/aiopg/tarball/862fff97e4ae465333451a4af2a838bfaa3dd0bc',
-        'jsonrpcserver==4.0.1',
-        'aiohttp',
-        'certifi',
-        'sqlalchemy',
-        'funcy',
-        'toolz',
-        'maya',
-        'ujson',
-        'urllib3',
-        'psycopg2-binary',
-        'aiocache',
-        'configargparse',
-        'pdoc',
-    ],
-    extras_require={'test': tests_require},
-    entry_points={
-        'console_scripts': [
-            'hive=hive.cli:run',
-        ]
-    })
+VERSION = '0.0.1'
+
+class GitRevisionProvider(object):
+    """ Static class to provide version and git revision information"""
+
+    @staticmethod
+    def provide_git_revision():
+        """ Evaluate version and git revision and save it to a version file
+            Evaluation is based on VERSION variable and git describe if
+            .git directory is present in tree.
+            In case when .git is not available version and git_revision is taken
+            from get_distribution call
+        """
+        if os.path.exists(".git"):
+            from subprocess import check_output
+            command = 'git describe --tags --long --dirty'
+            version = check_output(command.split()).decode('utf-8').strip()
+            parts = version.split('-')
+            if parts[-1] == 'dirty':
+                sha = parts[-2]
+            else:
+                sha = parts[-1]
+            git_revision = sha.lstrip('g')
+            GitRevisionProvider._save_version_file(VERSION, git_revision)
+            return git_revision
+        else:
+            from pkg_resources import get_distribution
+            try:
+                version, git_revision = get_distribution("hivemind").version.split("+")
+                GitRevisionProvider._save_version_file(version, git_revision)
+                return git_revision
+            except Exception:
+                GitRevisionProvider._save_version_file(VERSION, "")
+        return ""
+
+    @staticmethod
+    def _save_version_file(hivemind_version, git_revision):
+        """ Helper method to save version.py with current version and git_revision """
+        with open("hive/version.py", 'w') as version_file:
+            version_file.write("# generated by setup.py\n")
+            version_file.write("# contents will be overwritten\n")
+            version_file.write("VERSION = '{}'\n".format(hivemind_version))
+            version_file.write("GIT_REVISION = '{}'".format(git_revision))
+
+GIT_REVISION = GitRevisionProvider.provide_git_revision()
+SQL_SCRIPTS_PATH = 'hive/db/sql_scripts/'
+
+def get_sql_scripts():
+    from os import listdir
+    from os.path import isfile, join
+    return [join(SQL_SCRIPTS_PATH, f) for f in listdir(SQL_SCRIPTS_PATH) if isfile(join(SQL_SCRIPTS_PATH, f))]
+
+if __name__ == "__main__":
+    setup(
+        name='hivemind',
+        version=VERSION + "+" + GIT_REVISION,
+        description='Developer-friendly microservice powering social networks on the Steem blockchain.',
+        long_description=open('README.md').read(),
+        packages=find_packages(exclude=['scripts']),
+        data_files=[(SQL_SCRIPTS_PATH, get_sql_scripts())],
+        setup_requires=[
+            'pytest-runner',
+        ],
+        dependency_links=[
+            'https://github.com/bcb/jsonrpcserver/tarball/8f3437a19b6d1a8f600ee2c9b112116c85f17827#egg=jsonrpcserver-4.1.3+8f3437a'
+        ],
+        install_requires=[
+            'aiopg @ https://github.com/aio-libs/aiopg/tarball/862fff97e4ae465333451a4af2a838bfaa3dd0bc',
+            'jsonrpcserver @ https://github.com/bcb/jsonrpcserver/tarball/8f3437a19b6d1a8f600ee2c9b112116c85f17827#egg=jsonrpcserver',
+            'simplejson',
+            'aiohttp',
+            'certifi',
+            'sqlalchemy',
+            'funcy',
+            'toolz',
+            'maya',
+            'ujson',
+            'urllib3',
+            'psycopg2-binary',
+            'aiocache',
+            'configargparse',
+            'pdoc',
+            'diff-match-patch',
+            'prometheus-client',
+            'psutil',
+            'atomic',
+            'python-dateutil>=2.8.1'
+        ],
+        extras_require={
+            'dev': [
+                'pyYAML',
+                'prettytable',
+            ]
+        },
+        entry_points={
+            'console_scripts': [
+                'hive=hive.cli:run',
+            ]
+        }
+    )
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0e1a04a694912f3fe214fb341378b4201cca43a2
--- /dev/null
+++ b/tests/CMakeLists.txt
@@ -0,0 +1,92 @@
+cmake_minimum_required(VERSION 3.10)
+
+INCLUDE(tests_api/ApiTests.cmake)
+
+ENABLE_TESTING()
+
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind bridge account_notifications steemmeupscotty 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind bridge get_community hive-123456 alice)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind bridge get_ranked_posts trending hive alice)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind bridge list_all_subscriptions alice)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind bridge list_community_roles blocktrades)
+
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_account_reputations steemit 10)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_account_votes alice)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_active_votes drakos open-letter-to-justin-sun-and-the-steem-community)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_blog drakos 0 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_blog_authors drakos)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_blog_entries tarazkp 0 10)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_comment_discussions_by_payout [{"tag":"photography","limit":10,"truncate_body":0}])
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_content tarazkp the-legacy-of-yolo)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_content_replies tarazkp the-legacy-of-yolo)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_active [{"tag":"photography","limit":10,"truncate_body":0}])
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_author_before_date flaws my-first-experience-integrating-steem-into-chess-in-my-state-or-a-lot-of-photos-3 2020-03-01T00:00:00 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_blog [{"tag":"tarazkp","limit":2,"truncate_body":0}])
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_cashout [{"tag":"tarazkp","limit":2,"truncate_body":0}])
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_children [{"tag":"tarazkp","limit":2,"truncate_body":0}])
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_comments tarazkp the-legacy-of-yolo 5)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_created [{"tag":"blocktrades","limit":1}])
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_feed steemtools steempeak introducing-peaklock-and-keys-management 3)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_hot [{"tag":"tarazkp","limit":2,"truncate_body":0}])
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_promoted [{"tag":"tarazkp","limit":2,"truncate_body":0}])
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_trending [{"tag":"tarazkp","limit":2,"truncate_body":0}])
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_discussions_by_votes [{"tag":"tarazkp","limit":2,"truncate_body":0}])
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_feed steemit 0 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_feed_entries steemit 0 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_follow_count steemmeupscotty)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_followers steemit null blog 10)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_following steemit null blog 10)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_post_discussions_by_payout [{"tag":"tarazkp","limit":2,"truncate_body":0}])
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_reblogged_by tarazkp the-legacy-of-yolo)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_replies_by_last_update tarazkp the-legacy-of-yolo 10)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_tags_used_by_author steemit)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind condenser_api get_trending_tags blocktrades 1)
+
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind database_api list_comments ["steemit","firstpost","",""] 1 by_root)
+
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind follow_api get_account_reputations 1 blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind follow_api get_blog 1 blocktardes 0 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind follow_api get_blog_entries 1 blocktrades 0 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind follow_api get_follow_count 1 blocktrades)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind follow_api get_followers 1 steemit \"\" blog 10)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind follow_api get_following 1 blocktrades \"\" blog 10)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind follow_api get_reblogged_by 1 steemit firstpost)
+
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_active_votes flaws my-first-experience-integrating-steem-into-chess-in-my-state-or-a-lot-of-photos-3) 
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_comment_discussions_by_payout blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_content_replies flaws my-first-experience-integrating-steem-into-chess-in-my-state-or-a-lot-of-photos-3)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussion steemmeupscotty black-dog-on-a-hong-kong-sunrise-animal-landscape-photography)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_active blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_author_before_date flaws my-first-experience-integrating-steem-into-chess-in-my-state-or-a-lot-of-photos-3 2020-03-01T00:00:00 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_blog blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_cashout blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_children blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_comments tarazkp the-legacy-of-yolo 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_created blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_feed blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_hot blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_promoted blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_trending blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_discussions_by_votes blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_post_discussions_by_payout blocktrades 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_replies_by_last_update flaws my-first-experience-integrating-steem-into-chess-in-my-state-or-a-lot-of-photos-3 1)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_tags_used_by_author flaws)
+ADD_API_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind tags_api get_trending_tags blocktrades 1)
+
+ADD_API_PYREST_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind 5000000 bridge_api )
+ADD_API_PYREST_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind 5000000 condenser_api )
+ADD_API_PYREST_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind 5000000 database_api )
+ADD_API_PYREST_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind 5000000 follow_api )
+ADD_API_PYREST_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind 5000000 hive_api )
+ADD_API_PYREST_TEST(${CMAKE_BINARY_DIR}/tests/tests_api ${CMAKE_CURRENT_SOURCE_DIR}/tests_api hivemind 5000000 tags_api )
+
+
+
+
+
+
+
+
+
+
+
diff --git a/tests/manual_tests/__init__.py b/tests/manual_tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/manual_tests/list_comments_by_author_last_update_test.py b/tests/manual_tests/list_comments_by_author_last_update_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..06e072dc8f227e8fd465783a7d046e39604cd567
--- /dev/null
+++ b/tests/manual_tests/list_comments_by_author_last_update_test.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+
+from test_base import run_test
+
+if __name__ == '__main__':
+    reference_hive_node_url = 'http://127.0.0.1:8090'
+    test_hive_node_url = 'http://127.0.0.1:8080'
+
+    payload = {
+        "jsonrpc":"2.0",
+        "method":"database_api.list_comments",
+        "params" : {
+            "start" : ['steemit', '1970-01-01T00:00:00', '', ''],
+            "limit" : 10,
+            "order" : 'by_author_last_update'
+        },
+        "id":1
+    }
+
+    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink', 'last_update'])
diff --git a/tests/manual_tests/list_comments_by_cashout_test.py b/tests/manual_tests/list_comments_by_cashout_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ec3609b4edca01fb071da9012e266372e43bdd4
--- /dev/null
+++ b/tests/manual_tests/list_comments_by_cashout_test.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python3
+from test_base import run_test
+
+if __name__ == '__main__':
+    reference_hive_node_url = 'http://127.0.0.1:8090'
+    test_hive_node_url = 'http://127.0.0.1:8080'
+
+    payload = {
+        "jsonrpc":"2.0",
+        "method":"database_api.list_comments",
+        "params" : {
+            "start" : ['1970-01-01T00:00:00', '', ''],
+            "limit" : 10,
+            "order" : 'by_cashout_time'
+        },
+        "id":1
+    }
+
+    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink', 'parent_author', 'parent_permlink', 'created'])
diff --git a/tests/manual_tests/list_comments_by_parent_test.py b/tests/manual_tests/list_comments_by_parent_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea1d8019066b7c4dce9c72e04f6d54de6dbdeb19
--- /dev/null
+++ b/tests/manual_tests/list_comments_by_parent_test.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python3
+from test_base import run_test
+
+if __name__ == '__main__':
+    reference_hive_node_url = 'http://127.0.0.1:8090'
+    test_hive_node_url = 'http://127.0.0.1:8080'
+
+    payload = {
+        "jsonrpc":"2.0",
+        "method":"database_api.list_comments",
+        "params" : {
+            "start" : ['steemit', 'firstpost', '', ''],
+            "limit" : 10,
+            "order" : 'by_parent'
+        },
+        "id":1
+    }
+
+    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink', 'parent_author', 'parent_permlink', 'created'])
diff --git a/tests/manual_tests/list_comments_by_permlink.py b/tests/manual_tests/list_comments_by_permlink.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d8db7e22203aebb7ade7b541de932ae1eb640a2
--- /dev/null
+++ b/tests/manual_tests/list_comments_by_permlink.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python3
+from test_base import run_test
+
+if __name__ == '__main__':
+    reference_hive_node_url = 'http://127.0.0.1:8090'
+    test_hive_node_url = 'http://127.0.0.1:8080'
+
+    payload = {
+        "jsonrpc" : "2.0",
+        "method" : "database_api.list_comments",
+        "params" : {
+            "start" : ['steemit', 'firstpost'],
+            "limit" : 10,
+            "order" : 'by_permlink'
+        },
+        "id" : 1
+    }
+
+    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink'])
diff --git a/tests/manual_tests/list_comments_by_root_test.py b/tests/manual_tests/list_comments_by_root_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..00a01544c4b88479fb6b2876903f83684abebc67
--- /dev/null
+++ b/tests/manual_tests/list_comments_by_root_test.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+from test_base import run_test
+
+if __name__ == '__main__':
+    reference_hive_node_url = 'http://127.0.0.1:8090'
+    test_hive_node_url = 'http://127.0.0.1:8080'
+
+    payload = {
+        "jsonrpc" : "2.0",
+        "method" : "database_api.list_comments",
+        "params" : {
+            "start" : ['steemit', 'firstpost', '', ''],
+            "limit" : 10,
+            "order" : 'by_root'
+        },
+        "id":1
+    }
+
+    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink', 'root_author', 'root_permlink', 'created'])
+
diff --git a/tests/manual_tests/list_comments_by_update_test.py b/tests/manual_tests/list_comments_by_update_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7c3efd4f303db4e8110a8c094b378d6f0c4e017
--- /dev/null
+++ b/tests/manual_tests/list_comments_by_update_test.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python3
+from test_base import run_test
+
+if __name__ == '__main__':
+    reference_hive_node_url = 'http://127.0.0.1:8090'
+    test_hive_node_url = 'http://127.0.0.1:8080'
+
+    payload = {
+        "jsonrpc":"2.0",
+        "method":"database_api.list_comments",
+        "params" : {
+            "start" : ['steemit', '1970-01-01T00:00:00', '', ''],
+            "limit" : 10,
+            "order" : 'by_last_update'
+        },
+        "id":1
+    }
+
+    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink', 'last_update'])
diff --git a/tests/manual_tests/test_base.py b/tests/manual_tests/test_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f24d1e67c9a14902e387c9739623f1915fe4311
--- /dev/null
+++ b/tests/manual_tests/test_base.py
@@ -0,0 +1,26 @@
+def run_test(reference_node_url, test_node_url, payload, table_keys):
+    import prettytable
+    from requests import post
+    from json import dumps
+
+    print("Querying reference node")
+    resp = post(reference_node_url, dumps(payload))
+
+    json = resp.json()
+    #print(json)
+    table = prettytable.PrettyTable()
+    table.field_names = table_keys
+    for row in json['result']['comments']:
+        table.add_row([row[key] for key in table_keys])
+    print(table)
+
+    print("Querying test node")
+    resp = post(test_node_url, dumps(payload))
+
+    json = resp.json()
+    #print(json)
+    table = prettytable.PrettyTable()
+    table.field_names = table_keys
+    for row in json['result']:
+        table.add_row([row[key] for key in table_keys])
+    print(table)
diff --git a/tests/server/test_server_database_api.py b/tests/server/test_server_database_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..380805c9613f5ec8c352e7c89b125f8f0f06774c
--- /dev/null
+++ b/tests/server/test_server_database_api.py
@@ -0,0 +1,60 @@
+import pytest
+from hive.server.database_api.methods import list_comments
+from hive.steem.client import SteemClient
+from hive.db.adapter import Db
+
+DB = Db.instance()
+CTX = {'db' : DB}
+
+@pytest.fixture
+def client():
+    return SteemClient(url='https://api.hive.blog')
+
+@pytest.mark.asyncio
+async def test_list_comments_by_cashout_time(client):
+    reference_data = await client.list_comments({"start":["1970-01-01T00:00:00","steemit","firstpost"],"limit":10,"order":"by_cashout_time"})
+    test_data = await list_comments(CTX, ["1970-01-01T00:00:00","steemit","firstpost"],10,"by_cashout_time")
+    assert reference_data
+    assert test_data
+    assert len(reference_data) == len(test_data)
+    to_compare = ['author','permlink']
+    for idx in range(len(reference_data)):
+        for key in to_compare:
+            assert reference_data[idx][key] == test_data[idx][key]
+        assert reference_data[idx]['cashout_time'] == test_data[idx]['payout_at']
+
+@pytest.mark.asyncio
+async def test_list_comments_by_permlink(client):
+    reference_data = await client.list_comments({"start":["steemit","firstpost"],"limit":10,"order":"by_permlink"})
+    test_data = await list_comments(CTX, ["steemit","firstpost"],10,"by_permlink")
+    assert reference_data
+    assert test_data
+    assert len(reference_data) == len(test_data)
+    to_compare = ['author','permlink']
+    for idx in range(len(reference_data)):
+        for key in to_compare:
+          assert reference_data[idx][key] == test_data[idx][key]
+
+@pytest.mark.asyncio
+async def test_list_comments_by_root(client):
+    reference_data = await client.list_comments({"start":["steemit","firstpost","",""],"limit":10,"order":"by_root"})
+    test_data = await list_comments(CTX, ["steemit","firstpost","",""],10,"by_root")
+    assert reference_data
+    assert test_data
+    assert len(reference_data) == len(test_data)
+    to_compare = ['author','permlink','root_author','root_permlink']
+    for idx in range(len(reference_data)):
+        for key in to_compare:
+            assert reference_data[idx][key] == test_data[idx][key]
+
+@pytest.mark.asyncio
+async def test_list_comments_by_parent(client):
+    reference_data = await client.list_comments({"start":["steemit","firstpost","",""],"limit":10,"order":"by_parent"})
+    test_data = await list_comments(CTX, ["steemit","firstpost","",""],10,"by_parent")
+    assert reference_data
+    assert test_data
+    assert len(reference_data) == len(test_data)
+    to_compare = ['author','permlink','parent_author','parent_permlink']
+    for idx in range(len(reference_data)):
+        for key in to_compare:
+            assert reference_data[idx][key] == test_data[idx][key]
diff --git a/tests/tests_api b/tests/tests_api
new file mode 160000
index 0000000000000000000000000000000000000000..52c351a435bdea390827b6ce277d5bc59c08fe53
--- /dev/null
+++ b/tests/tests_api
@@ -0,0 +1 @@
+Subproject commit 52c351a435bdea390827b6ce277d5bc59c08fe53
diff --git a/tests/utils/test_utils_post.py b/tests/utils/test_utils_post.py
index be5159db1abc33056a71eae0b0b8cbc0e09e714f..26c1e58ce24a59366505d65620f1a19c0067e010 100644
--- a/tests/utils/test_utils_post.py
+++ b/tests/utils/test_utils_post.py
@@ -3,10 +3,6 @@ from decimal import Decimal
 
 from hive.utils.post import (
     mentions,
-    post_basic,
-    post_legacy,
-    post_payout,
-    post_stats,
 )
 
 POST_1 = {
@@ -73,7 +69,7 @@ POST_1 = {
     "parent_author": "",
     "parent_permlink": "spam",
     "pending_payout_value": "0.000 HBD",
-    "percent_steem_dollars": 10000,
+    "percent_hbd": 10000,
     "permlink": "june-spam",
     "promoted": "0.000 HBD",
     "reblogged_by": [],
@@ -121,7 +117,7 @@ POST_2 = {
     "parent_author": "",
     "parent_permlink": "spam",
     "pending_payout_value": "0.000 HBD",
-    "percent_steem_dollars": 10000,
+    "percent_hbd": 10000,
     "permlink": "june-spam",
     "promoted": "0.000 HBD",
     "reblogged_by": [],
@@ -151,59 +147,3 @@ def test_mentions():
     assert not m('@longestokaccountx')
     assert m('@abc- @-foo @bar.') == {'abc', 'bar'}
     assert m('_[@foo](https://steemit.com/@foo)_') == {'foo'}
-
-def test_post_basic():
-    ret = post_basic(POST_1)
-    expect = {'json_metadata': {'tags': ['spam'], 'image': ['https://pbs.twimg.com/media/DBgNm3jXoAAioyE.jpg', 'https://example.com/image.jpg'], 'app': 'steemit/0.1', 'format': 'markdown'},
-              'image': 'https://pbs.twimg.com/media/DBgNm3jXoAAioyE.jpg',
-              'tags': ['spam'],
-              'is_nsfw': False,
-              'body': 'https://pbs.twimg.com/media/DBgNm3jXoAAioyE.jpg',
-              'preview': 'https://pbs.twimg.com/media/DBgNm3jXoAAioyE.jpg',
-              'payout_at': '2017-06-27T15:53:51',
-              'is_paidout': True,
-              'is_payout_declined': False,
-              'is_full_power': False}
-    assert ret == expect
-
-def test_post_basic_tags():
-    tags = post_basic(POST_2)['tags']
-    expected = ['steemit', 'steem', 'abc', 'bcd', 'cde']
-    assert tags == expected, "got %s" % tags
-
-def test_post_legacy():
-    ret = post_legacy(POST_1)
-    expect = {'allow_curation_rewards': True,
-              'allow_replies': True,
-              'allow_votes': True,
-              'beneficiaries': [],
-              'curator_payout_value': '0.000 HBD',
-              'id': 4437869,
-              'max_accepted_payout': '1000000.000 HBD',
-              'parent_author': '',
-              'parent_permlink': 'spam',
-              'percent_steem_dollars': 10000,
-              'root_author': 'test-safari',
-              'root_permlink': 'june-spam',
-              'root_title': 'June Spam',
-              'url': '/spam/@test-safari/june-spam'}
-    assert ret == expect
-
-def test_post_payout():
-    ret = post_payout(POST_1)
-    expect = {'payout': Decimal('0.044'),
-              'rshares': 2731865444,
-              'csvotes': 'test-safari,1506388632,10000,49.03\ndarth-cryptic,110837437,200,49.23\ntest25,621340000,10000,25\nmysqlthrashmetal,493299375,10000,41.02',
-              'sc_trend': 6243.994921804685,
-              'sc_hot': 149799.83955930467}
-    assert ret == expect
-
-def test_post_stats():
-    ret = post_stats(POST_1)
-    expect = {'hide': False,
-              'gray': False,
-              'author_rep': 49.03,
-              'flag_weight': 0,
-              'total_votes': 4,
-              'up_votes': 4}
-    assert ret == expect
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000000000000000000000000000000000000..7eec99af91c546384d7c6f99dea6006d7d0caec6
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,51 @@
+[tox]
+envlist = py36, tavern, benchmark, tavern-benchmark
+skipsdist = true
+
+[testenv]
+deps =
+  pytest
+
+[testenv:benchmark]
+deps =
+  {[testenv]deps}
+  pytest-benchmark
+  requests
+  pyYAML
+  prettytable
+commands =
+  python {toxinidir}/scripts/ci/start_api_benchmark.py {posargs}
+
+[testenv:tavern]
+setenv =
+  PYTHONPATH = {toxinidir}/tests/tests_api/hivemind/tavern:{env:PYTHONPATH:}
+
+passenv =
+  HIVEMIND_ADDRESS
+  HIVEMIND_PORT
+
+changedir = {env:TAVERN_DIR}
+
+deps =
+  {[testenv]deps}
+  pytest-xdist
+  tavern
+  deepdiff[murmur]
+  jsondiff
+
+commands = pytest {posargs}
+
+[testenv:tavern-benchmark]
+setenv =
+  {[testenv:tavern]setenv}
+
+passenv =
+  {[testenv:tavern]passenv}
+  TAVERN_DISABLE_COMPARATOR
+
+changedir = tests/tests_api/hivemind/tavern
+
+deps =
+  {[testenv:tavern]deps}
+
+commands = pytest --durations=0 {posargs}