diff --git a/.env.example b/.env.example
index 3c94b6fd3b4a94aea281e7d007a1b843b295bfb3..1920e3c987c756f92e62c5a029811ca19627f29e 100644
--- a/.env.example
+++ b/.env.example
@@ -1,5 +1,5 @@
 # The name of the ZFS storage pool for HAF to use
-ZPOOL="/workspace/hafbe/haf-pool"
+ZPOOL="haf-pool"
 # The name of the dataset on $ZPOOL where HAF will store its data
 # HAF won't read/write anything outside of $ZPOOL/$TOP_LEVEL_DATASET,
 # so you can have, e.g., multiple HAF installations on the same
@@ -20,7 +20,7 @@ TOP_LEVEL_DATASET_MOUNTPOINT="${ZPOOL_MOUNT_POINT}/${TOP_LEVEL_DATASET}"
 # - monitoring: services for Prometheus, Grafana, Loki, Cadvisor , Nodeexporter, Promtail, Postresexporter, Blackboxexporter...
 # COMPOSE_PROFILES="core,admin,hafah,hivemind,servers"
 # COMPOSE_PROFILES="core,admin,hafah,hafbe,hivemind,servers,monitoring"
-COMPOSE_PROFILES="core,admin,hafah,hafbe,servers"
+COMPOSE_PROFILES="core,admin,servers,apps"
 
 # The registry where Hive docker images are pulled from.  Normally, you
 # should set this to the default, `registry.hive.blog` or Docker Hub,
@@ -28,24 +28,24 @@ COMPOSE_PROFILES="core,admin,hafah,hafbe,servers"
 # images, change this to `registry.gitlab.syncad.com/hive` where both CI
 # builds are automatically pushed.
 # HIVE_API_NODE_REGISTRY=registry.hive.blog
-HIVE_API_NODE_REGISTRY=registry.gitlab.syncad.com/hive
 # HIVE_API_NODE_REGISTRY=hiveio
+# HIVE_API_NODE_REGISTRY=registry.gitlab.syncad.com/hive
 
 # To use the same tagged version of all the Hive API node images,
 # set it here.  You can override the tags for individual images
 # below
-HIVE_API_NODE_VERSION=1.27.5
+HIVE_API_NODE_VERSION=1.27.8
+
 
 # Global settings
 
 # override the HAF core image's version and registry image here:
-HAF_REGISTRY=${HIVE_API_NODE_REGISTRY}/haf/base_instance
-# HAF_VERSION=3a7456f2
-# HAF_VERSION=236db88f
+# HAF_IMAGE=${HIVE_API_NODE_REGISTRY}/haf
+# HAF_VERSION=${HIVE_API_NODE_VERSION}
 
-HAF_DATA_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}"
-HAF_LOG_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}/logs"
-HAF_WAL_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory/haf_wal"
+# HAF_DATA_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}"
+# HAF_LOG_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}/logs"
+# HAF_WAL_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory/haf_wal"
 # If you need to massive sync HAF (i.e. you are not using a ZFS snapshot),
 # then you can sync faster by temporarily using an in-memory shared_memory.bin.
 # To do this, comment out the line below and uncomment the one after, and
@@ -53,9 +53,8 @@ HAF_WAL_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory/haf_wal"
 # After the sync has finished, do `docker compose down` then move the shared_memory.bin
 # file to the shared_memory directory, edit this file to restore original values, and
 # `docker compose up -d` to restart HAF.
-# HAF_SHM_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory"
-# HAF_SHM_DIRECTORY="/mnt/haf_shared_mem"
 HAF_SHM_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory"
+# HAF_SHM_DIRECTORY="/mnt/haf_shared_mem"
 
 # The docker compose project name, gets prefixed onto each container name
 PROJECT_NAME=haf-world
@@ -65,14 +64,14 @@ PROJECT_NAME=haf-world
 NETWORK_NAME=haf
 
 # List of arguments for the HAF service
-#ARGUMENTS="--dump-snapshot=20230821"
-#ARGUMENTS="--skip-hived"
-#ARGUMENTS="--replay-blockchain"
-ARGUMENTS="--replay-blockchain --stop-replay-at-block 5000000"
+# ARGUMENTS=""
+# ARGUMENTS="--replay-blockchain"
+# ARGUMENTS="--dump-snapshot=20230821"
+# ARGUMENTS="--skip-hived"
 #
 # Example how to use monitoring services
 #
-# ARGUMENTS="--replay-blockchain --stop-replay-at-block 5000000 --exit-before-sync --block-stats-report-output=NOTIFY --block-stats-report-type=FULL --notifications-endpoint=hived-pme:9185"
+# ARGUMENTS="--replay-blockchain --stop-at-block 5000000 --block-stats-report-output=NOTIFY --block-stats-report-type=FULL --notifications-endpoint=hived-pme:9185"
 #
 # Mandatory options are:
 # --block-stats-report-output=NOTIFY --block-stats-report-type=FULL --notifications-endpoint=hived-pme:9185
@@ -89,70 +88,72 @@ ARGUMENTS="--replay-blockchain --stop-replay-at-block 5000000"
 # Additional logs are collected from all containers in the stack via Loki and Promtail
 # Default login and password for Grafana is admin/admin - remember to change it after first login
 # Statistics provided by Grafana are available at the host address on port 3000 (http(s)://hostname:3000)
-#
+
 
 # The default setup will run the recommended version of HAfAH,
 # you can run a custom version by un-commenting and modifying the
 # values below
-HAFAH_REGISTRY=${HIVE_API_NODE_REGISTRY}/hafah/setup
-# HAFAH_VERSION=9556cd0f
-# HAFAH_VERSION=a85fc0b4
+# HAFAH_IMAGE=${HIVE_API_NODE_REGISTRY}/hafah
+# HAFAH_VERSION=${HIVE_API_NODE_VERSION}
 
 # The default setup will run the recommended version of Hivemind using the values
 # below.  You can override them here to run a custom version of Hivemind
-HIVEMIND_INSTANCE_IMAGE=${HIVE_API_NODE_REGISTRY}/hivemind/instance
-# HIVEMIND_INSTANCE_VERSION=f09bd298
-
+# HIVEMIND_IMAGE=${HIVE_API_NODE_REGISTRY}/hivemind
+# HIVEMIND_VERSION=${HIVE_API_NODE_VERSION}
+# HIVEMIND_REWRITER_IMAGE=${HIVE_API_NODE_REGISTRY}/hivemind/postgrest-rewriter
 
 # The default setup will run the recommended version of balance tracker,
 # you can run a custom version by un-commenting and modifying the
 # values below
-BALANCE_TRACKER_REGISTRY=${HIVE_API_NODE_REGISTRY}/balance_tracker
-# BALANCE_TRACKER_VERSION=66452803
-# BALANCE_TRACKER_VERSION=c9906c48
+# BALANCE_TRACKER_IMAGE=${HIVE_API_NODE_REGISTRY}/balance_tracker
+# BALANCE_TRACKER_VERSION=${HIVE_API_NODE_VERSION}
 
 
 # REPUTATION_TRACKER_ADDON
-HAF_REPUTATION_TRACKER_REGISTRY=${HIVE_API_NODE_REGISTRY}/reputation_tracker
-#HAF_REPUTATION_TRACKER_VERSION=f9f74604
-
-HAF_VERSION=f2cec24d
-
-HAFAH_VERSION=5a07a805
-BALANCE_TRACKER_VERSION=b51cb031
-HAF_BLOCK_EXPLORER_VERSION=9e5a1bf2
-REPUTATION_TRACKER_VERSION=6dfbe231
-
-HIVEMIND_INSTANCE_VERSION=fa851ba1
-
-# REPTRACKER_SCHEMA="hafbe-rt"
-# BTRACKER_SCHEMA="hafbe-bt"
+# REPUTATION_TRACKER_IMAGE=${HIVE_API_NODE_REGISTRY}/reputation_tracker
+# REPUTATION_TRACKER_VERSION=${HIVE_API_NODE_VERSION}
+
+
+# There are two ways of running Balance Tracker: as a standalone app, or
+# integrated with HAF Block Explorer.  While you can technically run both,
+# there's no good reason to do so--you'll just waste disk space and processing
+# power maintaining two copies of the data.
+# Regardless of which way you decide to run Balance Tracker, you will need
+# to run a single API server, and it needs to know which schema the data is
+# stored in.  It will be in "hafbe_bal" if you're running HAF Block Explorer,
+# and "btracker_app" if you're running Balance Tracker standalone.
+# The default behavior is to serve data from the HAF Block Explorer, but 
+# if you're only running the standalone Balance Tracker, uncomment the next
+# line:
+# BTRACKER_SCHEMA="btracker_app"
+ 
 # The default setup will run the recommended version of HAF block explorer,
 # you can run a custom version by un-commenting and modifying the
 # values below
-HAF_BLOCK_EXPLORER_REGISTRY=${HIVE_API_NODE_REGISTRY}/haf_block_explorer
-# HAF_BLOCK_EXPLORER_VERSION=b268faed
-# HAF_BLOCK_EXPLORER_VERSION=354960a1
+# HAF_BLOCK_EXPLORER_IMAGE=${HIVE_API_NODE_REGISTRY}/haf_block_explorer
+# HAF_BLOCK_EXPLORER_VERSION=${HIVE_API_NODE_VERSION}
 
 
-# The default setup uses "Jussi" as the API reverse proxy & cache for the old JSON-RPC-style
-# calls.  There is an alternate reverse proxy, "Drone", that you can choose to use instead:
+# The default setup uses "Drone" as the API reverse proxy & cache for the old JSON-RPC-style
+# calls.  There is an older alternative reverse proxy, "Jussi", that you can choose to use instead.
+# For more info about drone/jussi, see:
 # https://hive.blog/hive-139531/@deathwing/announcing-drone-or-leveling-up-hive-api-nodes-and-user-experience
-# To replace Jussi with Drone, uncomment the next line:
-# JSONRPC_API_SERVER_NAME=drone
+# To replace Drone with Jussi, uncomment the next line:
+# JSONRPC_API_SERVER_NAME=jussi
 
 # The default setup will run the recommended version of Jussi
 # you can run a custom version by un-commenting and modifying the
 # values below
-# JUSSI_REGISTRY=${HIVE_API_NODE_REGISTRY}/jussi
+# JUSSI_IMAGE=${HIVE_API_NODE_REGISTRY}/jussi
 # JUSSI_VERSION=latest
 # JUSSI_REDIS_MAX_MEMORY=8G
 
 # If you have chosen to run Drone instead of Jussi, it will run the
 # this version by default.  You can run a custom version by un-commenting
 # and modifying the values below
-# DRONE_REGISTRY=${HIVE_API_NODE_REGISTRY}/drone
+# DRONE_IMAGE=${HIVE_API_NODE_REGISTRY}/drone
 # DRONE_VERSION=latest
+# DRONE_LOG_LEVEL=warn,access_log=info
 
 # In the default configuration, synchronous broadcast_transaction calls are not handled by
 # your local stack, but instead are sent to a dedicated hived instance on api.hive.blog.
@@ -175,25 +176,33 @@ HAF_BLOCK_EXPLORER_REGISTRY=${HIVE_API_NODE_REGISTRY}/haf_block_explorer
 # For running a full stack:
 # if you need to run a custom image (for example, to use ACME DNS challenges), specify it here
 # CADDY_IMAGE=${HIVE_API_NODE_REGISTRY}/haf_api_node/caddy
-# CADDY_VERSION=2.7.4-alpine-with-cloudflare
+# CADDY_VERSION=latest
+
+# The hostname you'll be running this server on.  This should be a single hostname, the public
+# hostname your server will be accessible from.  This is used by the Swagger-UI REST API
+# explorer for generating URLs pointing at your server.  If this isn't a public server,
+# this can be a local domain name.
+PUBLIC_HOSTNAME="your.hostname.com"
 
-# The hostname you'll be running this server on.  There are several ways you can configure
-# this.  Some examples:
-# - to serve API using HTTPS, with automatic redirect from HTTP -> HTTPS, just give the
-#   hostname:
-#     PUBLIC_HOSTNAME="your.hostname.com"
+# There are several ways you can configure serving HTTP/HTTPS.  Some examples:
+# - to serve API using HTTPS with automatic redirect from HTTP -> HTTPS (the default), 
+#   just give the hostname:
+#     CADDY_SITES="your.hostname.com"
+#   In the normal case, where you want to serve HTTP/HTTPS from the hostname you set in
+#   PUBLIC_HOSTNAME above, you don't need to set this variable; it will automatically take
+#   the value of PUBLIC_HOSTNAME
 # - to serve using only HTTP (if you have nginx or something else handling SSL termination),
 #   you can use:
-#     PUBLIC_HOSTNAME="http://your.hostname.com"
+#     CADDY_SITES="http://your.hostname.com"
 #   or even:
-#     PUBLIC_HOSTNAME="http://"
+#     CADDY_SITES="http://"
 #   if you want to respond on any hostname
 # - to serve on either HTTP or HTTPS (i.e., respond to HTTP requests in the clear, instead of
 #   issuing a redirect):
-#     PUBLIC_HOSTNAME="http://your.hostname.com, https://your.hostname.com"
+#     CADDY_SITES="http://your.hostname.com, https://your.hostname.com"
 # - to serve on multiple hostnames, separate them with a comma and space:
-#     PUBLIC_HOSTNAME="your.hostname.com, your.other-hostname.net"
-PUBLIC_HOSTNAME="your.hostname.com"
+#     CADDY_SITES="your.hostname.com, your.other-hostname.net"
+# CADDY_SITES="your.hostname.com"
 
 # By default, we're configured to use a self-signed SSL certificate (by including the
 # file below, which tells Caddy to generate a self-signed certificate).  To obtain a real
@@ -204,8 +213,16 @@ PUBLIC_HOSTNAME="your.hostname.com"
 # real certificate for PUBLIC_HOSTNAME from LetsEncrypt.  If this server is
 # behind a firewall or NAT, or PUBLIC_HOSTNAME is misconfigured, it will fail
 # to get a certificate, and that will count against LetsEncrypt's rate limits.
-#TLS_SELF_SIGNED_SNIPPET=caddy/self-signed.snippet
-TLS_SELF_SIGNED_SNIPPET=/dev/null
+TLS_SELF_SIGNED_SNIPPET=caddy/self-signed.snippet
+# TLS_SELF_SIGNED_SNIPPET=/dev/null
+
+# By default, we restrict access to the /admin URLs to localhost.  You can allow
+# outside connections by switching the following variable to /dev/null.  First, though,
+# you should protect the admin endpoint with a password or restrict it to a local
+# network.  Read caddy/snippets/README.md for how to do that.
+LOCAL_ADMIN_ONLY_SNIPPET=caddy/local-admin-only.snippet
+# LOCAL_ADMIN_ONLY_SNIPPET=/dev/null
+
 
 # Caddy will only accept requests on the /admin/ endpoints over https by default.
 # This is so that you can password-protect them with HTTP basicauth.
@@ -216,12 +233,12 @@ TLS_SELF_SIGNED_SNIPPET=/dev/null
 
 # Monitoring env variables
 #
-export PROMETHEUS_VERSION=v2.49.1
-export NODE_EXPORTER_VERSION=v1.7.0
-export CADVISOR_VERSION=v0.47.2
-export GRAFANA_VERSION=10.3.3
-export LOKI_VERSION=2.9.4
-export PROMTAIL_VERSION=2.9.4
-export HIVED_PME_VERSION=49a7312d
-export BLACKBOX_VERSION=v0.24.0
-export DATA_SOURCE="postgresql://postgres@haf:5432/postgres?sslmode=disable"
+# PROMETHEUS_VERSION=v2.49.1
+# NODE_EXPORTER_VERSION=v1.7.0
+# CADVISOR_VERSION=v0.47.2
+# GRAFANA_VERSION=10.3.3
+# LOKI_VERSION=2.9.4
+# PROMTAIL_VERSION=2.9.4
+# HIVED_PME_VERSION=49a7312d
+# BLACKBOX_VERSION=v0.24.0
+# DATA_SOURCE="postgresql://postgres@haf:5432/postgres?sslmode=disable"
diff --git a/.gitignore b/.gitignore
index 4c49bd78f1d08f2bc09fa0bd8191ed38b7dce5e3..376f62f8011cf1ed6f9956102e2f4bc3e2b9a3df 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,3 @@
 .env
+repo_versions.txt
+*.sw?
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6df8d2ffc5abcb79142bea714ef827467a6d453c
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,23 @@
+stages:
+  - publish
+
+build_haproxy_healthchecks_docker_image:
+  stage: publish
+  variables:
+    DOCKER_BUILDKIT: 1
+    DOCKER_DRIVER: overlay2
+    DOCKER_TLS_CERTDIR: "/certs"
+  image: docker:27.3.1
+  services:
+    - docker:27.3.1-dind
+  script:
+    - "docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY"
+    - "docker login -u $BLOG_REGISTRY_USER -p $BLOG_REGISTRY_PASSWORD registry-upload.hive.blog"
+    - "(cd healthchecks && docker build -t $CI_REGISTRY_IMAGE/haproxy-healthchecks:$CI_COMMIT_TAG -t registry-upload.hive.blog/haf_api_node/haproxy-healthchecks:$CI_COMMIT_TAG .)"
+    - "docker push $CI_REGISTRY_IMAGE/haproxy-healthchecks:$CI_COMMIT_TAG"
+    - "docker push registry-upload.hive.blog/haf_api_node/haproxy-healthchecks:$CI_COMMIT_TAG"
+  tags:
+    - public-runner-docker
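+  # only publish images when a release tag (a tag starting with "1.") is pushed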
+  rules:
+    - if: $CI_COMMIT_TAG && $CI_COMMIT_TAG =~ /^1\..+$/
+      when: always
diff --git a/README.md b/README.md
index a2c71d6c56c71b91aa5b0d6a29a89af21df9f46c..b8d9b93dd930cb64b7158b9a0dad0221672afea0 100644
--- a/README.md
+++ b/README.md
@@ -271,7 +271,72 @@ The Hivemind data is now gone.
 If you're uninstalling Hivemind permanently, then remember to remove the `hivemind` profile from your `.env` file's `COMPOSE_PROFILES` line so it doesn't start automatically next time you do a `docker compose up -d`.
 
 If you're upgrading to a new version of hivemind:
-- if you're upgrading to a pre-release version, you'll need to set `HIVEMIND_INSTANCE_VERSION` in your `.env` file to the correct tag for the version you want to run.  If you're just upgrading to a new release version (the ones tagged `haf_api_node`), you can leave this alone.
+- if you're upgrading to a pre-release version, you'll need to set `HIVEMIND_VERSION` in your `.env` file to the correct tag for the version you want to run.  If you're just upgrading to a new release version (the ones tagged `haf_api_node`), you can leave this alone.
 - run `docker compose pull` to grab the new version
 - run `docker compose up -d` to bring up all services.  This should run hivemind's install, then launch the block processing container.
 - you can monitor Hivemind's sync process by watching the logs from `docker compose logs -f hivemind-block-processing`.  In a few short days, your Hivemind app should be fully synced and ready to handle API requests.
+
+# Scripts in the haf_api_node Directory
+
+## use_develop_env.py
+This script updates the `.env` file in the `haf_api_node` repository with the short git hashes of other repositories in the specified directory. It scans the given directory for git repositories, retrieves their remote URLs and short git hashes, and updates the `.env` file accordingly.
+
+Usage:
+```
+python3 use_develop_env.py <path_to_directory>
+```
+
+## make_ramdisk.sh
+This script creates a ramdisk and mounts it to the `/mnt/haf_shared_mem` directory. It sets the size of the ramdisk to 26GB and changes the permissions to allow read/write access for all users.
+
+Usage:
+```
+sudo ./make_ramdisk.sh
+```
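+
+To use the ramdisk for HAF's shared memory file, point `HAF_SHM_DIRECTORY` at it in your
+`.env` file; this matches the commented-out example already present in `.env.example`:
+```
+HAF_SHM_DIRECTORY="/mnt/haf_shared_mem"
+```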
+
+## clone_zfs_datasets.sh
+This script clones an existing ZFS dataset to create a new dataset. It is useful for creating backups or duplicating datasets for testing purposes. The script takes the source dataset and the target dataset as arguments and performs the cloning operation.
+
+Usage:
+```
+sudo ./clone_zfs_datasets.sh <source_dataset> <target_dataset>
+```
+Example:
+```
+sudo ./clone_zfs_datasets.sh haf-pool/haf-datadir haf-pool/haf-datadir-test-upgrade
+```
+
+## snapshot_zfs_datasets.sh
+This script creates a ZFS snapshot of the HAF datasets. It unmounts the datasets, takes a snapshot, and then remounts them. It also provides options for handling log files during the snapshot process.
+
+Usage:
+```
+sudo ./snapshot_zfs_datasets.sh [--env-file=filename] [--public-snapshot] [--temp-dir=dir] [--swap-logs-with-dataset=dataset] snapshot-name
+```
+Options:
+- `--env-file=filename`: Specify the environment file to use.
+- `--public-snapshot`: Move log files to /tmp before taking the snapshot, then restore them afterwards.
+- `--temp-dir=dir`: Use a different temp directory (use if /tmp isn't big enough).
+- `--swap-logs-with-dataset=dataset`: Swap the logs dataset with an empty dataset before taking the snapshot, then swap back afterwards.
+
+Example:
+```
+sudo ./snapshot_zfs_datasets.sh 20231023T1831Z-haf-only
+```
+
+## rollback_zfs_datasets.sh
+This script rolls back ZFS datasets to a specified snapshot. It unmounts the datasets, rolls them back to the named snapshot, and then remounts them. This discards all changes made to those datasets since the snapshot was taken.
+
+Usage:
+```
+sudo ./rollback_zfs_datasets.sh [--env-file=filename] [--zpool=zpool_name] [--top-level-dataset=dataset_name] snapshot-name
+```
+Options:
+- `--env-file=filename`: Specify the environment file to use.
+- `--zpool=zpool_name`: Specify the ZFS pool name.
+- `--top-level-dataset=dataset_name`: Specify the top-level dataset name.
+
+Example:
+```
+sudo ./rollback_zfs_datasets.sh --env-file=.env --zpool=haf-pool --top-level-dataset=haf-datadir snapshot_name
+```
\ No newline at end of file
diff --git a/assisted_startup.sh b/assisted_startup.sh
index edf72406633dad2bc4bfa21cde12dfd30327a5d1..2fcbb84150c56eace54687b0682786a9526dccc8 100755
--- a/assisted_startup.sh
+++ b/assisted_startup.sh
@@ -84,7 +84,7 @@ if [ ! -f .env ]; then
     echo "core: the minimal HAF system of a database and hived"
     echo "admin: useful tools for administrating HAF: pgadmin, pghero"
     echo "apps: core HAF apps: hivemind, hafah, hafbe (balance-tracker is a subapp)"
-    echo "servers: services for routing/caching API calls: haproxy, jussi (JSON caching), varnish (REST caching)"
+    echo "servers: services for routing/caching API calls: haproxy, jussi/drone (JSON caching), varnish (REST caching)"
     read -p "Run admin? (Y or N): " choice
     if [[ "$choice" == "Y" || "$choice" == "y" ]]; then
         echo "Adding admin to profiles..."
diff --git a/backend.yaml b/backend.yaml
index 2e7e706ef2c121547e1d18dd462e66ddbfb3d50b..0331513504491d65e4929c7e1b749c152d44fe60 100644
--- a/backend.yaml
+++ b/backend.yaml
@@ -1,11 +1,17 @@
 services:
   pghero:
-    image: ankane/pghero:v3.3.3
+    image: ankane/pghero:v3.6.1
     profiles:
       - admin
     environment:
       DATABASE_URL: postgres://pghero@haf:5432/haf_block_log
       RAILS_RELATIVE_URL_ROOT: /admin/pghero
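+    # treat pghero as healthy once its web server is accepting connections on port 8080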
+    healthcheck:
+      test: ["CMD-SHELL", "nc -z 127.0.0.1 8080 || exit 1"]
+      interval: 30s
+      timeout: 5s
+      retries: 3
+      start_period: 30s
     networks:
       - haf-network
     init: true
@@ -14,12 +20,18 @@ services:
         condition: service_healthy
 
   pgadmin:
-    image: dpage/pgadmin4:${PGADMIN_VERSION:-8.10}
+    image: dpage/pgadmin4:${PGADMIN_VERSION:-8.14}
     profiles:
       - admin
     environment:
       PGADMIN_DEFAULT_EMAIL: "admin@haf.world"
       PGADMIN_DEFAULT_PASSWORD: "admin"
+    healthcheck:
+      test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1/misc/ping || exit 1"]
+      interval: 10s
+      timeout: 3s
+      retries: 10
+      start_period: 1m
     networks:
       - haf-network
     init: true
diff --git a/balance_tracker.yaml b/balance_tracker.yaml
index d1095b49d58c874c129e17ef3d4e3e18fa3f3bf3..e50f12b22b0b2e8c07d3772434df707b8258ca46 100644
--- a/balance_tracker.yaml
+++ b/balance_tracker.yaml
@@ -1,46 +1,52 @@
+# Note: most of the services in this file are only used in the uncommon case where
+# the node is running Balance Tracker standalone.  Balance Tracker is typically
+# run as part of HAF Block Explorer, and the services related to that usage are
+# in haf_block_explorer.yaml.
+# The exception is the postgrest server & rewriter; those are shared between the two setups.
 services:
   balance-tracker-install:
-    image: ${BALANCE_TRACKER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/balance_tracker}:${BALANCE_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${BALANCE_TRACKER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/balance_tracker}:${BALANCE_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
-      - apps
-      - hafbe
-      - balance-tracker-setup
+      - balance-tracker-standalone
     environment:
       POSTGRES_HOST: haf
     networks:
       haf-network:
     command:
       - install_app
-      - --schema=${BTRACKER_SCHEMA:-hafbe_bal}
+      - --schema=btracker_app
       - --swagger-url=${PUBLIC_HOSTNAME}
     depends_on:
       haf:
         condition: service_healthy
+  # to uninstall Balance Tracker (standalone), shut down the stack and run a command like:
+  #   docker compose --profile core --profile balance-tracker-standalone-uninstall up -d
   balance-tracker-uninstall:
-    image: ${BALANCE_TRACKER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/balance_tracker}:${BALANCE_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${BALANCE_TRACKER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/balance_tracker}:${BALANCE_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
-      - balance-tracker-uninstall
+      - balance-tracker-standalone-uninstall
     environment:
       POSTGRES_HOST: haf
     networks:
       haf-network:
     command:
       - uninstall_app
-      - --schema=${BTRACKER_SCHEMA:-hafbe_bal}
+      - --schema=btracker_app
     depends_on:
       haf:
         condition: service_healthy
   balance-tracker-block-processing:
-    image: ${BALANCE_TRACKER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/balance_tracker}:${BALANCE_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${BALANCE_TRACKER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/balance_tracker}:${BALANCE_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - balance-tracker-standalone
     environment:
       POSTGRES_HOST: haf
+      POSTGRES_USER: btracker_owner
     networks:
       haf-network:
     command:
       - process_blocks
-      - --schema=${BTRACKER_SCHEMA:-hafbe_bal}
+      - --schema=btracker_app
     healthcheck:
       test: ["CMD-SHELL","/app/block-processing-healthcheck.sh"]
       interval: 60s
@@ -53,14 +59,14 @@ services:
       haf:
         condition: service_healthy
   balance-tracker-postgrest:
-    image: ${POSTGREST_REGISTRY:-registry.gitlab.syncad.com/hive/haf_api_node/postgrest}:${POSTGREST_VERSION:-latest}
+    image: ${POSTGREST_IMAGE:-registry.gitlab.syncad.com/hive/haf_api_node/postgrest}:${POSTGREST_VERSION:-latest}
     profiles:
       - apps
       - balance-tracker-standalone
       - hafbe
     environment:
       PGRST_ADMIN_SERVER_PORT: 3001
-      PGRST_DB_URI: postgresql://btracker_user@haf/haf_block_log
+      PGRST_DB_URI: postgresql://btracker_user@haf/haf_block_log?application_name=balance_tracker_postgrest
       PGRST_DB_SCHEMA: btracker_endpoints
       PGRST_DB_ANON_ROLE: btracker_user
       PGRST_DB_POOL: 20
@@ -79,23 +85,24 @@ services:
     depends_on:
       balance-tracker-install:
         condition: service_completed_successfully
+        required: false # allow this service to be missing (when only running hafbe)
+      block-explorer-install-balance-tracker:
+        condition: service_completed_successfully
+        required: false # allow this service to be missing (when only running standalone)
       haf:
         condition: service_healthy
   balance-tracker-postgrest-rewriter:
-    image: nginx
+    image: ${BALANCE_TRACKER_REWRITER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/balance_tracker/postgrest-rewriter}:${BALANCE_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - apps
       - balance-tracker-standalone
       - hafbe
-    volumes:
-      - type: bind
-        source: balance_tracker/balance_tracker_nginx.conf
-        target: /etc/nginx/nginx.conf
-        read_only: true
-      - type: bind
-        source: balance_tracker/rewrite_rules.conf
-        target: /etc/nginx/rewrite_rules.conf
-        read_only: true
+    healthcheck:
+      test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1:81/health || exit 1"]
+      interval: 10s
+      timeout: 3s
+      retries: 10
+      start_period: 1m
     depends_on:
       balance-tracker-postgrest:
         condition: service_healthy
diff --git a/balance_tracker/balance_tracker_nginx.conf b/balance_tracker/balance_tracker_nginx.conf
deleted file mode 100644
index e4e44eaa6089d631a3137e4cb633c81a9a99a932..0000000000000000000000000000000000000000
--- a/balance_tracker/balance_tracker_nginx.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Homepage and endpoints of the API "HAF Block Explorer".
-#
-worker_processes  5;
-error_log /dev/stdout info;
-worker_rlimit_nofile 8192;
-
-events {
-  worker_connections 4096;
-}
-http {
-    access_log /dev/stdout;
-    server {
-            listen 0.0.0.0:80 default_server;
-            server_name _;
-
-            location / {
-                    include rewrite_rules.conf;
-                    rewrite_log on;
-
-                    proxy_pass  http://balance-tracker-postgrest:3000;  # my PostREST is  here!
-
-                    proxy_set_header Host $host;
-                    proxy_set_header X-Real-IP $remote_addr;
-                    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-                    #default_type;
-                    proxy_hide_header Content-Location;
-                    proxy_set_header  Connection "";
-                    proxy_http_version 1.1;
-            }
-    }
-}
diff --git a/balance_tracker/rewrite_rules.conf b/balance_tracker/rewrite_rules.conf
deleted file mode 100644
index 9aebbd4cc8e4539abdec5b7d8c23a83cca6bd0b0..0000000000000000000000000000000000000000
--- a/balance_tracker/rewrite_rules.conf
+++ /dev/null
@@ -1,29 +0,0 @@
-rewrite ^/account-balances/([^/]+)/rewards/info /rpc/get_account_info_rewards?account-name=$1 break;
-# endpoint for get /account-balances/{account-name}/rewards/info
-
-rewrite ^/account-balances/([^/]+)/rewards /rpc/get_account_rewards?account-name=$1 break;
-# endpoint for get /account-balances/{account-name}/rewards
-
-rewrite ^/account-balances/([^/]+)/withdrawals /rpc/get_account_withdraws?account-name=$1 break;
-# endpoint for get /account-balances/{account-name}/withdrawals
-
-rewrite ^/account-balances/([^/]+)/savings /rpc/get_account_savings?account-name=$1 break;
-# endpoint for get /account-balances/{account-name}/savings
-
-rewrite ^/account-balances/([^/]+)/delegations /rpc/get_account_delegations?account-name=$1 break;
-# endpoint for get /account-balances/{account-name}/delegations
-
-rewrite ^/account-balances/([^/]+) /rpc/get_account_balances?account-name=$1 break;
-# endpoint for get /account-balances/{account-name}
-
-rewrite ^/balance-for-coins/([^/]+)/by-time /rpc/get_balance_for_coin_by_time?account-name=$1 break;
-# endpoint for get /balance-for-coins/{account-name}/by-time
-
-rewrite ^/balance-for-coins/([^/]+) /rpc/get_balance_for_coin_by_block?account-name=$1 break;
-# endpoint for get /balance-for-coins/{account-name}
-
-rewrite ^/$ / break;
-# endpoint for openapi spec itself
-
-rewrite ^/(.*)$ /rpc/$1 break;
-# default endpoint for everything else
diff --git a/caddy.yaml b/caddy.yaml
index a56f2b771d08e78bf7889cc58c8d47969d3d21f8..7462fc0f175ed2cc4eb6bf99085c3a7a2e026182 100644
--- a/caddy.yaml
+++ b/caddy.yaml
@@ -10,7 +10,8 @@ services:
       - 443:443/udp
     environment:
       PUBLIC_HOSTNAME: ${PUBLIC_HOSTNAME}
-      JSONRPC_API_SERVER_NAME: ${JSONRPC_API_SERVER_NAME:-jussi}
+      CADDY_SITES: ${CADDY_SITES:-${PUBLIC_HOSTNAME}}
+      JSONRPC_API_SERVER_NAME: ${JSONRPC_API_SERVER_NAME:-drone}
       ADMIN_ENDPOINT_PROTOCOL: ${ADMIN_ENDPOINT_PROTOCOL:-https}
     volumes:
       - type: bind
@@ -26,17 +27,21 @@ services:
         target: /etc/caddy/admin_html
         read_only: true
       - type: bind
-        source: ${HAF_DATA_DIRECTORY}/logs/pgbadger
+        source: ${HAF_DATA_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}}/logs/pgbadger
         target: /etc/caddy/pgbadger
         read_only: true
       - type: bind
-        source: ${HAF_DATA_DIRECTORY}/logs/caddy
+        source: ${HAF_DATA_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}}/logs/caddy
         target: /var/log/caddy
         read_only: false
       - type: bind
         source: ${TLS_SELF_SIGNED_SNIPPET}
         target: /etc/caddy/tls-self-signed-snippets/self-signed.snippet
         read_only: true
+      - type: bind
+        source: ${LOCAL_ADMIN_ONLY_SNIPPET}
+        target: /etc/caddy/local-admin-only-snippets/local-admin-only.snippet
+        read_only: true
       - type: volume
         source: caddy_data
         target: /data
diff --git a/caddy/Caddyfile b/caddy/Caddyfile
index 10f888962709097dd6dc73809cf6d12af7c862d8..fa91f4b056d1ce2067017c3d522f5e7959a08ee5 100644
--- a/caddy/Caddyfile
+++ b/caddy/Caddyfile
@@ -3,12 +3,16 @@
 }
 
 # Simple caddy config, handles SSL and forwards everything to varnish
-{$PUBLIC_HOSTNAME} {
+{$CADDY_SITES} {
   # Import a snippet that will generate a self-signed certificate by default.
   # To generate a real certificate, bind-mount an empty file here and then
   # put your real TLS config in a file in the snippets directory
   import tls-self-signed-snippets/*.snippet
 
+  # Import a snippet that will restrict connections to the admin endpoint
+  # to the localhost
+  import local-admin-only-snippets/*.snippet
+
   import snippets/*.snippet
 
   # Block API abusers outright
diff --git a/caddy/snippets/local_admin_only.snippet b/caddy/local-admin-only.snippet
similarity index 100%
rename from caddy/snippets/local_admin_only.snippet
rename to caddy/local-admin-only.snippet
diff --git a/clear_pool.sh b/clear_pool.sh
index 90182c9a34e34659b1c55bf5b99e4d7f172cee8d..1f75823c7a358924709f9a61e497e2f17fbcae98 100755
--- a/clear_pool.sh
+++ b/clear_pool.sh
@@ -1,5 +1,5 @@
 #brain dead script that needs improvement, but useful for me
 . ./.env
-sudo rm -rf ${HAF_LOG_DIRECTORY}/postgresql/*
-sudo rm -rf ${HAF_DATA_DIRECTORY}/haf_db_store/*
-rm -rf ${HAF_SHM_DIRECTORY}/shared_memory.bin ${HAF_SHM_DIRECTORY}/haf_wal/*
+sudo rm -rf ${HAF_LOG_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}/logs}/postgresql/*
+sudo rm -rf ${HAF_DATA_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}}/haf_db_store/*
+rm -rf ${HAF_SHM_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory}/shared_memory.bin ${HAF_SHM_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory}/haf_wal/*
diff --git a/compose.yml b/compose.yml
index dcc8b4f10549afc212846f3c43ab276189a931ad..d8bd0de6fb38cf28ad7fe9b1797dc337e1f4bc32 100644
--- a/compose.yml
+++ b/compose.yml
@@ -5,12 +5,11 @@ include:
   - backend.yaml
   - hafah.yaml
   - hivemind.yaml
-  - hivemind_reptracker.yaml
   - balance_tracker.yaml
   - reputation_tracker.yaml
   - haf_block_explorer.yaml
   - varnish.yaml
-  - ${JSONRPC_API_SERVER_NAME:-jussi}.yaml
+  - ${JSONRPC_API_SERVER_NAME:-drone}.yaml
   - haproxy.yaml
   - caddy.yaml
   - monitoring.yaml
diff --git a/compression.conf b/compression.conf
new file mode 100644
index 0000000000000000000000000000000000000000..1f833802f2e60c51b09b1cb2d1e1228dd10fdeca
--- /dev/null
+++ b/compression.conf
@@ -0,0 +1 @@
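+# Store TOASTed values with LZ4 compression, which is generally much faster to compress and decompress than the default pglz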
+default_toast_compression = 'lz4'
diff --git a/create_zfs_datasets.sh b/create_zfs_datasets.sh
index 7441b05c473bb9c6953eef9b979bb5d68c1bb42f..c06cf8110d0fae6b32f02aab5e6515ebac658879 100755
--- a/create_zfs_datasets.sh
+++ b/create_zfs_datasets.sh
@@ -6,7 +6,7 @@ print_help() {
   echo "Usage: $0 --env-file=filename"
 }
 
-OPTIONS=$(getopt -o he: --long env-file:,help,zpool:,top-level-dataset: -n "$0" -- "$@")
+OPTIONS=$(getopt -o he:s --long env-file:,help,zpool:,top-level-dataset:,skip-empty-snapshot -n "$0" -- "$@")
 
 if [ $? -ne 0 ]; then
     print_help
@@ -17,6 +17,7 @@ ZPOOL=""
 TOP_LEVEL_DATASET=""
 ZPOOL_MOUNT_POINT=""
 TOP_LEVEL_DATASET_MOUNTPOINT=""
+SKIP_EMPTY_SNAPSHOT=false
 
 eval set -- "$OPTIONS"
 
@@ -34,6 +35,10 @@ while true; do
       TOP_LEVEL_DATASET="$2"
       shift 2
       ;;
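+    # skip creating the initial 'empty' snapshot after the datasets are set up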
+    --skip-empty-snapshot|-s)
+      SKIP_EMPTY_SNAPSHOT=true
+      shift
+      ;;
     --help|-h)
       print_help
       exit 0
@@ -122,6 +127,7 @@ chown -R 105:109 "$TOP_LEVEL_DATASET_MOUNTPOINT/haf_db_store"
 mkdir -p "$TOP_LEVEL_DATASET_MOUNTPOINT/haf_postgresql_conf.d"
 cp pgtune.conf "$TOP_LEVEL_DATASET_MOUNTPOINT/haf_postgresql_conf.d"
 cp zfs.conf "$TOP_LEVEL_DATASET_MOUNTPOINT/haf_postgresql_conf.d"
+cp compression.conf "$TOP_LEVEL_DATASET_MOUNTPOINT/haf_postgresql_conf.d"
 cp logging.conf "$TOP_LEVEL_DATASET_MOUNTPOINT/haf_postgresql_conf.d"
 # 105:109 is postgres:postgres inside the container
 chown -R 105:109 "$TOP_LEVEL_DATASET_MOUNTPOINT/haf_postgresql_conf.d"
@@ -133,3 +139,10 @@ mkdir -p "$TOP_LEVEL_DATASET_MOUNTPOINT/logs/caddy"
 chown -R 1000:100 "$TOP_LEVEL_DATASET_MOUNTPOINT/logs"
 # 105:109 is postgres:postgres inside the container
 chown -R 105:109 "$TOP_LEVEL_DATASET_MOUNTPOINT/logs/postgresql" "$TOP_LEVEL_DATASET_MOUNTPOINT/logs/pgbadger"
+
+if [ "$SKIP_EMPTY_SNAPSHOT" = false ]; then
+  # Create a snapshot called 'empty'
+  ./snapshot_zfs_datasets.sh empty
+else
+  echo "Skipping creation of 'empty' snapshot."
+fi
diff --git a/drone.yaml b/drone.yaml
index 50092229ad22de4329265fa489085616fcfa9d00..734799f193f3296409edc1697395e4101c8f7e64 100644
--- a/drone.yaml
+++ b/drone.yaml
@@ -1,10 +1,10 @@
 services:
   drone:
-    image: ${DRONE_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/drone}:${DRONE_VERSION:-latest}
+    image: ${DRONE_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/drone}:${DRONE_VERSION:-latest}
     profiles:
       - servers
     environment:
-      RUST_LOG: access_log=info
+      RUST_LOG: ${DRONE_LOG_LEVEL:-warn,access_log=info}
     volumes:
       - type: bind
         source: ./drone/config.yaml
@@ -17,7 +17,7 @@ services:
       - haf-network
   # to use our log analysis tools, we need to capture drone's traffic using a 
   # specific nginx logging config.  To do this, rename the actual drone
-  # service above to 'jussi-real', then uncomment the next section. 
+  # service above to 'drone-real', then uncomment the next section. 
   # drone:
   #   image: nginx
   #   profiles:
@@ -28,12 +28,22 @@ services:
   #       target: /etc/nginx/nginx.conf
   #       read_only: true
   #     - type: bind
-  #       source: ${HAF_LOG_DIRECTORY}/drone
+  #       source: ${HAF_LOG_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}/logs}/drone
   #       target: /var/log/drone
   #   depends_on:
   #     drone-real:
   #       condition: service_started
+  #   networks:
+  #     - haf-network
+  # drone:
+  #   image: mitmproxy/mitmproxy
+  #   profiles:
+  #     - servers
   #   ports:
-  #     - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:9001:9000"
+  #     - 8081:8081
+  #   command: mitmweb --listen-port 9000 --web-host 0.0.0.0 --mode reverse:http://drone-real:9000
+  #   depends_on:
+  #     drone-real:
+  #       condition: service_started
   #   networks:
   #     - haf-network
diff --git a/drone/config.yaml b/drone/config.yaml
index f93b676263c352f872be432adf40e6afed19d144..403664fbc6c8da453ab272d841633bcd3ad4504a 100644
--- a/drone/config.yaml
+++ b/drone/config.yaml
@@ -120,17 +120,20 @@ ttls:
   appbase.condenser_api.get_account_history: 6
   appbase.condenser_api.get_content: 6
   appbase.condenser_api.get_profile: 6
+  appbase.condenser_api.get_follow_count: 6
   appbase.database_api.find_accounts: 3
   appbase.condenser_api.get_dynamic_global_properties: 1
   hive: NO_CACHE
   bridge: NO_CACHE
   bridge.get_discussion: 6
   bridge.get_account_posts: 12
-  bridge.get_ranked_posts: 6
   bridge.get_profile: 6
-  bridge.get_community: 6
   bridge.get_post: 6
-  bridge.get_trending_topics: 3
+  bridge.unread_notifications: 3
+  bridge.get_ranked_posts: 6
+  bridge.get_community: 12
+  bridge.get_trending_topics: 30
+  bridge.does_user_follow_any_lists: 3
   hafsql: NO_CACHE
 
 # how long to wait for the backend to respond before giving up
diff --git a/drone/nginx.conf b/drone/nginx.conf
index 51d95b0d59b5177a1f76d5900ac52fd92b3040e3..d37b4fe0f144b0ea8065865d3dc3707f36d71fe3 100644
--- a/drone/nginx.conf
+++ b/drone/nginx.conf
@@ -13,7 +13,7 @@ http {
                                   '"$http_user_agent" || "$http_x_forwarded_for" || '
                                   '"$upstream_http_x_jussi_cache_hit" || "$upstream_http_x_jussi_namespace" || '
                                   '"$upstream_http_x_jussi_api" || "$upstream_http_x_jussi_method" || "$upstream_http_x_jussi_params" || '
-                                  '$upstream_connect_time || $upstream_response_time || "$request_body"';
+                                  '$upstream_connect_time || $upstream_response_time || "$request_body" || "$request_id"';
   # we intend to change the log to a structured format (jsonl) to make parsing easier, something like below.
   # that work hasn't been done yet
   log_format json_log escape=json '{'
@@ -49,13 +49,16 @@ http {
     server_name _;
 
     location / {
-      access_log /var/log/drone/access_log api_log buffer=32k flush=5s;
+      #access_log /var/log/drone/access_log api_log buffer=32k flush=5s;
       # switch to this to log in jsonl format instead
       access_log /var/log/drone/access_log.json json_log buffer=32k flush=5s;
       proxy_pass http://drone;
       # Allow fast streaming HTTP/1.1 pipes (keep-alive, unbuffered)
       proxy_http_version 1.1;
-      proxy_request_buffering off;
+
+      client_body_buffer_size 128k; # Adjust size as needed
+      proxy_request_buffering on;
+      client_body_in_single_buffer on;
       proxy_buffering off;
       #proxy_set_header forwarded 'by=\"_$hostname\";$for_addr;proto=$scheme;host=\"$http_host\"';
     }
diff --git a/email-alerts/README.md b/email-alerts/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0385550c0b889d7d9a5c2e303331fc6a75164d1e
--- /dev/null
+++ b/email-alerts/README.md
@@ -0,0 +1,37 @@
+This "email-alerts" configuration allows you to have haproxy send you email
+messages to notify you whenever a service goes down.
+
+These alerts are very basic, consisting of a single line that looks like:
+```
+[HAProxy Alert] Server balance-tracker/balance-tracker is DOWN. 0 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue
+```
+
+It's not much, but it's enough to tell you that there's something that needs 
+your attention.  If you have a more sophisticated monitoring system like
+Zabbix or Nagios, you may want to look into using that instead.
+
+To use this config, add a line to your .env file telling docker to merge this 
+file in:
+
+```
+COMPOSE_FILE=compose.yml:email-alerts/compose.email-alerts.yml
+```
+
+In addition, you'll need to add several other settings to your .env file:
+
+First, set the login information for your SMTP server.
+```
+SMTP_HOST="smtp.gmail.com:587"
+SMTP_USER="me@gmail.com"
+SMTP_PASS="myapppassword"
+# Auth defaults to "plain", you can uncomment to use "login" instead
+# SMTP_AUTH_TYPE="login"
+```
+
+You also need to tell it where to send the emails.  If you need to, you
+can customize the "from" address and alert threshold.
+```
+HAPROXY_EMAIL_TO="me@gmail.com"
+# HAPROXY_EMAIL_FROM="noreply@${PUBLIC_HOSTNAME}"
+# HAPROXY_EMAIL_LEVEL="notice"
+```
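+
+Putting it all together, the additions to your .env file might look something like this
+(every value below is a placeholder; substitute your own SMTP account and destination
+address):
+```
+COMPOSE_FILE=compose.yml:email-alerts/compose.email-alerts.yml
+SMTP_HOST="smtp.gmail.com:587"
+SMTP_USER="me@gmail.com"
+SMTP_PASS="myapppassword"
+HAPROXY_EMAIL_TO="me@gmail.com"
+```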
diff --git a/email-alerts/compose.email-alerts.yml b/email-alerts/compose.email-alerts.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5a0cb180292bca4f7445fc7d62c923597995a555
--- /dev/null
+++ b/email-alerts/compose.email-alerts.yml
@@ -0,0 +1,2 @@
+include:
+  - email-alerts/haproxy.email-alerts.yaml
diff --git a/email-alerts/haproxy.email-alerts.yaml b/email-alerts/haproxy.email-alerts.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a78cc16919f6b75190d4eb0bd1f36a8d16ac1d31
--- /dev/null
+++ b/email-alerts/haproxy.email-alerts.yaml
@@ -0,0 +1,33 @@
+services:
+  haproxy:
+    environment:
+      HAPROXY_EMAIL_FROM: "${HAPROXY_EMAIL_FROM:-noreply@${PUBLIC_HOSTNAME}}"
+      HAPROXY_EMAIL_TO: "${HAPROXY_EMAIL_TO}"
+      HAPROXY_EMAIL_LEVEL: "${HAPROXY_EMAIL_LEVEL:-notice}"
+    volumes:
+      # override the file bind-mounted by the haproxy.yaml in the upper-level directory
+      - type: bind
+        source: ../haproxy/mailer.cfg
+        target: /usr/local/etc/haproxy/_mailer.cfg
+        read_only: true
+    # if you've enabled email notifications when services go down, haproxy must wait for
+    # the smtp-relay service to be up before launching, otherwise it will fail
+    depends_on:
+      smtp-relay:
+        condition: service_started
+  smtp-relay:
+    profiles:
+      - servers
+    image: grafana/smtprelay
+    # When using this service, SMTP_HOST/USER/PASS are required, they're defaulted to empty strings
+    # to keep docker compose from warning when not using this service
+    command:
+      - -hostname=${PUBLIC_HOSTNAME}
+      - -listen=0.0.0.0:25
+      - -allowed_nets= 
+      - -remote_host=${SMTP_HOST}
+      - -remote_user=${SMTP_USER}
+      - -remote_pass=${SMTP_PASS}
+      - -remote_auth=${SMTP_AUTH_TYPE:-plain}
+    networks:
+      - haf-network
diff --git a/exposed/README.md b/exposed/README.md
index 54ad02f0779a7167e6bba0040ba1b5c7aafdcb41..fb696355a8ee66cf0650bd383be364a56ae2b4bc 100644
--- a/exposed/README.md
+++ b/exposed/README.md
@@ -1,10 +1,10 @@
 This "exposed" configuration includes directives that allow you to directly access 
 the services from other machines -- without this config, the only exposed ports
 go to the main webserver, and the only way to access, e.g., hafah's postgrest
-server is by sending it through caddy -> jussi -> haproxy -> hafah-postgrest
+server is by sending it through caddy -> jussi/drone -> haproxy -> hafah-postgrest
 
 Exposing ports like this is useful if you want to split the stack across multiple 
-machines (e.g., caddy + jussi + redis + varnish on one machine, everything else
+machines (e.g., caddy + drone + varnish on one machine, everything else
 on the others).  Or, if you have two redundant servers, and you want to be able
 to add backup entries in haproxy that send traffic to the other server.
 
diff --git a/exposed/compose.exposed.yml b/exposed/compose.exposed.yml
index cc68f8d270b7f8d70306bf9fc8e1171e9d1599ed..49a2ff1d320cb52fb0536c1f6da6fb23afebdc0f 100644
--- a/exposed/compose.exposed.yml
+++ b/exposed/compose.exposed.yml
@@ -2,7 +2,7 @@ include:
   - exposed/haproxy.exposed.yaml
   - exposed/hafah.exposed.yaml
   - exposed/haf_block_explorer.exposed.yaml
-  - exposed/${JSONRPC_API_SERVER_NAME:-jussi}.exposed.yaml
+  - exposed/${JSONRPC_API_SERVER_NAME:-drone}.exposed.yaml
   - exposed/hivemind.exposed.yaml
   - exposed/haf_base.exposed.yaml
   - exposed/balance_tracker.exposed.yaml
diff --git a/exposed/haf_block_explorer.exposed.yaml b/exposed/haf_block_explorer.exposed.yaml
index 6f3eee2cdb0c93e1000e88cb151f3c0fb54e175f..71f4af67652d602366ea09d7c30a542b99da3e31 100644
--- a/exposed/haf_block_explorer.exposed.yaml
+++ b/exposed/haf_block_explorer.exposed.yaml
@@ -1,4 +1,4 @@
 services:
-  block-explorer-postgrest:
+  block-explorer-postgrest-rewriter:
     ports:
-      - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_PORT_PREFIX:-1}7005:3000"
+      - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_PORT_PREFIX:-1}7005:80"
diff --git a/exposed/hafah.exposed.yaml b/exposed/hafah.exposed.yaml
index cc2c0ccb1eb59b181e3e89210de81ee8697fb738..cea7a3fb0adf480b2e5bdcbebb165d57d58eeeb8 100644
--- a/exposed/hafah.exposed.yaml
+++ b/exposed/hafah.exposed.yaml
@@ -1,4 +1,4 @@
 services:
-  hafah-postgrest:
+  hafah-postgrest-rewriter:
     ports:
-      - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_PORT_PREFIX:-1}7003:3000"
+      - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_PORT_PREFIX:-1}7003:80"
diff --git a/exposed/haproxy.exposed.yaml b/exposed/haproxy.exposed.yaml
index c95ba961d1b1064be45e3fb6f3de31d686be5081..3846ca55441f09650f1b092e12d127dedb97d378 100644
--- a/exposed/haproxy.exposed.yaml
+++ b/exposed/haproxy.exposed.yaml
@@ -3,4 +3,4 @@ services:
     # expose the healthchecks so we can connect to them from an external haproxy
     # shift the ports by 20000 so they don't conflict with haproxy's (if we want to expose those also)
     ports:
-      - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_HEALTHCHECK_PORT_PREFIX:-2}7001-${HAF_API_NODE_EXPOSED_HEALTHCHECK_PORT_PREFIX:-2}7005:7001-7005"
+      - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_HEALTHCHECK_PORT_PREFIX:-2}7001-${HAF_API_NODE_EXPOSED_HEALTHCHECK_PORT_PREFIX:-2}7009:7001-7009"
diff --git a/exposed/hivemind.exposed.yaml b/exposed/hivemind.exposed.yaml
index 0870d9fc6ccc31c6605f5e0da3026a0d330d76b4..51657c3d740023e4853263c46f44d5c9b0cc1cce 100644
--- a/exposed/hivemind.exposed.yaml
+++ b/exposed/hivemind.exposed.yaml
@@ -1,4 +1,12 @@
 services:
-  hivemind-server:
+  # If you want to benchmark the old hivemind server against the new one, you can use this
+  # to expose it on a different port from the postgrest version.  For everyone else,
+  # it's not needed at all.  This should be removed once we remove the config for the
+  # Python version of the server.
+  #
+  # hivemind-server:
+  #   ports:
+  #     - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_PORT_PREFIX:-3}7002:8080"
+  hivemind-postgrest-rewriter:
     ports:
-      - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_PORT_PREFIX:-1}7002:8080"
+      - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_PORT_PREFIX:-1}7002:80"
diff --git a/exposed/hivemind_rtracker.exposed.yaml b/exposed/hivemind_rtracker.exposed.yaml
deleted file mode 100644
index b53e64ba3d0fccfb72a154f51f3be10ca98328b7..0000000000000000000000000000000000000000
--- a/exposed/hivemind_rtracker.exposed.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-services:
-  hivemind-rtracker-postgrest:
-    ports:
-      - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_PORT_PREFIX:-1}7010:3000"
\ No newline at end of file
diff --git a/exposed/reputation_tracker.exposed.yaml b/exposed/reputation_tracker.exposed.yaml
index ed4c0c45f8f8b69b6a88240f87ba2114687baff4..63038678777168f8843197110c338939c139cc20 100644
--- a/exposed/reputation_tracker.exposed.yaml
+++ b/exposed/reputation_tracker.exposed.yaml
@@ -1,4 +1,4 @@
 services:
-  reputation-tracker-postgrest:
+  reputation-tracker-postgrest-rewriter:
     ports:
-      - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_PORT_PREFIX:-1}7009:3000"
\ No newline at end of file
+      - "${HAF_API_NODE_EXPOSED_IPADDR:-0.0.0.0}:${HAF_API_NODE_EXPOSED_PORT_PREFIX:-1}7009:80"
diff --git a/haf_base.yaml b/haf_base.yaml
index 5c9ef7d42f4b07643cecfee4658e2e990d4b6dea..2f9d54f88c8186c855aa50a62489e4b73c9f5d81 100644
--- a/haf_base.yaml
+++ b/haf_base.yaml
@@ -3,7 +3,7 @@ services:
     profiles:
       - core
       - block-explorer-uninstall
-    image: ${HAF_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf}:${HAF_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${HAF_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf}:${HAF_VERSION:-${HIVE_API_NODE_VERSION}}
     networks:
       - haf-network
     tty: true
@@ -11,7 +11,7 @@ services:
     init: true
     entrypoint:
       - /home/haf_admin/docker_entrypoint.sh
-      - ${ARGUMENTS}
+      - ${ARGUMENTS:-}
     # after requesting a shutdown with SIGTERM, allow the container two minutes to exit
     # before killing it.  The default of ten seconds isn't enough for postgresql to
     # cleanly shut down, and would often make PostgreSQL perform crash recovery at the
@@ -27,7 +27,8 @@ services:
         host    haf_block_log    btracker_owner   ${COMPOSE_PROJECT_NAME}-balance-tracker-block-processing-1.${NETWORK_NAME}    trust\n
         host    haf_block_log    btracker_user    ${COMPOSE_PROJECT_NAME}-balance-tracker-postgrest-1.${NETWORK_NAME}           trust\n
 
-        host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-reputation-tracker-install-1.${NETWORK_NAME}          trust\n
+        host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-reputation-tracker-install-1.${NETWORK_NAME}   trust\n
+
         host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-reputation-tracker-uninstall-1.${NETWORK_NAME}        trust\n
         host    haf_block_log    reptracker_owner ${COMPOSE_PROJECT_NAME}-reputation-tracker-block-processing-1.${NETWORK_NAME} trust\n
         host    haf_block_log    reptracker_user  ${COMPOSE_PROJECT_NAME}-reputation-tracker-postgrest-1.${NETWORK_NAME}        trust\n
@@ -37,11 +38,12 @@ services:
         host    haf_block_log    hafah_user       ${COMPOSE_PROJECT_NAME}-hafah-postgrest-1.${NETWORK_NAME}                     trust\n
         host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-hafah-benchmarks-1.${NETWORK_NAME}                    trust\n
 
-        host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-block-explorer-install-1.${NETWORK_NAME}              trust\n
-        host    haf_block_log    hafbe_owner      ${COMPOSE_PROJECT_NAME}-block-explorer-install-1.${NETWORK_NAME}              trust\n
+        host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-block-explorer-install-schema-1.${NETWORK_NAME}       trust\n
+        host    haf_block_log    hafbe_owner      ${COMPOSE_PROJECT_NAME}-block-explorer-install-schema-1.${NETWORK_NAME}       trust\n
         host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-block-explorer-uninstall-1.${NETWORK_NAME}            trust\n
         host    haf_block_log    hafbe_owner      ${COMPOSE_PROJECT_NAME}-block-explorer-uninstall-1.${NETWORK_NAME}            trust\n
         host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-block-explorer-uninstall-balance-tracker-1.${NETWORK_NAME}           trust\n
+        host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-block-explorer-install-balance-tracker-1.${NETWORK_NAME}             trust\n
         host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-block-explorer-uninstall-reputation-tracker-1.${NETWORK_NAME}        trust\n
 
 
@@ -54,12 +56,11 @@ services:
         host    haf_block_log    hivemind         ${COMPOSE_PROJECT_NAME}-hivemind-install-1.${NETWORK_NAME}                    trust\n
         host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-hivemind-block-processing-1.${NETWORK_NAME}           trust\n
         host    haf_block_log    hivemind         ${COMPOSE_PROJECT_NAME}-hivemind-block-processing-1.${NETWORK_NAME}           trust\n
+        host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-hivemind-server-1.${NETWORK_NAME}                     trust\n
         host    haf_block_log    hivemind         ${COMPOSE_PROJECT_NAME}-hivemind-server-1.${NETWORK_NAME}                     trust\n
-        host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-hivemind-uninstall-1.${NETWORK_NAME}                  trust\n
+        host    haf_block_log    hivemind         ${COMPOSE_PROJECT_NAME}-hivemind-postgrest-server-1.${NETWORK_NAME}           trust\n
 
-        host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-hivemind-rtracker-install-1.${NETWORK_NAME}          trust\n
-        host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-hivemind-rtracker-uninstall-1.${NETWORK_NAME}        trust\n
-        host    haf_block_log    reptracker_user  ${COMPOSE_PROJECT_NAME}-hivemind-rtracker-postgrest-1.${NETWORK_NAME}        trust\n
+        host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-hivemind-uninstall-1.${NETWORK_NAME}                  trust\n
 
         host    haf_block_log    haf_admin        ${COMPOSE_PROJECT_NAME}-pgadmin-1.${NETWORK_NAME}                             trust\n
         host    postgres         haf_admin        ${COMPOSE_PROJECT_NAME}-pgadmin-1.${NETWORK_NAME}                             trust\n
@@ -82,6 +83,8 @@ services:
       # 5 minutes is probably long enough that anyone using NVMes will be able to recover their database at
       # startup.  If you're on slower drives or older hardware, you may need to increase this.
       PGCTLTIMEOUT: 300
+      # HACK, do not commit
+      OVERRIDE_LD_PRELOAD: ""
     # when mounting volumes, declare the directories you want to bind-mount here,
     # using either the short or long syntax.  Don't declare them as top-level named
     # volumes and mount them by name.  Declaring them inline here always results
@@ -89,16 +92,16 @@ services:
     # seems to vary from system to system (even when `o: rbind` is specified).
     volumes:
       - type: bind
-        source: ${HAF_DATA_DIRECTORY}/
+        source: ${HAF_DATA_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}}/
         target: /home/hived/datadir
       - type: bind
-        source: ${HAF_LOG_DIRECTORY}/postgresql
+        source: ${HAF_LOG_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}/logs}/postgresql
         target: /home/hived/postgresql_logs/
       - type: bind
-        source: ${HAF_SHM_DIRECTORY}/
+        source: ${HAF_SHM_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory}/
         target: /home/hived/shm_dir
       - type: bind
-        source: ${HAF_WAL_DIRECTORY:-${HAF_SHM_DIRECTORY}/haf_wal}
+        source: ${HAF_WAL_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory/haf_wal}
         target: /home/hived/wal_dir
     shm_size: 4gb
     healthcheck:
@@ -108,13 +111,13 @@ services:
       retries: 10
       start_period: 72h
   logrotate:
-    image: ${LOGROTATE_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf_api_node/logrotate}:${LOGROTATE_VERSION:-latest}
+    image: ${LOGROTATE_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf_api_node/logrotate}:${LOGROTATE_VERSION:-latest}
     profiles:
       - logrotate
     init: true
     volumes:
       - type: bind
-        source: ${HAF_LOG_DIRECTORY}/postgresql
+        source: ${HAF_LOG_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}/logs}/postgresql
         target: /var/log/postgresql
       - type: bind
         source: ./logrotate/logrotate.d
@@ -128,10 +131,10 @@ services:
     command: infinity
     volumes:
       - type: bind
-        source: ${HAF_DATA_DIRECTORY}/logs/pgbadger
+        source: ${HAF_DATA_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}}/logs/pgbadger
         target: /pgbadger_output
       - type: bind
-        source: ${HAF_LOG_DIRECTORY}/postgresql
+        source: ${HAF_LOG_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}/logs}/postgresql
         target: /postgresql_logs
         read_only: true
     labels:
diff --git a/haf_block_explorer.yaml b/haf_block_explorer.yaml
index d22c2d71be36cd3426abb854a7b135296eb70580..ff20cc30708319cf0324defc40ebfc9a19f5395e 100644
--- a/haf_block_explorer.yaml
+++ b/haf_block_explorer.yaml
@@ -1,6 +1,6 @@
 services:
-  block-explorer-install:
-    image: ${HAF_BLOCK_EXPLORER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf_block_explorer}:${HAF_BLOCK_EXPLORER_VERSION:-${HIVE_API_NODE_VERSION}}
+  block-explorer-install-schema:
+    image: ${HAF_BLOCK_EXPLORER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf_block_explorer}:${HAF_BLOCK_EXPLORER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - apps
       - hafbe
@@ -14,28 +14,45 @@ services:
     depends_on:
       haf:
         condition: service_healthy
-      balance-tracker-install:
-        condition: service_completed_successfully
-      reputation-tracker-install:
+      block-explorer-install-balance-tracker:
         condition: service_completed_successfully
       hafah-install:
         condition: service_completed_successfully
+      reputation-tracker-install:
+        condition: service_completed_successfully
+  # to uninstall HAF Block Explorer, shut down the stack and run a command like:
+  #   docker compose --profile core --profile block-explorer-uninstall up -d
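+  #   (once the one-shot uninstall jobs exit, a suggested follow-up is to run
+  #   `docker compose --profile core --profile block-explorer-uninstall down` and then bring the
+  #   stack back up with your usual profiles; adjust this workflow to your setup as needed)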
   block-explorer-uninstall:
-    image: ${HAF_BLOCK_EXPLORER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf_block_explorer}:${HAF_BLOCK_EXPLORER_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${HAF_BLOCK_EXPLORER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf_block_explorer}:${HAF_BLOCK_EXPLORER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - block-explorer-uninstall
     environment:
       POSTGRES_HOST: haf
     command:
       - uninstall_app
-      - --skip-btracker
     networks:
       haf-network:
     depends_on:
       haf:
         condition: service_healthy
+  block-explorer-install-balance-tracker:
+    image: ${BALANCE_TRACKER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/balance_tracker}:${BALANCE_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
+    profiles:
+      - apps
+      - hafbe
+    environment:
+      POSTGRES_HOST: haf
+    networks:
+      haf-network:
+    command:
+      - install_app
+      - --schema=hafbe_bal
+      - --swagger-url=${PUBLIC_HOSTNAME}
+    depends_on:
+      haf:
+        condition: service_healthy
   block-explorer-uninstall-balance-tracker:
-    image: ${BALANCE_TRACKER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/balance_tracker}:${BALANCE_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${BALANCE_TRACKER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/balance_tracker}:${BALANCE_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - block-explorer-uninstall
     environment:
@@ -44,61 +61,65 @@ services:
       haf-network:
     command:
       - uninstall_app
+      - --schema=hafbe_bal
     depends_on:
       block-explorer-uninstall:
         condition: service_completed_successfully
       haf:
         condition: service_healthy
   block-explorer-block-processing:
-    image: ${HAF_BLOCK_EXPLORER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf_block_explorer}:${HAF_BLOCK_EXPLORER_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${HAF_BLOCK_EXPLORER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf_block_explorer}:${HAF_BLOCK_EXPLORER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - apps
       - hafbe
     environment:
       POSTGRES_HOST: haf
+      POSTGRES_USER: hafbe_owner
     command:
       - process_blocks
     networks:
       haf-network:
+    healthcheck:
+      test: ["CMD-SHELL","/app/block-processing-healthcheck.sh"]
+      interval: 60s
+      timeout: 10s
+      retries: 3
+      start_period: 48h
     depends_on:
-      block-explorer-install:
+      block-explorer-install-schema:
         condition: service_completed_successfully
       haf:
         condition: service_healthy
   block-explorer-postgrest-rewriter:
-    image: openresty/openresty:alpine
+    image: ${HAF_BLOCK_EXPLORER_REWRITER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf_block_explorer/postgrest-rewriter}:${HAF_BLOCK_EXPLORER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - apps
       - hafbe
-    volumes:
-      - type: bind
-        source: haf_block_explorer/haf_block_explorer_nginx.conf
-        target: /usr/local/openresty/nginx/conf/nginx.conf
-        read_only: true
-      - type: bind
-        source: haf_block_explorer/rewrite_rules.conf
-        target: /usr/local/openresty/nginx/conf/rewrite_rules.conf
-        read_only: true
+    healthcheck:
+      test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1:81/health || exit 1"]
+      interval: 10s
+      timeout: 3s
+      retries: 10
+      start_period: 1m
     depends_on:
       block-explorer-postgrest:
         condition: service_healthy
     networks:
       - haf-network
-
   block-explorer-postgrest:
-    image: ${POSTGREST_REGISTRY:-registry.gitlab.syncad.com/hive/haf_api_node/postgrest}:${POSTGREST_VERSION:-latest}
+    image: ${POSTGREST_IMAGE:-registry.gitlab.syncad.com/hive/haf_api_node/postgrest}:${POSTGREST_VERSION:-latest}
     profiles:
       - apps
       - hafbe
     environment:
       PGRST_ADMIN_SERVER_PORT: 3001
-      PGRST_DB_URI: postgresql://hafbe_user@haf/haf_block_log
+      PGRST_DB_URI: postgresql://hafbe_user@haf/haf_block_log?application_name=block_explorer_postgrest
       PGRST_DB_SCHEMA: hafbe_endpoints
       PGRST_DB_ANON_ROLE: hafbe_user
       PGRST_DB_POOL: 20
       PGRST_DB_POOL_ACQUISITION_TIMEOUT: 10
       PGRST_DB_ROOT_SPEC: root
-      PGRST_DB_EXTRA_SEARCH_PATH: ${BTRACKER_SCHEMA:-hafbe_bal}, ${REPTRACKER_SCHEMA:-hafbe_rep}
+      PGRST_DB_EXTRA_SEARCH_PATH: hafbe_bal, reptracker_app
       # PGRST_OPENAPI_SERVER_PROXY_URI: http://${PUBLIC_HOSTNAME}/hafbe_api/
     networks:
       haf-network:
@@ -109,7 +130,7 @@ services:
       retries: 10
       start_period: 1m
     depends_on:
-      block-explorer-install:
+      block-explorer-install-schema:
         condition: service_completed_successfully
       haf:
         condition: service_healthy
diff --git a/haf_block_explorer/haf_block_explorer_nginx.conf b/haf_block_explorer/haf_block_explorer_nginx.conf
deleted file mode 100644
index 20c2f4db1503ab4c3d8288345b75345ac4ed3bf9..0000000000000000000000000000000000000000
--- a/haf_block_explorer/haf_block_explorer_nginx.conf
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Homepage and endpoints of the API "HAF Block Explorer".
-#
-worker_processes  5;
-error_log /dev/stdout info;
-worker_rlimit_nofile 8192;
-
-events {
-  worker_connections 4096;
-}
-
-http {
-    access_log /dev/stdout;
-    # Initialize Lua and load functions here
-    init_by_lua_block {
-        -- Base64 encoding function
-        local b = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
-
-        function enc(data)
-            return ((data:gsub('.', function(x) 
-                local r, b = '', x:byte()
-                for i = 8, 1, -1 do 
-                    r = r .. (b % 2^i - b % 2^(i - 1) > 0 and '1' or '0') 
-                end
-                return r
-            end)..'0000'):gsub('%d%d%d?%d?%d?%d?', function(x)
-                if (#x < 6) then return '' end
-                local c = 0
-                for i = 1, 6 do 
-                    c = c + (x:sub(i, i) == '1' and 2^(6 - i) or 0) 
-                end
-                return b:sub(c + 1, c + 1)
-            end)..({ '', '==', '=' })[#data % 3 + 1])
-        end
-
-        -- Process path filters
-        function process_path_filters(args)
-            local path_filters = {}
-            
-            for path_filter in args:gmatch("path%-filter=([^&]*)") do
-                table.insert(path_filters, enc(ngx.unescape_uri(path_filter)))
-            end
-            if #path_filters > 0 then
-                return "{" .. table.concat(path_filters, ",") .. "}"
-            else
-                return "{}"
-            end
-        end
-    }
-
-    server {
-        listen 0.0.0.0:80 default_server;
-        server_name _;
-
-        location / {
-            # Set the path_filters variable
-            set_by_lua_block $path_filters {
-                local args = ngx.var.args or ""
-                return process_path_filters(args)
-            }
-
-            # Modify args to remove path-filters
-            set_by_lua_block $args {
-                local args = ngx.var.args or ""
-                return args:gsub("&?path%-filter=[^&]*", "")
-            }
-
-            # Include rewrite rules after setting variables
-            include rewrite_rules.conf;
-            rewrite_log on;
-
-            proxy_pass http://block-explorer-postgrest:3000;
-
-            proxy_set_header Host $host;
-            proxy_set_header X-Real-IP $remote_addr;
-            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-            proxy_hide_header Content-Location;
-            proxy_set_header Connection "";
-            proxy_http_version 1.1;
-        }
-    }
-}
diff --git a/haf_block_explorer/rewrite_rules.conf b/haf_block_explorer/rewrite_rules.conf
deleted file mode 100644
index d36e206bc98fa64d8f6806a3e14f9ef7f5368f8b..0000000000000000000000000000000000000000
--- a/haf_block_explorer/rewrite_rules.conf
+++ /dev/null
@@ -1,44 +0,0 @@
-rewrite ^/operation-type-counts /rpc/get_latest_blocks break;
-# endpoint for get /operation-type-counts
-
-rewrite ^/input-type/([^/]+) /rpc/get_input_type?input-value=$1 break;
-# endpoint for get /input-type/{input-value}
-
-rewrite ^/last-synced-block /rpc/get_hafbe_last_synced_block break;
-# endpoint for get /last-synced-block
-
-rewrite ^/version /rpc/get_hafbe_version break;
-# endpoint for get /version
-
-rewrite ^/block-numbers /rpc/get_block_by_op?path-filter=$path_filters break;
-# endpoint for get /block-numbers
-
-rewrite ^/accounts/([^/]+)/comment-operations /rpc/get_comment_operations?account-name=$1 break;
-# endpoint for get /accounts/{account-name}/comment-operations
-
-rewrite ^/accounts/([^/]+)/authority /rpc/get_account_authority?account-name=$1 break;
-# endpoint for get /accounts/{account-name}/authority
-
-rewrite ^/accounts/([^/]+) /rpc/get_account?account-name=$1 break;
-# endpoint for get /accounts/{account-name}
-
-rewrite ^/witnesses/([^/]+)/votes/history /rpc/get_witness_votes_history?account-name=$1 break;
-# endpoint for get /witnesses/{account-name}/votes/history
-
-rewrite ^/witnesses/([^/]+)/voters/count /rpc/get_witness_voters_num?account-name=$1 break;
-# endpoint for get /witnesses/{account-name}/voters/count
-
-rewrite ^/witnesses/([^/]+)/voters /rpc/get_witness_voters?account-name=$1 break;
-# endpoint for get /witnesses/{account-name}/voters
-
-rewrite ^/witnesses/([^/]+) /rpc/get_witness?account-name=$1 break;
-# endpoint for get /witnesses/{account-name}
-
-rewrite ^/witnesses /rpc/get_witnesses break;
-# endpoint for get /witnesses
-
-rewrite ^/$ / break;
-# endpoint for openapi spec itself
-
-rewrite ^/(.*)$ /rpc/$1 break;
-# default endpoint for everything else
diff --git a/hafah.yaml b/hafah.yaml
index a24ad73e618c806085a48eb521c2a51a592677ee..3ceabd264aa7a022d15707c350a7f2114df16633 100644
--- a/hafah.yaml
+++ b/hafah.yaml
@@ -1,6 +1,6 @@
 services:
   hafah-install:
-    image: ${HAFAH_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hafah}:${HAFAH_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${HAFAH_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hafah}:${HAFAH_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - apps
       - hafah
@@ -17,7 +17,7 @@ services:
       haf:
         condition: service_healthy
   hafah-uninstall:
-    image: ${HAFAH_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hafah}:${HAFAH_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${HAFAH_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hafah}:${HAFAH_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - hafah-uninstall
     environment:
@@ -30,13 +30,14 @@ services:
       haf:
         condition: service_healthy
   hafah-postgrest:
-    image: ${POSTGREST_REGISTRY:-registry.gitlab.syncad.com/hive/haf_api_node/postgrest}:${POSTGREST_VERSION:-latest}
+    image: ${POSTGREST_IMAGE:-registry.gitlab.syncad.com/hive/haf_api_node/postgrest}:${POSTGREST_VERSION:-latest}
     profiles:
       - apps
       - hafah
+      - hafbe
     environment:
       PGRST_ADMIN_SERVER_PORT: 3001
-      PGRST_DB_URI: postgresql://hafah_user@haf/haf_block_log
+      PGRST_DB_URI: postgresql://hafah_user@haf/haf_block_log?application_name=hafah_postgrest
       PGRST_DB_SCHEMA: hafah_endpoints, hafah_api_v1, hafah_api_v2
       PGRST_DB_ROOT_SPEC: home
       PGRST_DB_ANON_ROLE: hafah_user
@@ -57,25 +58,23 @@ services:
       haf:
         condition: service_healthy
   hafah-postgrest-rewriter:
-    image: openresty/openresty:alpine
+    image: ${HAFAH_REWRITER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hafah/postgrest-rewriter}:${HAFAH_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - apps
       - hafah
-    volumes:
-      - type: bind
-        source: hafah_rest/hafah_rest_nginx.conf
-        target: /usr/local/openresty/nginx/conf/nginx.conf
-        read_only: true
-      - type: bind
-        source: hafah_rest/rewrite_rules.conf
-        target: /usr/local/openresty/nginx/conf/rewrite_rules.conf
-        read_only: true
+      - hafbe
+    healthcheck:
+      test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1:81/health || exit 1"]
+      interval: 10s
+      timeout: 3s
+      retries: 10
+      start_period: 1m
     depends_on:
       hafah-postgrest:
         condition: service_healthy
     networks:
       - haf-network
-  hafah-benchmarks-old-style:  #NOTE: need to first run mkdir -p ${HAF_DATA_DIRECTORY}/tests/hafah_api_benchmarks/old-style
+  hafah-benchmarks-old-style:  #NOTE: you must first run mkdir -p ${HAF_DATA_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}}/tests/hafah_api_benchmarks/old-style
     image: registry.gitlab.syncad.com/hive/tests_api/benchmark_aio:latest
     profiles:
       - hafah-benchmarks
@@ -89,7 +88,7 @@ services:
       WDIR: /workspace
     volumes: 
       - type: bind
-        source: ${HAF_DATA_DIRECTORY}/tests/hafah_api_benchmarks/old-style
+        source: ${HAF_DATA_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}}/tests/hafah_api_benchmarks/old-style
         target: /workspace
     networks:
       haf-network:
@@ -114,7 +113,7 @@ services:
   #     WDIR: /workspace
   #   volumes: 
   #     - type: bind
-  #       source: ${HAF_DATA_DIRECTORY}/tests/hafah_api_benchmarks/new-style
+  #       source: ${HAF_DATA_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}}/tests/hafah_api_benchmarks/new-style
   #       target: /workspace
   #   networks:
   #     haf-network:
diff --git a/hafah_rest/hafah_rest_nginx.conf b/hafah_rest/hafah_rest_nginx.conf
deleted file mode 100644
index de51c4bf780ca7ac564cd35e33ab99d2291ce828..0000000000000000000000000000000000000000
--- a/hafah_rest/hafah_rest_nginx.conf
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Homepage and endpoints of the API "HAF Block Explorer".
-#
-worker_processes  5;
-error_log /dev/stdout info;
-worker_rlimit_nofile 8192;
-
-events {
-  worker_connections 4096;
-}
-
-http {
-    access_log /dev/stdout;
-    # Initialize Lua and load functions here
-    init_by_lua_block {
-        -- Base64 encoding function
-        local b = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
-
-        function enc(data)
-            return ((data:gsub('.', function(x) 
-                local r, b = '', x:byte()
-                for i = 8, 1, -1 do 
-                    r = r .. (b % 2^i - b % 2^(i - 1) > 0 and '1' or '0') 
-                end
-                return r
-            end)..'0000'):gsub('%d%d%d?%d?%d?%d?', function(x)
-                if (#x < 6) then return '' end
-                local c = 0
-                for i = 1, 6 do 
-                    c = c + (x:sub(i, i) == '1' and 2^(6 - i) or 0) 
-                end
-                return b:sub(c + 1, c + 1)
-            end)..({ '', '==', '=' })[#data % 3 + 1])
-        end
-
-        -- Process path filters
-        function process_path_filters(args)
-            local path_filters = {}
-            
-            for path_filter in args:gmatch("path%-filter=([^&]*)") do
-                table.insert(path_filters, enc(ngx.unescape_uri(path_filter)))
-            end
-            if #path_filters > 0 then
-                return "{" .. table.concat(path_filters, ",") .. "}"
-            else
-                return "{}"
-            end
-        end
-    }
-
-    server {
-        listen 0.0.0.0:80 default_server;
-        server_name _;
-
-        location / {
-            # Set the path_filters variable
-            set_by_lua_block $path_filters {
-                local args = ngx.var.args or ""
-                return process_path_filters(args)
-            }
-
-            # Modify args to remove path-filters
-            set_by_lua_block $args {
-                local args = ngx.var.args or ""
-                return args:gsub("&?path%-filter=[^&]*", "")
-            }
-
-            # Include rewrite rules after setting variables
-            include rewrite_rules.conf;
-            rewrite_log on;
-
-            proxy_pass  http://hafah-postgrest:3000;  # my PostREST is  here!
-
-            proxy_set_header Host $host;
-            proxy_set_header X-Real-IP $remote_addr;
-            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-            proxy_hide_header Content-Location;
-            proxy_set_header Connection "";
-            proxy_http_version 1.1;
-        }
-    }
-}
diff --git a/hafah_rest/rewrite_rules.conf b/hafah_rest/rewrite_rules.conf
deleted file mode 100644
index e431f936746f942929bd46c3154d70b75ce61ff0..0000000000000000000000000000000000000000
--- a/hafah_rest/rewrite_rules.conf
+++ /dev/null
@@ -1,50 +0,0 @@
-rewrite ^/block-number-by-date/([^/]+) /rpc/get_block_by_time?timestamp=$1 break;
-# endpoint for get /block-number-by-date/{timestamp}
-
-rewrite ^/global-state /rpc/get_block break;
-# endpoint for get /global-state
-
-rewrite ^/headblock /rpc/get_head_block_num break;
-# endpoint for get /headblock
-
-rewrite ^/version /rpc/get_version break;
-# endpoint for get /version
-
-rewrite ^/accounts/([^/]+)/operation-types /rpc/get_acc_op_types?account-name=$1 break;
-# endpoint for get /accounts/{account-name}/operation-types
-
-rewrite ^/accounts/([^/]+)/operations /rpc/get_ops_by_account?account-name=$1 break;
-# endpoint for get /accounts/{account-name}/operations
-
-rewrite ^/transactions/([^/]+) /rpc/get_transaction?transaction-id=$1 break;
-# endpoint for get /transactions/{transaction-id}
-
-rewrite ^/operation-types/([^/]+)/keys /rpc/get_operation_keys?type-id=$1 break;
-# endpoint for get /operation-types/{type-id}/keys
-
-rewrite ^/operation-types /rpc/get_op_types break;
-# endpoint for get /operation-types
-
-rewrite ^/operations/([^/]+) /rpc/get_operation?operation-id=$1 break;
-# endpoint for get /operations/{operation-id}
-
-rewrite ^/operations /rpc/get_operations break;
-# endpoint for get /operations
-
-rewrite ^/blocks/([^/]+)/operations /rpc/get_ops_by_block_paging?block-num=$1&path-filter=$path_filters break;
-# endpoint for get /blocks/{block-num}/operations
-
-rewrite ^/blocks/([^/]+)/header /rpc/get_block_header?block-num=$1 break;
-# endpoint for get /blocks/{block-num}/header
-
-rewrite ^/blocks/([^/]+) /rpc/get_block?block-num=$1 break;
-# endpoint for get /blocks/{block-num}
-
-rewrite ^/blocks /rpc/get_block_range break;
-# endpoint for get /blocks
-
-rewrite ^/$ / break;
-# endpoint for openapi spec itself
-
-rewrite ^/(.*)$ /rpc/$1 break;
-# default endpoint for everything else
diff --git a/haproxy.yaml b/haproxy.yaml
index a1fa963e1e4be3b97aec5df65656b10ea3e214ef..1f2875adad2680ec15c95a929ba043f68d4ec7f4 100644
--- a/haproxy.yaml
+++ b/haproxy.yaml
@@ -12,17 +12,28 @@ services:
         source: ./haproxy/haproxy.cfg
         target: /usr/local/etc/haproxy/haproxy.cfg
         read_only: true
+      - type: bind
+        source: ./haproxy/no-mailer.cfg
+        target: /usr/local/etc/haproxy/_mailer.cfg
+        read_only: true
     networks:
       - haf-network
     healthcheck:
       test: wget -q --spider 127.0.0.1:8001 
+    command: -W -db -f /usr/local/etc/haproxy
   haproxy-healthchecks:
-    image: ${HAPROXY_HEALTHCHECKS_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf_api_node/haproxy-healthchecks}:${HAPROXY_HEALTHCHECKS_VERSION:-latest}
+    image: ${HAPROXY_HEALTHCHECKS_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/haf_api_node/haproxy-healthchecks}:${HAPROXY_HEALTHCHECKS_VERSION:-${HIVE_API_NODE_VERSION}}
     environment:
       POSTGRES_URL: postgresql://haf_admin@haf/haf_block_log
       POSTGRES_URL_HAFBE: postgresql://hafbe_user@haf/haf_block_log
       POSTGRES_URL_BTRACKER: postgresql://btracker_user@haf/haf_block_log
       POSTGRES_URL_REPTRACKER: postgresql://reptracker_user@haf/haf_block_log
+    healthcheck:
+      test: ["CMD-SHELL", "/healthcheck_healthcheck.sh || exit 1"]
+      interval: 10s
+      timeout: 3s
+      retries: 10
+      start_period: 1m
     profiles:
       - servers
       - haproxy-healthchecks
diff --git a/haproxy/haproxy.cfg b/haproxy/haproxy.cfg
index f9c41bebebd07d1c8c0141e659eef12b82ba9125..47310e5a37c8611bace1f5a8d783210ddb22fcef 100644
--- a/haproxy/haproxy.cfg
+++ b/haproxy/haproxy.cfg
@@ -7,26 +7,11 @@ global
   presetenv SYNC_BROADCAST_BACKEND_PORT 443
   presetenv SYNC_BROADCAST_BACKEND_SSL ssl 
 
-defaults
-  log     global
-  mode    http
-  option  httplog
-  option  dontlognull
-  option  forwardfor
-  option  http-server-close
-  option  log-health-checks
-  timeout connect 5s
-  timeout client  30s
-  timeout server  30s
-  timeout tunnel  1h
-  default-server init-addr last,libc,none resolvers docker check
-  #errorfile 400 /etc/haproxy/errors/400.http
-  #errorfile 403 /etc/haproxy/errors/403.http
-  #errorfile 408 /etc/haproxy/errors/408.http
-  #errorfile 500 /etc/haproxy/errors/500.http
-  #errorfile 502 /etc/haproxy/errors/502.http
-  #errorfile 503 /etc/haproxy/errors/503.http
-  #errorfile 504 /etc/haproxy/errors/504.http
+# DEFAULTS
+# do not put a defaults section here; the defaults section is included
+# from either the `mailer.cfg` or `no-mailer.cfg` file (only one of those
+# files is included; which one depends on your setup).  Make any changes
+# to defaults in both of those files.
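+#
+# To switch to the mailer defaults, point the bind mount that haproxy.yaml makes
+# for /usr/local/etc/haproxy/_mailer.cfg at ./haproxy/mailer.cfg instead of
+# ./haproxy/no-mailer.cfg.  One possible way (an untested sketch, assuming you
+# layer an extra compose file of your own on top of the stack) is an override like:
+#
+#   services:
+#     haproxy:
+#       volumes:
+#         - type: bind
+#           source: ./haproxy/mailer.cfg
+#           target: /usr/local/etc/haproxy/_mailer.cfg
+#           read_only: true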
 
 resolvers docker
   parse-resolv-conf
@@ -87,11 +72,6 @@ frontend reputation-tracker-in-7009
   option http-server-close
   default_backend reputation-tracker
 
-frontend hivemind-rtracker-in-7010
-  bind *:7010
-  option http-server-close
-  default_backend hivemind-rtracker
-
 backend hived
   balance roundrobin
   server haf haf:8090 check agent-check agent-addr haproxy-healthchecks agent-port 7001 agent-inter 10s
@@ -102,26 +82,42 @@ backend hived-http
 
 backend balance-tracker
   balance roundrobin
-  server balance-tracker balance-tracker-postgrest-rewriter:80 
+  # balance tracker doesn't have a cheap API method we can call, so test whether we can get
+  # the OpenAPI JSON
+  option httpchk
+  http-check connect
+  http-check send meth HEAD uri /
+  http-check expect status 200
+  timeout check 100
+  server balance-tracker balance-tracker-postgrest-rewriter:80 check agent-check agent-addr haproxy-healthchecks agent-port 7004 agent-inter 10s
 
 backend reputation-tracker
   balance roundrobin
+  option httpchk
+  http-check connect
+  http-check send meth GET uri /last-synced-block
+  http-check expect status 200
   server reputation-tracker reputation-tracker-postgrest-rewriter:80 check agent-check agent-addr haproxy-healthchecks agent-port 7009 agent-inter 10s
 
-backend hivemind-rtracker
-  balance roundrobin
-  server hivemind-rtracker hivemind-rtracker-postgrest-rewriter:80 check agent-check agent-addr haproxy-healthchecks agent-port 7010 agent-inter 10s
 backend hafah
   balance roundrobin
-  server hafah-postgrest hafah-postgrest-rewriter:80 
+  option httpchk
+  http-check connect
+  http-check send meth GET uri /version
+  http-check expect status 200
+  server hafah-postgrest hafah-postgrest-rewriter:80 check agent-check agent-addr haproxy-healthchecks agent-port 7003 agent-inter 10s
 
 backend hivemind
   balance roundrobin
-  server hivemind hivemind-server:8080 check agent-check agent-addr haproxy-healthchecks agent-port 7002 agent-inter 10s
+  server hivemind hivemind-postgrest-rewriter:80 check agent-check agent-addr haproxy-healthchecks agent-port 7002 agent-inter 10s
 
 backend block-explorer
   balance roundrobin
-  server block-explorer block-explorer-postgrest-rewriter:80 
+  option httpchk
+  http-check connect
+  http-check send meth GET uri /last-synced-block
+  http-check expect status 200
+  server block-explorer block-explorer-postgrest-rewriter:80 check agent-check agent-addr haproxy-healthchecks agent-port 7005 agent-inter 10s
 
 backend sync-hived
   balance roundrobin
diff --git a/haproxy/mailer.cfg b/haproxy/mailer.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..8fd04bfdeea2d0f0dd1dd70aa4a8dfaf255a6c44
--- /dev/null
+++ b/haproxy/mailer.cfg
@@ -0,0 +1,28 @@
+mailers mta
+  mailer smtp smtp-relay:25
+
+defaults
+  log     global
+  mode    http
+  option  httplog
+  option  dontlognull
+  option  forwardfor
+  option  http-server-close
+  option  log-health-checks
+  timeout connect 5s
+  timeout client  30s
+  timeout server  30s
+  timeout tunnel  1h
+  default-server init-addr last,libc,none resolvers docker check
+  #errorfile 400 /etc/haproxy/errors/400.http
+  #errorfile 403 /etc/haproxy/errors/403.http
+  #errorfile 408 /etc/haproxy/errors/408.http
+  #errorfile 500 /etc/haproxy/errors/500.http
+  #errorfile 502 /etc/haproxy/errors/502.http
+  #errorfile 503 /etc/haproxy/errors/503.http
+  #errorfile 504 /etc/haproxy/errors/504.http
+  email-alert from donotreply@syncad.com
+  email-alert to efrias@syncad.com
+  email-alert mailers mta
+  email-alert level notice
+
diff --git a/haproxy/no-mailer.cfg b/haproxy/no-mailer.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..058307905d2343443f7db05d959b54d3232144cf
--- /dev/null
+++ b/haproxy/no-mailer.cfg
@@ -0,0 +1,20 @@
+defaults
+  log     global
+  mode    http
+  option  httplog
+  option  dontlognull
+  option  forwardfor
+  option  http-server-close
+  option  log-health-checks
+  timeout connect 5s
+  timeout client  30s
+  timeout server  30s
+  timeout tunnel  1h
+  default-server init-addr last,libc,none resolvers docker check
+  #errorfile 400 /etc/haproxy/errors/400.http
+  #errorfile 403 /etc/haproxy/errors/403.http
+  #errorfile 408 /etc/haproxy/errors/408.http
+  #errorfile 500 /etc/haproxy/errors/500.http
+  #errorfile 502 /etc/haproxy/errors/502.http
+  #errorfile 503 /etc/haproxy/errors/503.http
+  #errorfile 504 /etc/haproxy/errors/504.http
diff --git a/healthchecks/Dockerfile b/healthchecks/Dockerfile
index f3f42194980bcdc697ecc700dfa700c30d6258d8..58a7f9d6ea7f017aa4380a2557feb16cdfac0303 100644
--- a/healthchecks/Dockerfile
+++ b/healthchecks/Dockerfile
@@ -3,13 +3,14 @@ FROM alpine:3.18.3
 RUN apk --no-cache add postgresql14-client
 
 COPY checks/hived.sh /checks/hived.sh
-COPY checks/hafbe_bal.sh /checks/hafbe_bal.sh
-COPY checks/hafbe_rep.sh /checks/hafbe_rep.sh
+COPY checks/hafbe_btracker.sh /checks/hafbe_btracker.sh
+COPY checks/hafbe_reptracker.sh /checks/hafbe_reptracker.sh
 COPY checks/hivemind.sh /checks/hivemind.sh
 COPY checks/hafah.sh /checks/hafah.sh
 COPY checks/hafbe.sh /checks/hafbe.sh
 COPY checks/format_seconds.sh /checks/format_seconds.sh
 COPY checks/check_haf_lib.sh /checks/check_haf_lib.sh
+COPY healthcheck_healthcheck.sh /
 COPY docker_entrypoint.sh /
 
 ENTRYPOINT ["/docker_entrypoint.sh"]
diff --git a/healthchecks/checks/check_haf_lib.sh b/healthchecks/checks/check_haf_lib.sh
index fe8c5560cd627d1230cfcadde39ad79ee0ff11ed..d5058dbf8c42879d62e25b443da0cf28be3654b8 100755
--- a/healthchecks/checks/check_haf_lib.sh
+++ b/healthchecks/checks/check_haf_lib.sh
@@ -5,7 +5,7 @@ check_haf_lib() {
     echo "down #HAF not in sync"
     exit 1
   fi
-  LAST_IRREVERSIBLE_BLOCK_AGE=$(psql "$POSTGRES_URL" --quiet --no-align --tuples-only --command="select extract('epoch' from now() - created_at)::integer from hive.blocks where num = (select consistent_block from hive.irreversible_data)")
+  LAST_IRREVERSIBLE_BLOCK_AGE=$(psql "$POSTGRES_URL" --quiet --no-align --tuples-only --command="select extract('epoch' from now() - created_at)::integer from hafd.blocks where num = (select consistent_block from hafd.irreversible_data)")
   if [ "$LAST_IRREVERSIBLE_BLOCK_AGE" -gt 60 ]; then
     age_string=$(format_seconds "$LAST_IRREVERSIBLE_BLOCK_AGE")
     echo "down #HAF LIB over a minute old ($age_string)"
diff --git a/healthchecks/checks/hafbe.sh b/healthchecks/checks/hafbe.sh
index 64bf31861c1ff2943593b7dd7ecfaf98dfd631f6..0893d7ad558976a1af225001c9532090d81b3a9d 100755
--- a/healthchecks/checks/hafbe.sh
+++ b/healthchecks/checks/hafbe.sh
@@ -8,21 +8,14 @@ trap "trap - 2 15 && kill -- -\$\$" 2 15
 
 check_haf_lib
 
-REPTRACKER_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_REPTRACKER" --quiet --no-align --tuples-only --command="select extract('epoch' from now() - created_at)::integer from hive.blocks where num = (select hive.app_get_current_block_num('hafbe_rep'))")
+REPTRACKER_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_REPTRACKER" --quiet --no-align --tuples-only --command="select extract('epoch' from hive.get_app_current_block_age('reptracker_app'))::integer")
 if [ "$REPTRACKER_LAST_PROCESSED_BLOCK_AGE" -gt 60 ]; then
   age_string=$(format_seconds "$REPTRACKER_LAST_PROCESSED_BLOCK_AGE")
-  echo "down #hafbe_rep block over a minute old ($age_string)"
+  echo "down #reptracker_app block over a minute old ($age_string)"
   exit 3
 fi
 
-BTRACKER_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_BTRACKER" --quiet --no-align --tuples-only --command="select extract('epoch' from now() - created_at)::integer from hive.blocks where num = (select hive.app_get_current_block_num('hafbe_bal'))")
-if [ "$BTRACKER_LAST_PROCESSED_BLOCK_AGE" -gt 60 ]; then
-  age_string=$(format_seconds "$BTRACKER_LAST_PROCESSED_BLOCK_AGE")
-  echo "down #hafbe_bal block over a minute old ($age_string)"
-  exit 3
-fi
-
-HAFBE_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_HAFBE" --quiet --no-align --tuples-only --command="select extract('epoch' from now() - created_at)::integer from hive.blocks where num = (select hive.app_get_current_block_num('hafbe_app'))")
+HAFBE_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_HAFBE" --quiet --no-align --tuples-only --command="select extract('epoch' from hive.get_app_current_block_age(ARRAY['hafbe_app', 'hafbe_bal']))::integer")
 if [ "$HAFBE_LAST_PROCESSED_BLOCK_AGE" -gt 60 ]; then
   age_string=$(format_seconds "$HAFBE_LAST_PROCESSED_BLOCK_AGE")
   echo "down #hafbe block over a minute old ($age_string)"
diff --git a/healthchecks/checks/hafbe_btracker.sh b/healthchecks/checks/hafbe_btracker.sh
index ae48610d2e7d2e21f34e676ee380384b31d16107..fe540482dcffe544020d0a3c0d262b9f0e1619b5 100755
--- a/healthchecks/checks/hafbe_btracker.sh
+++ b/healthchecks/checks/hafbe_btracker.sh
@@ -8,7 +8,7 @@ trap "trap - 2 15 && kill -- -\$\$" 2 15
 
 check_haf_lib
 
-BTRACKER_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_BTRACKER" --quiet --no-align --tuples-only --command="select extract('epoch' from now() - created_at)::integer from hive.blocks where num = (select hive.app_get_current_block_num('btracker_app'))")
+BTRACKER_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_BTRACKER" --quiet --no-align --tuples-only --command="select extract('epoch' from hive.get_app_current_block_age('hafbe_bal'))::integer")
 if [ "$BTRACKER_LAST_PROCESSED_BLOCK_AGE" -gt 60 ]; then
   age_string=$(format_seconds "$BTRACKER_LAST_PROCESSED_BLOCK_AGE")
   echo "down #hafbe_bal block over a minute old ($age_string)"
diff --git a/healthchecks/checks/hafbe_reptracker.sh b/healthchecks/checks/hafbe_reptracker.sh
index 52339b09fb2eaf0d8886293710e2a1186f8da745..1fe1d209cd3ca1e2dd846715accd49a2e16bf5c3 100755
--- a/healthchecks/checks/hafbe_reptracker.sh
+++ b/healthchecks/checks/hafbe_reptracker.sh
@@ -8,7 +8,7 @@ trap "trap - 2 15 && kill -- -\$\$" 2 15
 
 check_haf_lib
 
-REPTRACKER_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_REPTRACKER" --quiet --no-align --tuples-only --command="select extract('epoch' from now() - created_at)::integer from hive.blocks where num = (select hive.app_get_current_block_num('reptracker_app'))")
+REPTRACKER_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_REPTRACKER" --quiet --no-align --tuples-only --command="select extract('epoch' from hive.get_app_current_block_age('reptracker_app'))::integer")
 if [ "$REPTRACKER_LAST_PROCESSED_BLOCK_AGE" -gt 60 ]; then
   age_string=$(format_seconds "$REPTRACKER_LAST_PROCESSED_BLOCK_AGE")
   echo "down #hafbe_rep block over a minute old ($age_string)"
diff --git a/healthchecks/checks/hivemind.sh b/healthchecks/checks/hivemind.sh
index 9bea3fd0bb8cf54259b87dd99156a2757957750b..4f98ff83f16b17544835a215bbb7fa988b925fbe 100755
--- a/healthchecks/checks/hivemind.sh
+++ b/healthchecks/checks/hivemind.sh
@@ -8,7 +8,14 @@ trap "trap - 2 15 && kill -- -\$\$" 2 15
 
 check_haf_lib
 
-HIVEMIND_LAST_IMPORTED_BLOCK_AGE=$(psql "$POSTGRES_URL" --quiet --no-align --tuples-only --command="select extract('epoch' from now() - (select last_imported_block_date from hivemind_app.hive_state limit 1))::integer")
+REPTRACKER_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_REPTRACKER" --quiet --no-align --tuples-only --command="select extract('epoch' from hive.get_app_current_block_age('reptracker_app'))::integer")
+if [ "$REPTRACKER_LAST_PROCESSED_BLOCK_AGE" -gt 60 ]; then
+  age_string=$(format_seconds "$REPTRACKER_LAST_PROCESSED_BLOCK_AGE")
+  echo "down #reptracker_app block over a minute old ($age_string)"
+  exit 3
+fi
+
+HIVEMIND_LAST_IMPORTED_BLOCK_AGE=$(psql "$POSTGRES_URL" --quiet --no-align --tuples-only --command="select extract('epoch' from hive.get_app_current_block_age('hivemind_app'))::integer")
 if [ "$HIVEMIND_LAST_IMPORTED_BLOCK_AGE" -gt 60 ]; then
   age_string=$(format_seconds "$HIVEMIND_LAST_IMPORTED_BLOCK_AGE")
   echo "down #hivemind block over a minute old ($age_string)"
diff --git a/healthchecks/checks/hivemind_rtracker.sh b/healthchecks/checks/hivemind_rtracker.sh
index d93763e2a9450ba3b4a88f9d525728c76be8f973..e816326ff5f551be8c1523cb3ba80bc4174b85f6 100644
--- a/healthchecks/checks/hivemind_rtracker.sh
+++ b/healthchecks/checks/hivemind_rtracker.sh
@@ -8,7 +8,7 @@ trap "trap - 2 15 && kill -- -\$\$" 2 15
 
 check_haf_lib
 
-REPTRACKER_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_REPTRACKER" --quiet --no-align --tuples-only --command="select extract('epoch' from now() - created_at)::integer from hive.blocks where num = (select hive.app_get_current_block_num('reptracker_app'))")
+REPTRACKER_LAST_PROCESSED_BLOCK_AGE=$(psql "$POSTGRES_URL_REPTRACKER" --quiet --no-align --tuples-only --command="select extract('epoch' from hive.get_app_current_block_age('reptracker_app'))::integer")
 if [ "$REPTRACKER_LAST_PROCESSED_BLOCK_AGE" -gt 60 ]; then
   age_string=$(format_seconds "$REPTRACKER_LAST_PROCESSED_BLOCK_AGE")
   echo "down #reptracker_app block over a minute old ($age_string)"
diff --git a/healthchecks/docker_entrypoint.sh b/healthchecks/docker_entrypoint.sh
index 9fd84b3a267b0247a806a1fa2679feb362943f3d..01f1315d06679d430caee65491304c1a32e85716 100755
--- a/healthchecks/docker_entrypoint.sh
+++ b/healthchecks/docker_entrypoint.sh
@@ -6,6 +6,5 @@ nc -lk -p 7003 -e /checks/hafah.sh &
 nc -lk -p 7004 -e /checks/hafbe_btracker.sh &
 nc -lk -p 7005 -e /checks/hafbe.sh &
 nc -lk -p 7009 -e /checks/hafbe_reptracker.sh &
-nc -lk -p 7010 -e /checks/hivemind_rtracker.sh &
 
 wait
diff --git a/healthchecks/healthcheck_healthcheck.sh b/healthchecks/healthcheck_healthcheck.sh
new file mode 100755
index 0000000000000000000000000000000000000000..cb60479f49481ad8cb1554b275f6854a0b94a039
--- /dev/null
+++ b/healthchecks/healthcheck_healthcheck.sh
@@ -0,0 +1,2 @@
+#! /bin/sh
+#exec netstat -tln | grep -E ':(7001|7002|7003|7004|7005|7009)\b' | wc -l | grep -q '^6$'
diff --git a/hivemind.yaml b/hivemind.yaml
index 2bbd32a6c611078b2b7c002e5f85f61c37396615..ac96fdec4a5502af5a5af2f64767c53131116894 100644
--- a/hivemind.yaml
+++ b/hivemind.yaml
@@ -1,6 +1,22 @@
 services:
+  hivemind-install:
+    image: ${HIVEMIND_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hivemind}:${HIVEMIND_VERSION:-${HIVE_API_NODE_VERSION}}
+    profiles:
+      - apps
+      - hivemind
+    networks:
+      haf-network:
+    command:
+      - "setup"
+      - "--database-admin-url=postgresql://haf_admin@haf/haf_block_log"
+    depends_on:
+      haf:
+        condition: service_healthy
+      reputation-tracker-install:
+        condition: service_completed_successfully
+
   hivemind-block-processing:
-    image: ${HIVEMIND_INSTANCE_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hivemind}:${HIVEMIND_INSTANCE_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${HIVEMIND_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hivemind}:${HIVEMIND_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - apps
       - hivemind
@@ -10,25 +26,32 @@ services:
       - "sync"
       - "--database-url=postgresql://hivemind@haf/haf_block_log"
       - "--database-admin-url=postgresql://haf_admin@haf/haf_block_log"
-      - "--install-app"
-      - "--only-hivemind"
     healthcheck:
       test: ["CMD-SHELL","/home/hivemind/block-processing-healthcheck.sh || exit 1"]
       interval: 10s
       timeout: 2s
       retries: 10
-      start_period: 5m
+      start_period: 72h
+    # after requesting a shutdown with the stop signal (SIGINT, see stop_signal below),
+    # allow the container 30 seconds to exit before killing it.
+    # Normally it will exit cleanly in a second or two, but it can take longer if it's
+    # in the middle of massive sync.  When in massive sync, a forced shutdown can leave
+    # the hivemind database in an inconsistent state, requiring an uninstall/reinstall
+    # of hivemind to correct it.
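+    # (for a one-off shutdown with a different timeout, `docker compose stop -t <seconds>`
+    # should take precedence over this value)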
+    stop_grace_period: 30s
+    stop_signal: SIGINT
     depends_on:
       haf:
         condition: service_healthy
-      hivemind-rtracker-install:
+      reputation-tracker-install:
+        condition: service_completed_successfully
+      hivemind-install:
         condition: service_completed_successfully
 
   hivemind-server:
-    image: ${HIVEMIND_INSTANCE_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hivemind}:${HIVEMIND_INSTANCE_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${HIVEMIND_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hivemind}:${HIVEMIND_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
-      - apps
-      - hivemind
+      - legacy-hivemind-server
     networks:
       haf-network:
     command:
@@ -40,27 +63,56 @@ services:
       haf:
         condition: service_healthy
 
-  # note: the hivemind-install steps, which create/update the hivemind database schema, are also run automatically at the beginning
-  # of the hivemind-block-processing image's startup.  There is normally no reason to run this step separately.  If you do,
-  # be sure that the hivemind block-processor is not running at the same time as this install script, as doing so may corrupt your
-  # database
-  hivemind-install:
-    image: ${HIVEMIND_INSTANCE_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hivemind}:${HIVEMIND_INSTANCE_VERSION:-${HIVE_API_NODE_VERSION}}
+  hivemind-postgrest-server:
+    image: ${POSTGREST_IMAGE:-registry.gitlab.syncad.com/hive/haf_api_node/postgrest}:${POSTGREST_VERSION:-latest}
     profiles:
-      - hivemind-install
+      - apps
+      - hivemind
     networks:
       haf-network:
-    command:
-      - "setup"
-      - "--database-admin-url=postgresql://haf_admin@haf/haf_block_log"
+    environment:
+      PGRST_ADMIN_SERVER_PORT: 3001
+      PGRST_SERVER_PORT: 8080
+      PGRST_DB_URI: "postgresql://hivemind@haf/haf_block_log?application_name=hive-mind-postgrest" # warning: hivemind improperly matches any connection whose application_name starts with `hivemind` and crashes if such connections are made externally (not from python)
+      PGRST_DB_SCHEMA: hivemind_endpoints
+      # PGRST_DB_ROOT_SPEC: home
+      PGRST_DB_ANON_ROLE: hivemind
+      PGRST_DB_POOL: 40
+      PGRST_DB_POOL_ACQUISITION_TIMEOUT: 10
+      PGRST_OPENAPI_MODE: "disabled" # unclear why this is set; presumably because we currently only support JSON-RPC
+      # when debugging, you can enable this
+      # PGRST_DB_PLAN_ENABLED: true
+    healthcheck:
+      test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1:3001/ready || exit 1"]
+      interval: 10s
+      timeout: 3s
+      retries: 10
+      start_period: 1m
     depends_on:
+      hivemind-install:
+        condition: service_completed_successfully
       haf:
         condition: service_healthy
-      hivemind-rtracker-install:
-        condition: service_completed_successfully
+
+  hivemind-postgrest-rewriter:
+    image: ${HIVEMIND_REWRITER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hivemind/postgrest-rewriter}:${HIVEMIND_VERSION:-${HIVE_API_NODE_VERSION}}
+    profiles:
+      - apps
+      - hivemind
+    healthcheck:
+      test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1:81/health || exit 1"]
+      interval: 10s
+      timeout: 3s
+      retries: 10
+      start_period: 1m
+    depends_on:
+      hivemind-postgrest-server:
+        condition: service_healthy
+    networks:
+      - haf-network
 
   hivemind-uninstall:
-    image: ${HIVEMIND_INSTANCE_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hivemind}:${HIVEMIND_INSTANCE_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${HIVEMIND_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/hivemind}:${HIVEMIND_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - hivemind-uninstall
     networks:
diff --git a/hivemind_reptracker.yaml b/hivemind_reptracker.yaml
deleted file mode 100644
index 6025b866117900d06a36d031563e13c3647d4fa1..0000000000000000000000000000000000000000
--- a/hivemind_reptracker.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-services:
-  hivemind-rtracker-install:
-    image: ${REPUTATION_TRACKER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/reputation_tracker}:${REPUTATION_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
-    profiles:
-      - hivemind
-    environment:
-      POSTGRES_HOST: haf
-    networks:
-      haf-network:
-    command:
-      - install_app
-      - --is_forking=false
-      - --swagger-url=${PUBLIC_HOSTNAME}
-    depends_on:
-      haf:
-        condition: service_healthy
-  hivemind-rtracker-uninstall:
-    image: ${REPUTATION_TRACKER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/reputation_tracker}:${REPUTATION_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
-    profiles:
-      - reputation-tracker-uninstall
-    environment:
-      POSTGRES_HOST: haf
-    networks:
-      haf-network:
-    command:
-      - uninstall_app
-    depends_on:
-      haf:
-        condition: service_healthy
-  hivemind-rtracker-postgrest:
-    image: ${POSTGREST_REGISTRY:-registry.gitlab.syncad.com/hive/haf_api_node/postgrest}:${POSTGREST_VERSION:-latest}
-    profiles:
-      - hivemind
-    environment:
-      PGRST_ADMIN_SERVER_PORT: 3001
-      PGRST_DB_URI: postgresql://reptracker_user@haf/haf_block_log
-      PGRST_DB_SCHEMA: reptracker_endpoints
-      PGRST_DB_ANON_ROLE: reptracker_user
-      PGRST_DB_POOL: 20
-      PGRST_DB_POOL_ACQUISITION_TIMEOUT: 10
-      PGRST_DB_EXTRA_SEARCH_PATH: reptracker_app
-      # PGRST_OPENAPI_SERVER_PROXY_URI: http://${PUBLIC_HOSTNAME}/reptracker_user/
-    healthcheck:
-      test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1:3001/ready || exit 1"]
-      interval: 10s
-      timeout: 3s
-      retries: 10
-      start_period: 1m
-    networks:
-      haf-network:
-    depends_on:
-      hivemind-rtracker-install:
-        condition: service_completed_successfully
-      haf:
-        condition: service_healthy
-  hivemind-rtracker-postgrest-rewriter:
-    image: nginx
-    profiles:
-      - hivemind
-    volumes:
-      - type: bind
-        source: hivemind_rtracker/hivemind_rtracker_nginx.conf
-        target: /etc/nginx/nginx.conf
-        read_only: true
-      - type: bind
-        source: hivemind_rtracker/rewrite_rules.conf
-        target: /etc/nginx/rewrite_rules.conf
-        read_only: true
-    depends_on:
-      hivemind-rtracker-postgrest:
-        condition: service_healthy
-    networks:
-      - haf-network
-  semaphore:
-    image: busybox
-    depends_on:
-      haf:
-        condition: service_healthy
-    command: ["sleep", "20"]
diff --git a/hivemind_rtracker/hivemind_rtracker_nginx.conf b/hivemind_rtracker/hivemind_rtracker_nginx.conf
deleted file mode 100644
index 846172dbc9cc37a36b19f2962ce12add8b710972..0000000000000000000000000000000000000000
--- a/hivemind_rtracker/hivemind_rtracker_nginx.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Homepage and endpoints of the API "HAF Block Explorer".
-#
-worker_processes  5;
-error_log /dev/stdout info;
-worker_rlimit_nofile 8192;
-
-events {
-  worker_connections 4096;
-}
-http {
-    access_log /dev/stdout;
-    server {
-            listen 0.0.0.0:80 default_server;
-            server_name _;
-
-            location / {
-                    include rewrite_rules.conf;
-                    rewrite_log on;
-
-                    proxy_pass  http://hivemind-rtracker-postgrest:3000;  # my PostREST is  here!
-
-                    proxy_set_header Host $host;
-                    proxy_set_header X-Real-IP $remote_addr;
-                    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-                    #default_type;
-                    proxy_hide_header Content-Location;
-                    proxy_set_header  Connection "";
-                    proxy_http_version 1.1;
-            }
-    }
-}
diff --git a/hivemind_rtracker/rewrite_rules.conf b/hivemind_rtracker/rewrite_rules.conf
deleted file mode 100644
index d93aa9c6e3c2c8fa2d2bc425d95268bee600b571..0000000000000000000000000000000000000000
--- a/hivemind_rtracker/rewrite_rules.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-rewrite ^/reputation/([^/]+) /rpc/get_account_reputation?account-name=$1 break;
-# endpoint for get /reptracker/reputation/{account-name}
-
-rewrite ^/$ / break;
-# endpoint for openapi spec itself
-
-rewrite ^/(.*)$ /rpc/$1 break;
-# default endpoint for everything else
diff --git a/jussi.yaml b/jussi.yaml
index ff6f0bffdaabd68e1aaf58e8dd7c47b9cb7363e8..1ca19c7d4734c98341586879519bf71f88e83783 100644
--- a/jussi.yaml
+++ b/jussi.yaml
@@ -13,7 +13,7 @@ services:
     networks:
       - haf-network
   jussi:
-    image: ${JUSSI_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/jussi}:${JUSSI_VERSION:-latest}
+    image: ${JUSSI_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/jussi}:${JUSSI_VERSION:-latest}
     profiles:
       - servers
     environment:
@@ -46,13 +46,15 @@ services:
   # service above to 'jussi-real', then uncomment the next section. 
   # jussi:
   #   image: nginx
+  #   profiles:
+  #     - servers
   #   volumes:
   #     - type: bind
   #       source: ./jussi/nginx.conf
   #       target: /etc/nginx/nginx.conf
   #       read_only: true
   #     - type: bind
-  #       source: ${HAF_LOG_DIRECTORY}/jussi
+  #       source: ${HAF_LOG_DIRECTORY:-${TOP_LEVEL_DATASET_MOUNTPOINT}/logs}/jussi
   #       target: /var/log/jussi
   #   depends_on:
   #     jussi-real:
diff --git a/local_to_env.py b/local_to_env.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cf1072671430c381dea8b39a531d93da939bdae
--- /dev/null
+++ b/local_to_env.py
@@ -0,0 +1,79 @@
+import os
+import subprocess
+
+def get_git_revision(repo_path):
+    """Return (remote URL, short hash, commit date, commit subject) for the git repo at repo_path."""
+    try:
+        repo_url = subprocess.check_output(
+            ["git", "-C", repo_path, "config", "--get", "remote.origin.url"],
+            stderr=subprocess.STDOUT
+        ).strip().decode('utf-8')
+
+        git_hash = subprocess.check_output(
+            ["git", "-C", repo_path, "rev-parse", "HEAD"],
+            stderr=subprocess.STDOUT
+        ).strip().decode('utf-8')[:8]  # Use only the first 8 characters of the hash
+
+        commit_date = subprocess.check_output(
+            ["git", "-C", repo_path, "show", "-s", "--format=%ci", git_hash],
+            stderr=subprocess.STDOUT
+        ).strip().decode('utf-8')
+
+        commit_description = subprocess.check_output(
+            ["git", "-C", repo_path, "show", "-s", "--format=%s", git_hash],
+            stderr=subprocess.STDOUT
+        ).strip().decode('utf-8')
+
+        return repo_url, git_hash, commit_date, commit_description
+    except subprocess.CalledProcessError:
+        return None, None, None, None
+
+def update_env_file(env_file_path, repo_name, git_hash, commit_date, commit_description):
+    """Set (or append) <REPO_NAME>_VERSION=<git_hash> in the .env file, annotated with the commit info."""
+    updated = False
+    with open(env_file_path, "r") as file:
+        lines = file.readlines()
+
+    with open(env_file_path, "w") as file:
+        for line in lines:
+            if line.startswith(f"{repo_name.upper()}_VERSION="):
+                file.write(f"{repo_name.upper()}_VERSION={git_hash}  # {commit_date} - {commit_description}\n")
+                updated = True
+                print(f"Updated {repo_name.upper()}_VERSION to {git_hash} in .env file")
+            else:
+                file.write(line)
+
+    if not updated:
+        with open(env_file_path, "a") as file:
+            file.write(f"{repo_name.upper()}_VERSION={git_hash}  # {commit_date} - {commit_description}\n")
+            print(f"Added {repo_name.upper()}_VERSION={git_hash} to .env file")
+
+def main(directory):
+    """Scan 'directory' for git checkouts, record them in repo_versions.txt, and pin versions in haf_api_node/.env."""
+    env_file_path = None
+    for item in os.listdir(directory):
+        item_path = os.path.join(directory, item)
+        if os.path.isdir(item_path) and item == "haf_api_node":
+            env_file_path = os.path.join(item_path, ".env")
+            break
+
+    if not env_file_path or not os.path.exists(env_file_path):
+        print("Error: .env file not found in haf_api_node repo.")
+        return
+
+    with open("repo_versions.txt", "w") as output_file:
+        for item in os.listdir(directory):
+            item_path = os.path.join(directory, item)
+            if os.path.isdir(item_path) and ".git" in os.listdir(item_path):
+                repo_url, git_hash, commit_date, commit_description = get_git_revision(item_path)
+                if repo_url and git_hash and commit_date and commit_description:
+                    output_file.write(f"{repo_url}, {git_hash}, {commit_date}, {commit_description}\n")
+                    print(f"Found repo: {repo_url} at {item_path}")
+                    repo_name = os.path.basename(item_path)
+                    if repo_name != "hive":
+                        update_env_file(env_file_path, repo_name, git_hash, commit_date, commit_description)
+
+if __name__ == "__main__":
+    import sys
+    if len(sys.argv) != 2:
+        print("Usage: python local_to_env.py <directory>")
+        print("This script updates the .env file in the haf_api_node repository with the short git hashes of other repositories in the specified directory.")
+    else:
+        main(sys.argv[1])
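+
+# Example (hypothetical layout): if ~/src holds local checkouts of haf_api_node,
+# hivemind, hafah, etc., then `python3 local_to_env.py ~/src` records every repo's
+# URL and short hash in repo_versions.txt and pins the matching *_VERSION entries
+# in haf_api_node/.env (the hive repo itself is only recorded, not pinned).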
diff --git a/log_rotation/README.md b/log_rotation/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b0cf579f60b6acf176811eda0f67eee0af6d5e22
--- /dev/null
+++ b/log_rotation/README.md
@@ -0,0 +1,23 @@
+This _log_rotation_ configuration includes directives that cause docker to
+limit how much data is kept when logging each container's stdout/stderr.
+
+Without these files, docker logs using the system's default logging
+configuration.  Normally that means the json-file logging driver, which
+writes all output to a text file in _JSON Lines_ format and keeps logs
+forever.  This puts you at risk of eventually running out of disk space,
+though if you have large disks, low API traffic, or you regularly restart
+your containers, this may never be an issue for you.
+
+Including this config:
+ - switches the logging driver to the more efficient _local_, and
+ - sets finite limits on how much space the log files can take
+
+At the moment these limits are generous; they should allow a public API node
+to keep at least one day's worth of logs even for the most verbose containers.
+
+To use this config, add a line to your .env file telling docker compose to
+merge this file in:
+
+```
+COMPOSE_FILE=compose.yml:log_rotation/compose.log_rotation.yml
+```
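+
+After restarting the stack with this file merged in, you can spot-check that a
+container picked up the new driver (container names depend on your compose
+project name; `<container-name>` below is a placeholder):
+
+```
+docker inspect --format '{{.HostConfig.LogConfig.Type}}' <container-name>
+```
+
+It should print `local` for the services covered by these files.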
diff --git a/log_rotation/backend.log_rotation.yaml b/log_rotation/backend.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a094fa54a9fc05cfd6353deabc7167ff707027f9
--- /dev/null
+++ b/log_rotation/backend.log_rotation.yaml
@@ -0,0 +1,7 @@
+services:
+  pghero:
+    logging:
+      driver: local
+  pgadmin:
+    logging:
+      driver: local
diff --git a/log_rotation/balance_tracker.log_rotation.yaml b/log_rotation/balance_tracker.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6acdddb507fa0150a62bee23aa4126d5c3faaa81
--- /dev/null
+++ b/log_rotation/balance_tracker.log_rotation.yaml
@@ -0,0 +1,16 @@
+services:
+  balance-tracker-install:
+    logging:
+      driver: local
+  balance-tracker-uninstall:
+    logging:
+      driver: local
+  balance-tracker-block-processing:
+    logging:
+      driver: local
+  balance-tracker-postgrest:
+    logging:
+      driver: local
+  balance-tracker-postgrest-rewriter:
+    logging:
+      driver: local
diff --git a/log_rotation/caddy.log_rotation.yaml b/log_rotation/caddy.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8998b07b075f9644a4f1f5cabdc2837007c79451
--- /dev/null
+++ b/log_rotation/caddy.log_rotation.yaml
@@ -0,0 +1,10 @@
+services:
+  caddy:
+    logging:
+      driver: local
+      options:
+        max-size: 100m
+        max-file: 10
+  version-display:
+    logging:
+      driver: local
diff --git a/log_rotation/compose.log_rotation.yml b/log_rotation/compose.log_rotation.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0f5628d218f400a8fd3b19cb9ee4713fac2d9689
--- /dev/null
+++ b/log_rotation/compose.log_rotation.yml
@@ -0,0 +1,14 @@
+include:
+  - log_rotation/backend.log_rotation.yaml
+  - log_rotation/balance_tracker.log_rotation.yaml
+  - log_rotation/caddy.log_rotation.yaml
+  - log_rotation/${JSONRPC_API_SERVER_NAME:-drone}.log_rotation.yaml
+  - log_rotation/haf_base.log_rotation.yaml
+  - log_rotation/haf_block_explorer.log_rotation.yaml
+  - log_rotation/hafah.log_rotation.yaml
+  - log_rotation/haproxy.log_rotation.yaml
+  - log_rotation/hivemind.log_rotation.yaml
+  - log_rotation/varnish.log_rotation.yaml
+  - log_rotation/monitoring.log_rotation.yaml
+  - log_rotation/reputation_tracker.log_rotation.yaml
+  - log_rotation/swagger.log_rotation.yaml
diff --git a/log_rotation/drone.log_rotation.yaml b/log_rotation/drone.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..95df8b0893ea00ddfacdb2ec56cef5b643878a8e
--- /dev/null
+++ b/log_rotation/drone.log_rotation.yaml
@@ -0,0 +1,7 @@
+services:
+  drone:
+    logging:
+      driver: local
+      options:
+        max-size: 1000m
+        max-file: 50
diff --git a/log_rotation/haf_base.log_rotation.yaml b/log_rotation/haf_base.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b7b9d808fd6ccb2234e44688fe9780e4db5f2b0f
--- /dev/null
+++ b/log_rotation/haf_base.log_rotation.yaml
@@ -0,0 +1,16 @@
+services:
+  haf:
+    logging:
+      driver: local
+      options:
+        max-size: 100m
+        max-file: 10
+  logrotate:
+    logging:
+      driver: local
+  pgbadger:
+    logging:
+      driver: local
+  ofelia:
+    logging:
+      driver: local
diff --git a/log_rotation/haf_block_explorer.log_rotation.yaml b/log_rotation/haf_block_explorer.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..df3a05ff1804660924c7c6076368c89b5394db1a
--- /dev/null
+++ b/log_rotation/haf_block_explorer.log_rotation.yaml
@@ -0,0 +1,28 @@
+services:
+  block-explorer-block-processing:
+    logging:
+      driver: local
+      options:
+        max-size: 100m
+        max-file: 10
+  block-explorer-postgrest:
+    logging:
+      driver: local
+      options:
+        max-size: 100m
+        max-file: 10
+  block-explorer-uninstall:
+    logging:
+      driver: local
+  block-explorer-uninstall-balance-tracker:
+    logging:
+      driver: local
+  block-explorer-postgrest-rewriter:
+    logging:
+      driver: local
+  block-explorer-install-schema:
+    logging:
+      driver: local
+  block-explorer-install-balance-tracker:
+    logging:
+      driver: local
diff --git a/log_rotation/hafah.log_rotation.yaml b/log_rotation/hafah.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f8e60b7da48e72b7fed4e8b69d6715ac0469cf4d
--- /dev/null
+++ b/log_rotation/hafah.log_rotation.yaml
@@ -0,0 +1,19 @@
+services:
+  hafah-postgrest:
+    logging:
+      driver: local
+      options:
+        max-size: 100m
+        max-file: 10
+  hafah-benchmarks-old-style:
+    logging:
+      driver: local
+  hafah-install:
+    logging:
+      driver: local
+  hafah-uninstall:
+    logging:
+      driver: local
+  hafah-postgrest-rewriter:
+    logging:
+      driver: local
diff --git a/log_rotation/haproxy.log_rotation.yaml b/log_rotation/haproxy.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..509e3ee9514279c9b5e2993ff2d7b2f32ac322b2
--- /dev/null
+++ b/log_rotation/haproxy.log_rotation.yaml
@@ -0,0 +1,7 @@
+services:
+  haproxy:
+    logging:
+      driver: local
+  haproxy-healthchecks:
+    logging:
+      driver: local
diff --git a/log_rotation/hivemind.log_rotation.yaml b/log_rotation/hivemind.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9fc78cd9cd03435205d61049a02583a0b6e31195
--- /dev/null
+++ b/log_rotation/hivemind.log_rotation.yaml
@@ -0,0 +1,25 @@
+services:
+  hivemind-block-processing:
+    logging:
+      driver: local
+      options:
+        max-size: 100m
+        max-file: 10
+  hivemind-server:
+    logging:
+      driver: local
+      options:
+        max-size: 100m
+        max-file: 10
+  hivemind-install:
+    logging:
+      driver: local
+  hivemind-uninstall:
+    logging:
+      driver: local
+  hivemind-postgrest-server:
+    logging:
+      driver: local
+  hivemind-postgrest-rewriter:
+    logging:
+      driver: local
diff --git a/log_rotation/jussi.log_rotation.yaml b/log_rotation/jussi.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fdebbbed9086140cad45fcd57fa9789a59103846
--- /dev/null
+++ b/log_rotation/jussi.log_rotation.yaml
@@ -0,0 +1,10 @@
+services:
+  jussi:
+    logging:
+      driver: local
+      options:
+        max-size: 1000m
+        max-file: 5
+  redis:
+    logging:
+      driver: local
diff --git a/log_rotation/monitoring.log_rotation.yaml b/log_rotation/monitoring.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2f80dca0f4dfb45b0f774b37c8175be9c3ad23c3
--- /dev/null
+++ b/log_rotation/monitoring.log_rotation.yaml
@@ -0,0 +1,28 @@
+services:
+  prometheus:
+    logging:
+      driver: local
+  grafana:
+    logging:
+      driver: local
+  cadvisor:
+    logging:
+      driver: local
+  blackboxexporter:
+    logging:
+      driver: local
+  postgresexporter:
+    logging:
+      driver: local
+  promtail:
+    logging:
+      driver: local
+  hived-pme:
+    logging:
+      driver: local
+  nodeexporter:
+    logging:
+      driver: local
+  loki:
+    logging:
+      driver: local
diff --git a/log_rotation/reputation_tracker.log_rotation.yaml b/log_rotation/reputation_tracker.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e48a86aeedceef3d6cde7e8163f43e19ba6596bf
--- /dev/null
+++ b/log_rotation/reputation_tracker.log_rotation.yaml
@@ -0,0 +1,16 @@
+services:
+  reputation-tracker-uninstall:
+    logging:
+      driver: local
+  reputation-tracker-block-processing:
+    logging:
+      driver: local
+  reputation-tracker-postgrest:
+    logging:
+      driver: local
+  reputation-tracker-postgrest-rewriter:
+    logging:
+      driver: local
+  reputation-tracker-install:
+    logging:
+      driver: local
diff --git a/log_rotation/swagger.log_rotation.yaml b/log_rotation/swagger.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..87678d80e91cb840c7d7597858cf4221d421bb15
--- /dev/null
+++ b/log_rotation/swagger.log_rotation.yaml
@@ -0,0 +1,4 @@
+services:
+  swagger:
+    logging:
+      driver: local
diff --git a/log_rotation/sync_log_rotation.py b/log_rotation/sync_log_rotation.py
new file mode 100755
index 0000000000000000000000000000000000000000..7b20d792825272ae0bdbc1acc19c61819f6b0839
--- /dev/null
+++ b/log_rotation/sync_log_rotation.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+
+# This helper script can be run after editing the main .yaml files in the top-level
+# haf_api_node directory.  It will detect when new services have been added or removed,
+# and it will update the corresponding yaml files in this directory by adding or removing
+# services.  Newly-detected services will be added with a default config; edit the file
+# by hand if you want to add rotation options.
+#
+# You'll need to edit compose.yml yourself if you've added or removed any yaml files.
+#
+# Run this script from the top-level haf_api_node directory (..)
+
+import os
+import yaml
+import sys
+
+def load_yaml(file_path):
+    """Load YAML content from a file."""
+    if not os.path.exists(file_path):
+        return {}
+    with open(file_path, 'r') as f:
+        try:
+            return yaml.safe_load(f) or {}
+        except yaml.YAMLError as exc:
+            print(f"Error parsing YAML file {file_path}: {exc}")
+            sys.exit(1)
+
+def save_yaml(data, file_path):
+    """Save YAML content to a file."""
+    with open(file_path, 'w') as f:
+        yaml.dump(data, f, default_flow_style=False, sort_keys=False)
+
+def ensure_log_rotation_directory(log_rotation_dir):
+    """Ensure the log_rotation directory exists."""
+    if not os.path.exists(log_rotation_dir):
+        os.makedirs(log_rotation_dir)
+        print(f"Created directory: {log_rotation_dir}")
+
+def get_top_level_yaml_files(top_level_dir, exclude_files=None):
+    """Get all .yaml files in the top-level directory, excluding specified files."""
+    if exclude_files is None:
+        exclude_files = []
+    yaml_files = []
+    for file in os.listdir(top_level_dir):
+        if (
+            file.endswith('.yaml') or file.endswith('.yml')
+        ) and os.path.isfile(os.path.join(top_level_dir, file)) and file not in exclude_files:
+            yaml_files.append(file)
+    return yaml_files
+
+def sync_log_rotation(top_level_dir, log_rotation_dir, exclude_files=None):
+    """Synchronize log rotation configurations with top-level Docker Compose files."""
+    if exclude_files is None:
+        exclude_files = []
+    ensure_log_rotation_directory(log_rotation_dir)
+    yaml_files = get_top_level_yaml_files(top_level_dir, exclude_files)
+
+    for yaml_file in yaml_files:
+        top_yaml_path = os.path.join(top_level_dir, yaml_file)
+        top_yaml = load_yaml(top_yaml_path)
+        services = top_yaml.get('services', {}).keys()
+
+        log_rotation_file = os.path.splitext(yaml_file)[0] + '.log_rotation.yaml'
+        log_rotation_path = os.path.join(log_rotation_dir, log_rotation_file)
+
+        log_yaml = load_yaml(log_rotation_path)
+        log_services = log_yaml.get('services', {})
+
+        # Remove services that no longer exist
+        removed_services = set(log_services.keys()) - set(services)
+        if removed_services:
+            for svc in removed_services:
+                del log_services[svc]
+            print(f"Removed services from {log_rotation_file}: {', '.join(removed_services)}")
+
+        # Add new services with default logging config
+        added_services = set(services) - set(log_services.keys())
+        if added_services:
+            for svc in added_services:
+                log_services[svc] = {
+                    'logging': {
+                        'driver': 'local'
+                    }
+                }
+            print(f"Added services to {log_rotation_file}: {', '.join(added_services)}")
+
+        # Update the log_yaml structure
+        log_yaml['services'] = log_services
+
+        # Save the updated log rotation file
+        save_yaml(log_yaml, log_rotation_path)
+        print(f"Updated log rotation file: {log_rotation_file}")
+
+def main():
+    """Main function to execute the synchronization."""
+    top_level_dir = os.getcwd()  # Current working directory
+    log_rotation_dir = os.path.join(top_level_dir, 'log_rotation')
+    exclude_files = ['compose.yml', 'compose.yaml', '.gitlab-ci.yaml', '.gitlab-ci.yml']  # Add any other files to exclude if necessary
+
+    sync_log_rotation(top_level_dir, log_rotation_dir, exclude_files)
+
+if __name__ == "__main__":
+    main()
diff --git a/log_rotation/varnish.log_rotation.yaml b/log_rotation/varnish.log_rotation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aefdd9c738d2eb0968e0ac214e89b8486cee6fe7
--- /dev/null
+++ b/log_rotation/varnish.log_rotation.yaml
@@ -0,0 +1,7 @@
+services:
+  varnish:
+    logging:
+      driver: local
+      options:
+        max-size: 100m
+        max-file: 10
diff --git a/monitoring.yaml b/monitoring.yaml
index c92312e72b32229f329522575e2ccb30f072c7a9..d12b80a8dc71c0ad531a6d8c7a5e78a659722b08 100644
--- a/monitoring.yaml
+++ b/monitoring.yaml
@@ -1,6 +1,6 @@
 services:
   prometheus:
-    image: prom/prometheus:${PROMETHEUS_VERSION}
+    image: prom/prometheus:${PROMETHEUS_VERSION:-v2.49.1}
     profiles:
       - monitoring
     volumes:
@@ -22,7 +22,7 @@ services:
       org.label-schema.group: "monitoring"
 
   nodeexporter:
-    image: prom/node-exporter:${NODE_EXPORTER_VERSION}
+    image: prom/node-exporter:${NODE_EXPORTER_VERSION:-v1.7.0}
     profiles:
       - monitoring
     volumes:
@@ -43,7 +43,7 @@ services:
       org.label-schema.group: "monitoring"
 
   cadvisor:
-    image: gcr.io/cadvisor/cadvisor:${CADVISOR_VERSION}
+    image: gcr.io/cadvisor/cadvisor:${CADVISOR_VERSION:-v0.47.2}
     profiles:
       - monitoring
     privileged: true
@@ -64,7 +64,7 @@ services:
       org.label-schema.group: "monitoring"
 
   grafana:
-    image: grafana/grafana:${GRAFANA_VERSION}
+    image: grafana/grafana:${GRAFANA_VERSION:-10.3.3}
     profiles:
       - monitoring
     depends_on:
@@ -82,14 +82,14 @@ services:
       - GF_USERS_ALLOW_SIGN_UP=false
     restart: unless-stopped
     ports:
-      - "3000:3000"
+      - "3003:3000"
     networks:
       - haf-network
     labels:
       org.label-schema.group: "monitoring"
 
   loki:
-    image: grafana/loki:${LOKI_VERSION}
+    image: grafana/loki:${LOKI_VERSION:-2.9.4}
     profiles:
       - monitoring
     volumes:
@@ -105,7 +105,7 @@ services:
       org.label-schema.group: "monitoring"
 
   promtail:
-    image: grafana/promtail:${PROMTAIL_VERSION}
+    image: grafana/promtail:${PROMTAIL_VERSION:-2.9.4}
     profiles:
       - monitoring
     depends_on:
@@ -121,7 +121,7 @@ services:
       org.label-schema.group: "monitoring"
 
   hived-pme:
-    image: registry.gitlab.syncad.com/hive/hived-pme:${HIVED_PME_VERSION}
+    image: registry.gitlab.syncad.com/hive/hived-pme:${HIVED_PME_VERSION:-49a7312d}
     profiles:
       - monitoring
     restart: unless-stopped
@@ -142,10 +142,12 @@ services:
       org.label-schema.group: "monitoring"
 
   postgresexporter:
-    image: quay.io/prometheuscommunity/postgres-exporter
+    image: quay.io/prometheuscommunity/postgres-exporter:${POSTGRESEXPORTER_VERSION:-v0.16.0}
     profiles:
       - monitoring
     restart: unless-stopped
+    command:
+      - --no-collector.stat_bgwriter
     expose:
       - 9187
     depends_on:
@@ -154,12 +156,12 @@ services:
     networks:
       - haf-network
     environment:
-      - DATA_SOURCE_NAME=${DATA_SOURCE}
+      - DATA_SOURCE_NAME=${DATA_SOURCE:-postgresql://postgres@haf:5432/postgres?sslmode=disable}
     labels:
       org.label-schema.group: "monitoring"
 
   blackboxexporter:
-    image: prom/blackbox-exporter:${BLACKBOX_VERSION}
+    image: prom/blackbox-exporter:${BLACKBOX_VERSION:-v0.24.0}
     profiles:
       - monitoring
     restart: unless-stopped
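
With the `:-` fallbacks added above, the monitoring profile now starts even if none of these version variables are defined in `.env`; setting one still overrides the baked-in default. For example (the values shown are just the same defaults, repeated explicitly):

    # .env (optional): pin monitoring images instead of relying on the compose-file fallbacks
    PROMETHEUS_VERSION=v2.49.1
    GRAFANA_VERSION=10.3.3
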
diff --git a/pgtune.conf b/pgtune.conf
index aec54b708f93c2a5954acf6bd4e06bc31c44f2fb..b80b4fce2a16326060407f1f63230539d5687ddd 100644
--- a/pgtune.conf
+++ b/pgtune.conf
@@ -1,5 +1,5 @@
 shared_buffers = 16GB
-effective_cache_size = 8GB
+effective_cache_size = 48GB
 maintenance_work_mem = 4GB
 work_mem = 1024MB
 
diff --git a/postgrest/Dockerfile b/postgrest/Dockerfile
index c74b1a76842b95245c39de9dd7eba7ed50ceb76a..0b2f087f113fb824097fa8ad4f297f25ae338db3 100644
--- a/postgrest/Dockerfile
+++ b/postgrest/Dockerfile
@@ -1,4 +1,4 @@
-ARG POSTGREST_VERSION=v12.0.2
+ARG POSTGREST_VERSION=v12.2.3
 FROM postgrest/postgrest:${POSTGREST_VERSION} AS postgrest-base
 FROM alpine:latest AS base
 COPY --from=postgrest-base /bin/postgrest /bin
diff --git a/reputation_tracker.yaml b/reputation_tracker.yaml
index fd368b9d7dfacd0401dbdffb2fac609a26d3319d..bab526ba85ff027f1a8596036245e49e6f5c0e80 100644
--- a/reputation_tracker.yaml
+++ b/reputation_tracker.yaml
@@ -1,23 +1,23 @@
 services:
   reputation-tracker-install:
-    image: ${REPUTATION_TRACKER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/reputation_tracker}:${REPUTATION_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${REPUTATION_TRACKER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/reputation_tracker}:${REPUTATION_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - apps
       - hafbe
-      - reputation-tracker-setup
+      - hivemind
+      - reputation-tracker
     environment:
       POSTGRES_HOST: haf
     networks:
       haf-network:
     command:
       - install_app
-      - --schema=${REPTRACKER_SCHEMA:-hafbe_rep}
       - --swagger-url=${PUBLIC_HOSTNAME}
     depends_on:
-      semaphore:
-        condition: service_started
+      haf:
+        condition: service_healthy
   reputation-tracker-uninstall:
-    image: ${REPUTATION_TRACKER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/reputation_tracker}:${REPUTATION_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${REPUTATION_TRACKER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/reputation_tracker}:${REPUTATION_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - reputation-tracker-uninstall
     environment:
@@ -26,21 +26,23 @@ services:
       haf-network:
     command:
       - uninstall_app
-      - --schema=${REPTRACKER_SCHEMA:-hafbe_rep}
     depends_on:
       haf:
         condition: service_healthy
   reputation-tracker-block-processing:
-    image: ${REPUTATION_TRACKER_REGISTRY:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/reputation_tracker}:${REPUTATION_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
+    image: ${REPUTATION_TRACKER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/reputation_tracker}:${REPUTATION_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
-      - reputation-tracker-standalone
+      - apps
+      - hafbe
+      - hivemind
+      - reputation-tracker
     environment:
       POSTGRES_HOST: haf
+      POSTGRES_USER: reptracker_owner
     networks:
       haf-network:
     command:
       - process_blocks
-      - --schema=${REPTRACKER_SCHEMA:-hafbe_rep}
     healthcheck:
       test: ["CMD-SHELL","/app/block-processing-healthcheck.sh"]
       interval: 60s
@@ -53,20 +55,21 @@ services:
       haf:
         condition: service_healthy
   reputation-tracker-postgrest:
-    image: ${POSTGREST_REGISTRY:-registry.gitlab.syncad.com/hive/haf_api_node/postgrest}:${POSTGREST_VERSION:-latest}
+    image: ${POSTGREST_IMAGE:-registry.gitlab.syncad.com/hive/haf_api_node/postgrest}:${POSTGREST_VERSION:-latest}
     profiles:
       - apps
-      - reputation-tracker-standalone
+      - hivemind
+      - reputation-tracker
       - hafbe
     environment:
       PGRST_ADMIN_SERVER_PORT: 3001
-      PGRST_DB_URI: postgresql://reptracker_user@haf/haf_block_log
+      PGRST_DB_URI: postgresql://reptracker_user@haf/haf_block_log?application_name=reputation_tracker_postgrest
       PGRST_DB_SCHEMA: reptracker_endpoints
       PGRST_DB_ANON_ROLE: reptracker_user
       PGRST_DB_POOL: 20
       PGRST_DB_ROOT_SPEC: root
       PGRST_DB_POOL_ACQUISITION_TIMEOUT: 10
-      PGRST_DB_EXTRA_SEARCH_PATH: ${REPTRACKER_SCHEMA:-hafbe_rep}
+      PGRST_DB_EXTRA_SEARCH_PATH: reptracker_app
       # PGRST_OPENAPI_SERVER_PROXY_URI: http://${PUBLIC_HOSTNAME}/reptracker_user/
     healthcheck:
       test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1:3001/ready || exit 1"]
@@ -82,20 +85,18 @@ services:
       haf:
         condition: service_healthy
   reputation-tracker-postgrest-rewriter:
-    image: nginx
+    image: ${REPUTATION_TRACKER_REWRITER_IMAGE:-${HIVE_API_NODE_REGISTRY:-registry.hive.blog}/reputation_tracker/postgrest-rewriter}:${REPUTATION_TRACKER_VERSION:-${HIVE_API_NODE_VERSION}}
     profiles:
       - apps
-      - reputation-tracker-standalone
+      - reputation-tracker
       - hafbe
-    volumes:
-      - type: bind
-        source: reputation_tracker/reputation_tracker_nginx.conf
-        target: /etc/nginx/nginx.conf
-        read_only: true
-      - type: bind
-        source: reputation_tracker/rewrite_rules.conf
-        target: /etc/nginx/rewrite_rules.conf
-        read_only: true
+      - hivemind
+    healthcheck:
+      test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1:81/health || exit 1"]
+      interval: 10s
+      timeout: 3s
+      retries: 10
+      start_period: 1m
     depends_on:
       reputation-tracker-postgrest:
         condition: service_healthy
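
The `application_name` now appended to `PGRST_DB_URI` makes this PostgREST pool easy to pick out in `pg_stat_activity`. A quick check, with connection details that are only illustrative (adjust the role, host and port to your deployment):

    # count connections per application on the HAF database
    psql "postgresql://haf_admin@localhost:5432/haf_block_log" \
      -c "SELECT application_name, count(*) FROM pg_stat_activity GROUP BY 1 ORDER BY 2 DESC"
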
diff --git a/reputation_tracker/reputation_tracker_nginx.conf b/reputation_tracker/reputation_tracker_nginx.conf
deleted file mode 100644
index 5001753d27d8f7ca48968b9888c032ed0032f2e5..0000000000000000000000000000000000000000
--- a/reputation_tracker/reputation_tracker_nginx.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Homepage and endpoints of the API "HAF Block Explorer".
-#
-worker_processes  5;
-error_log /dev/stdout info;
-worker_rlimit_nofile 8192;
-
-events {
-  worker_connections 4096;
-}
-http {
-    access_log /dev/stdout;
-    server {
-            listen 0.0.0.0:80 default_server;
-            server_name _;
-
-            location / {
-                    include rewrite_rules.conf;
-                    rewrite_log on;
-
-                    proxy_pass  http://reputation-tracker-postgrest:3000;  # my PostREST is  here!
-
-                    proxy_set_header Host $host;
-                    proxy_set_header X-Real-IP $remote_addr;
-                    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-                    #default_type;
-                    proxy_hide_header Content-Location;
-                    proxy_set_header  Connection "";
-                    proxy_http_version 1.1;
-            }
-    }
-}
diff --git a/reputation_tracker/rewrite_rules.conf b/reputation_tracker/rewrite_rules.conf
deleted file mode 100644
index d93aa9c6e3c2c8fa2d2bc425d95268bee600b571..0000000000000000000000000000000000000000
--- a/reputation_tracker/rewrite_rules.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-rewrite ^/reputation/([^/]+) /rpc/get_account_reputation?account-name=$1 break;
-# endpoint for get /reptracker/reputation/{account-name}
-
-rewrite ^/$ / break;
-# endpoint for openapi spec itself
-
-rewrite ^/(.*)$ /rpc/$1 break;
-# default endpoint for everything else
diff --git a/snapshot_zfs_datasets.sh b/snapshot_zfs_datasets.sh
index 62dc339dad94455bb01981f6f134fcfc5a3bc4fc..1e0791c023fd23df2e6fe7a562a9fc4f387c51aa 100755
--- a/snapshot_zfs_datasets.sh
+++ b/snapshot_zfs_datasets.sh
@@ -13,9 +13,10 @@ print_help() {
   echo "                            dataset, then swap back afterwards.  That way the large logs files aren't"
   echo "                            of the snapshots.  This is a lot faster, but makes managing datasets more"
   echo "                            complicated, so only use it if you really need to"
+  echo "  --force, -f               continue without prompting, even if warnings are detected"
 }
 
-OPTIONS=$(getopt -o he:pt:l: --long env-file:,help,zpool:,top-level-dataset:,public-snapshot,temp-dir:,swap-logs-with-dataset: -n "$0" -- "$@")
+OPTIONS=$(getopt -o he:pt:l:f --long env-file:,help,zpool:,top-level-dataset:,public-snapshot,temp-dir:,swap-logs-with-dataset:,force -n "$0" -- "$@")
 
 if [ $? -ne 0 ]; then
     print_help
@@ -29,6 +30,7 @@ TOP_LEVEL_DATASET_MOUNTPOINT=""
 PUBLIC_SNAPSHOT=0
 SWAP_LOGS_DATASET=""
 TMPDIR=/tmp
+FORCE=0
 
 eval set -- "$OPTIONS"
 
@@ -58,6 +60,10 @@ while true; do
       SWAP_LOGS_DATASET="$2"
       shift 2
       ;;
+    --force|-f)
+      FORCE=1
+      shift
+      ;;
     --help|-h)
       print_help
       exit 0
@@ -134,6 +140,37 @@ fi
 
 echo "All datasets appear unmountable"
 
+if [ "$SNAPSHOT_NAME" != "empty" ]; then
+  if [ ! -e "${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory/shared_memory.bin" ]; then
+    echo "Warning: No shared memory file found in the shared_memory directory"
+    exit 1
+  fi
+
+  last_shared_memory_write=$(stat -c %Y "${TOP_LEVEL_DATASET_MOUNTPOINT}/shared_memory/shared_memory.bin")
+  last_blockchain_write=$(find "${TOP_LEVEL_DATASET_MOUNTPOINT}/blockchain" -type f -printf '%T@\n' | sort -n | tail -1 | cut -d. -f1)
+
+  if [ -z "$last_blockchain_write" ]; then
+    echo "Warning: No files found in the blockchain directory"
+    exit 1
+  fi
+
+  time_diff=$((last_blockchain_write - last_shared_memory_write))
+
+  if [ $time_diff -gt 300 ] || [ $time_diff -lt -300 ]; then
+    echo "Warning: The shared_memory.bin file was not written to within 5 minutes of the last write to a file in the blockchain directory."
+    if [ "$FORCE" -eq 1 ]; then
+      echo "Continuing due to --force option."
+    else
+      read -p "Do you want to continue? (y/n): " choice
+      case "$choice" in
+        y|Y ) echo "Continuing...";;
+        n|N ) echo "Aborting."; exit 1;;
+        * ) echo "Invalid input. Aborting."; exit 1;;
+      esac
+    fi
+  fi
+fi
+
 if [ $PUBLIC_SNAPSHOT -eq 1 ]; then
   stdbuf -o0 echo ""
   stdbuf -o0 echo "Moving log files out of the dataset because this is a public snapshot... "
diff --git a/swagger.yaml b/swagger.yaml
index 215d0b07e32481754fd7e08904e65df2b496f88b..f679823e6aa88197b0842cee0dce2470273a678a 100644
--- a/swagger.yaml
+++ b/swagger.yaml
@@ -3,6 +3,7 @@ services:
     image: swaggerapi/swagger-ui
     profiles:
       - hafbe
+      - apps
     environment:
       URLS: >-
         [{url: "https://${PUBLIC_HOSTNAME}/hafbe-api/", name: "HAF Block Explorer"},
diff --git a/varnish.yaml b/varnish.yaml
index 2ce101e693ecd80ab706b1447c12c243fffba088..348f4ab96ccfd168655d67a9b34847aef7a34cf5 100644
--- a/varnish.yaml
+++ b/varnish.yaml
@@ -1,6 +1,6 @@
 services:
   varnish:
-    image: varnish:7.3.0-alpine
+    image: varnish:7.6.1-alpine
     profiles:
       - servers
     volumes: