From 7656558da50beb2c514515f048e79b37188ea11f Mon Sep 17 00:00:00 2001
From: Konrad Botor <kbotor@syncad.com>
Date: Tue, 18 Jun 2024 12:25:38 +0200
Subject: [PATCH] Added Dockerisation, CI job templates and CI jobs

---
 .dockerignore                              |   6 +
 .env.example                               |  12 +-
 .gitattributes                             |   9 +
 .gitignore                                 |   2 +
 .gitlab-ci.yml                             | 154 ++++++++++++
 README.md                                  | 115 +++++----
 balance_tracker.yaml                       |   1 +
 ci/compose.yml                             |  95 +++++++
 ci/compose/Dockerfile                      |  54 ++++
 ci/compose/entrypoint.sh                   |  31 +++
 ci/config.ini                              | 275 +++++++++++++++++++++
 ci/dind/Dockerfile                         |  41 +++
 ci/dind/entrypoint.sh                      |  13 +
 ci/node-replay.gitlab-ci.yml               |  89 +++++++
 ci/scripts/after-script.sh                 |  27 ++
 ci/scripts/copy-datadir.sh                 |   6 +
 ci/scripts/copy-haf-config.sh              |  12 +
 ci/scripts/prepare-stack-data-directory.sh |  15 ++
 ci/scripts/replay-api-node.sh              | 105 ++++++++
 ci/scripts/set-up-stack.sh                 |  24 ++
 ci/scripts/test-api-node.sh                |  91 +++++++
 docker-bake.hcl                            |  81 ++++++
 haf_base.yaml                              |   4 +-
 haf_block_explorer.yaml                    |   1 +
 hafah.yaml                                 |   1 +
 haproxy/haproxy-alternate.cfg              | 122 +++++++++
 hivemind.yaml                              |   4 +
 reputation_tracker.yaml                    |   1 +
 swagger.yaml                               |  13 +-
 29 files changed, 1348 insertions(+), 56 deletions(-)
 create mode 100644 .dockerignore
 create mode 100644 .gitattributes
 create mode 100644 ci/compose.yml
 create mode 100644 ci/compose/Dockerfile
 create mode 100755 ci/compose/entrypoint.sh
 create mode 100644 ci/config.ini
 create mode 100644 ci/dind/Dockerfile
 create mode 100755 ci/dind/entrypoint.sh
 create mode 100644 ci/node-replay.gitlab-ci.yml
 create mode 100755 ci/scripts/after-script.sh
 create mode 100755 ci/scripts/copy-datadir.sh
 create mode 100755 ci/scripts/copy-haf-config.sh
 create mode 100755 ci/scripts/prepare-stack-data-directory.sh
 create mode 100755 ci/scripts/replay-api-node.sh
 create mode 100755 ci/scripts/set-up-stack.sh
 create mode 100755 ci/scripts/test-api-node.sh
 create mode 100644 docker-bake.hcl
 create mode 100644 haproxy/haproxy-alternate.cfg

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..d527dfb
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,6 @@
+.env
+.gitlab-ci.yml
+.git/
+ci/haf-datadir/
+docker-bake.hcl
+*/Dockerfile
\ No newline at end of file
diff --git a/.env.example b/.env.example
index 1920e3c..ab97191 100644
--- a/.env.example
+++ b/.env.example
@@ -102,6 +102,9 @@ NETWORK_NAME=haf
 # HIVEMIND_VERSION=${HIVE_API_NODE_VERSION}
 # HIVEMIND_REWRITER_IMAGE=${HIVE_API_NODE_REGISTRY}/hivemind/postgrest-rewriter
 
+# Additional Hivemind sync arguments
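+# (for example, the CI replay jobs set HIVEMIND_SYNC_ARGS="--community-start-block=4998000")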
+HIVEMIND_SYNC_ARGS=
+
 # The default setup will run the recommended version of balance tracker,
 # you can run a custom version by un-commenting and modifying the
 # values below
@@ -184,6 +187,14 @@ NETWORK_NAME=haf
 # this can be a local domain name.
 PUBLIC_HOSTNAME="your.hostname.com"
 
+# PostgREST server log levels
+# https://docs.postgrest.org/en/latest/references/configuration.html#log-level
+BALANCE_TRACKER_SERVER_LOG_LEVEL=error
+BLOCK_EXPLORER_SERVER_LOG_LEVEL=error
+HAFAH_SERVER_LOG_LEVEL=error
+HIVEMIND_SERVER_LOG_LEVEL=error
+REPUTATION_TRACKER_SERVER_LOG_LEVEL=error
+
 # There are several ways you can configure serving HTTP/HTTPS.  Some examples:
 # - to serve API using HTTPS with automatic redirect from HTTP -> HTTPS (the default), 
 #   just give the hostname:
@@ -223,7 +234,6 @@ TLS_SELF_SIGNED_SNIPPET=caddy/self-signed.snippet
 LOCAL_ADMIN_ONLY_SNIPPET=caddy/local-admin-only.snippet
 # LOCAL_ADMIN_ONLY_SNIPPET=/dev/null
 
-
 # Caddy will only accept requests on the /admin/ endpoints over https by default.
 # This is so that you can password-protect them with HTTP basicauth.
 # However, if you've configured your server to only serve http, and something
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..b4c848b
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,9 @@
+*.sh text eol=lf
+caddy/Dockerfile text eol=lf
+ci/compose/Dockerfile text eol=lf
+ci/dind/Dockerfile text eol=lf
+haproxy/Dockerfile text eol=lf
+healthchecks/Dockerfile text eol=lf
+logrotate/Dockerfile text eol=lf
+postgrest/Dockerfile text eol=lf
+version-display/Dockerfile text eol=lf
diff --git a/.gitignore b/.gitignore
index 376f62f..38061ba 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
+ci/.env
+ci/haf-datadir/
 .env
 repo_versions.txt
 *.sw?
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 6df8d2f..a190c9c 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,5 +1,103 @@
 stages:
+  - build
+  - replay
+  - test
   - publish
+  - cleanup
+
+variables:
+  # Variables required by Common CI jobs
+  CI_COMMON_JOB_VERSION: "1ce04340ebfe838fd7fa09aebdde3bd7e1218bce"
+  DOCKER_BUILDER_TAG: "$CI_COMMON_JOB_VERSION"
+  DOCKER_DIND_TAG: "$CI_COMMON_JOB_VERSION"
+  IMAGE_REMOVER_TAG: "$CI_COMMON_JOB_VERSION"
+
+  # Git configuration
+  GIT_STRATEGY: clone
+  GIT_SUBMODULE_STRATEGY: recursive
+
+  # Replay configuration
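+  # REPLAY_DIRECTORY caches a finished replay for a given commit and HAF version,
+  # while REPLAY_PIPELINE_DIRECTORY is the per-pipeline copy used by the test jobs and removed during cleanup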
+  BLOCK_LOG_SOURCE_DIR: "/blockchain/block_log_5m"
+  REPLAY_DIRECTORY_PREFIX: "/cache/replay_data_api_node"
+  REPLAY_DIRECTORY: "${REPLAY_DIRECTORY_PREFIX}_${CI_COMMIT_SHORT_SHA}_1.27.8"
+  REPLAY_PIPELINE_DIRECTORY: "${REPLAY_DIRECTORY_PREFIX}_${CI_PIPELINE_ID}"
+  DOCKER_TLS_CERTDIR: "${REPLAY_PIPELINE_DIRECTORY}_certs"
+
+  # Other settings
+  TEST_IMAGE_TAG: ":ubuntu22.04-12"
+
+include:
+  - template: Workflows/Branch-Pipelines.gitlab-ci.yml
+  - project: hive/common-ci-configuration
+    ref: 1ce04340ebfe838fd7fa09aebdde3bd7e1218bce
+    file:
+      - /templates/docker_image_jobs.gitlab-ci.yml
+      - /templates/cache_cleanup.gitlab-ci.yml
+  - local: ci/node-replay.gitlab-ci.yml
+
+######## Build ########
+
+docker-build:
+  extends: 
+    - .docker_image_builder_job_template
+  stage: build
+  variables:
+    TAG: "${CI_COMMIT_SHORT_SHA}"
+  before_script:
+    - !reference [.docker_image_builder_job_template, before_script]
+    - |
+      echo -e "\e[0Ksection_start:$(date +%s):login[collapsed=true]\r\e[0KLogging to Docker registry..."
+      docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+      echo -e "\e[0Ksection_end:$(date +%s):login\r\e[0K"
+  script:
+    - |
+      echo -e "\e[0Ksection_start:$(date +%s):build[collapsed=true]\r\e[0KBaking Docker images..."
+      docker buildx bake --file=docker-bake.hcl --provenance=false --progress="plain" "ci"
+      echo -e "\e[0Ksection_end:$(date +%s):build\r\e[0K"
+  tags:
+    - public-runner-docker
+    - hived-for-tests
+
+######## End build ########
+
+######## Replay ########
+
+haf-node-replay:
+  extends: 
+    - .haf-node-replay
+  stage: replay
+  needs:
+    - docker-build
+  tags:
+    - data-cache-storage
+
+######## End replay ########
+
+######## Test ########
+
+# Creates a copy of replay data to be used in the tests
+haf_api_node_replay_data_copy:
+  extends: 
+    - .haf_api_node_replay_data_copy
+  stage: test
+  needs:
+    - haf-node-replay
+  tags:
+    - data-cache-storage
+
+# Tests if HAF API node is working properly
+haf_api_node_test:
+  extends: 
+    - .haf_api_node_test
+  stage: test
+  needs:
+    - haf_api_node_replay_data_copy
+  tags:
+    - data-cache-storage
+
+######## End test ########
+
+######## Publish ########
 
 build_haproxy_healthchecks_docker_image:
   stage: publish
@@ -19,5 +117,61 @@ build_haproxy_healthchecks_docker_image:
   tags:
     - public-runner-docker
   rules:
+    - if: $CI_PIPELINE_TRIGGERED
+      when: never
+    - if: $CI_PIPELINE_SOURCE == "pipeline"
+      when: never
     - if: $CI_COMMIT_TAG && $CI_COMMIT_TAG =~ /^1\..+$/
       when: always
+
+######## End publish ########
+
+######## Cleanup ########
+
+# Deletes replay data used by the tests and created by haf_api_node_replay_data_copy
+cleanup_haf_api_node_pipeline_cache:
+  needs:
+    - haf_api_node_replay_data_copy
+    - haf_api_node_test
+  extends: 
+    - .cleanup_cache_manual_template
+  stage: cleanup
+  variables:
+    CLEANUP_PATH_PATTERN: "${REPLAY_PIPELINE_DIRECTORY}*"
+  rules:
+    - if: $CI_PIPELINE_TRIGGERED
+      when: never
+    - if: $CI_PIPELINE_SOURCE == "pipeline"
+      when: never
+    - when: always
+  tags:
+    - data-cache-storage
+
+# Deletes all HAF API node replay data
+cleanup_haf_api_node_cache_manual:
+  extends: 
+    - .cleanup_cache_manual_template
+  stage: cleanup
+  variables:
+    CLEANUP_PATH_PATTERN: "${REPLAY_DIRECTORY_PREFIX}*"
+  rules:
+    - if: $CI_PIPELINE_TRIGGERED
+      when: never
+    - if: $CI_PIPELINE_SOURCE == "pipeline"
+      when: never
+    - when: manual
+      allow_failure: true
+  tags:
+    - data-cache-storage
+
+# Deletes HAF API node replay data older than 7 days
+cleanup_old_haf_api_node_cache:
+  extends:
+    - .cleanup_old_cache_template
+  stage: cleanup
+  variables:
+    CLEANUP_PATH_PATTERN: "${REPLAY_DIRECTORY_PREFIX}*"
+  tags:
+    - data-cache-storage
+
+######## End cleanup ########
\ No newline at end of file
diff --git a/README.md b/README.md
index b8d9b93..1a5cb8b 100644
--- a/README.md
+++ b/README.md
@@ -1,43 +1,46 @@
 # Using docker compose to install and maintain a HAF server and HAF apps
 
-# System Requirements
+## System Requirements
 
 We assume the base system will be running at least Ubuntu 22.04 (jammy).  Everything will likely work with later versions of Ubuntu. IMPORTANT UPDATE: experiments have shown 20% better API performance when running U23.10, so this latter version is recommended over Ubuntu 22 as a hosting OS.
 
 For a mainnet API node, we recommend:
-- at least 32GB of memory.  If you have 64GB, it will improve the time it takes to sync from scratch, but 
-  it should make less of a difference if you're starting from a mostly-synced HAF node (i.e., 
+
+- at least 32GB of memory.  If you have 64GB, it will improve the time it takes to sync from scratch, but
+  it should make less of a difference if you're starting from a mostly-synced HAF node (i.e.,
   restoring a recent ZFS snapshot) (TODO: quantify this?)
-- 4TB of NVMe storage 
+- 4TB of NVMe storage
   - Hive block log & shared memory: 500GB
   - Base HAF database: 3.5T (before 2x lz4 compression)
   - Hivemind database: 0.65T (before 2x lz4 compression)
   - base HAF + Hivemind:  2.14T (compressed)
   - HAF Block Explorer: xxx
 
-# Install prerequisites
+## Install prerequisites
 
-## Install ZFS support
+### Install ZFS support
 
-We strongly recommend running your HAF instance on a ZFS filesystem, and this documentation assumes 
+We strongly recommend running your HAF instance on a ZFS filesystem, and this documentation assumes
 you will be running ZFS.  Its compression and snapshot features are particularly useful when running a HAF node.
 
-We intend to publish ZFS snapshots of fully-synced HAF nodes that can downloaded to get a HAF node 
+We intend to publish ZFS snapshots of fully-synced HAF nodes that can be downloaded to get a HAF node
 up & running quickly, avoiding multi-day replay times.
 
-```
+```bash
 sudo apt install zfsutils-linux
 ```
 
-## Install Docker
+### Install Docker
+
 Install the latest docker.  If you're running Ubuntu 22.04, the version provided by the
 native docker.io package is too old to work with the compose scripts.  Install the latest
 version from docker.com, following the instructions here:
 
-  https://docs.docker.com/engine/install/ubuntu/
+  [https://docs.docker.com/engine/install/ubuntu/](https://docs.docker.com/engine/install/ubuntu/)
 
 Which are:
-```
+
+```bash
 sudo apt-get update
 sudo apt-get install ca-certificates curl gnupg
 sudo install -m 0755 -d /etc/apt/keyrings
@@ -52,30 +55,33 @@ sudo apt-get update
 sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
 ```
 
-## Create a ZFS pool
+### Create a ZFS pool
 
 Create your ZFS pool if necessary.  HAF requires at least 4TB of space, and 2TB NVMe drives are
 readily available, so we typically construct a pool striping data across several 2TB drives.
-If you have three or four drives, you will get somewhat better read/write performance, and 
+If you have three or four drives, you will get somewhat better read/write performance, and
 the extra space can come in handy.
 
 To create a pool named "haf-pool" using the first two NVMe drives in your system,
 use a command like:
-```
+
+```bash
 sudo zpool create haf-pool /dev/nvme0n1 /dev/nvme1n1
 ```
-If you name your ZFS pool something else, configure the name in the environment file, 
+
+If you name your ZFS pool something else, configure the name in the environment file,
 as described in the next section.
 
-Note: By default, ZFS tries to detect your disk's actual sector size, but it often gets it wrong 
+Note: By default, ZFS tries to detect your disk's actual sector size, but it often gets it wrong
 for modern NVMe drives, which will degrade performance due to having to write the same sector multiple
-times.  If you don't know the actual sector size, we recommend forcing the sector size to 8k by 
+times.  If you don't know the actual sector size, we recommend forcing the sector size to 8k by
 setting ashift=13 (e.g., `zpool create -o ashift=13 haf-pool /dev....`)
 
-## Configure your environment
+### Configure your environment
 
 Make a copy of the file `.env.example` and customize it for your system.  This file contains
 configurable parameters for things like
+
 - directories
 - versions of hived, HAF, and associated tools
 
@@ -84,34 +90,37 @@ keep multiple configurations, you can give your environment files different name
 `.env.dev` and `.env.prod`, then explicitly specify the filename when running `docker compose`:
 `docker compose --env-file=.env.dev ...`
 
-## Set up ZFS filesystems
+### Set up ZFS filesystems
 
 The HAF installation is spread across multiple ZFS datasets, which allows us to set different
 ZFS options for different portions of the data. We recommend that most nodes keep the default
 datasets in order to enable easy sharing of snapshots.
 
-### Initializing from scratch
+#### Initializing from scratch
 
 If you're starting from scratch, after you've created your zpool and configured its name in the .env file
 as described above, run:
-```
+
+```bash
 sudo ./create_zfs_datasets.sh
 ```
+
 to create and mount the datasets.
 
 By default, the dataset holding most of the database storage uses zfs compression. The dataset for
 the blockchain data directory (which holds the block_log for hived and the shared_memory.bin file)
-is not compressed because hived directly manages compression of the block_log file. 
+is not compressed because hived directly manages compression of the block_log file.
 
 If you have a LOT of nvme storage (e.g. 6TB+), you can get better API performance at the cost of disk
 storage by disabling ZFS compression on the database dataset, but for most nodes this isn't recommended.
 
-#### Speeding up the initial sync
+##### Speeding up the initial sync
 
 Following the instructions above will get you a working HAF node, but there are some things you can
 do to speed up the initial sync.
 
-##### Replaying
+###### Replaying
+
 If you already have a recent block_log file (e.g., you're already running another instance of hived
 somewhere else on your local network), you can copy the block_log and block_log.artifacts files
 from that node into your /haf-pool/haf-datadir/blockchain directory.  After copying the files,
@@ -120,26 +129,30 @@ so hived can read/write them: `chown 1000:100 block_log block_log.artifacts`
 
 Before bringing up the haf service, you will also need to add the `--replay-blockchain` argument to
 hived to tell it you want to replay.  Edit the `.env` file's `ARGUMENTS` line like so:
-```
+
+```bash
 ARGUMENTS="--replay-blockchain"
 ```
+
+Once the replay has finished, you can revert the `ARGUMENTS` line to the empty string.
 
-##### Shared Memory on Ramdisk
+###### Shared Memory on Ramdisk
+
 If you have enough spare memory on your system, you can speed up the initial replay by placing the
 `shared_memory.bin` file on a ramdisk.
 
-The current default shared memory filesize is 24G, so this will only work if you have 24G free 
-(that's in addition to the memory you expect to be used by hived and HAF's integrated PostgreSQL 
-instance). 
+The current default shared memory filesize is 24G, so this will only work if you have 24G free
+(that's in addition to the memory you expect to be used by hived and HAF's integrated PostgreSQL
+instance).
 
 If you have a 64GB system, ensure you have a big enough swapfile (32GB is recommended
 and 8GB is known to not be sufficient) to handle peak memory usage needs during the replay.
-Peak memory usage currently occurs when haf table indexes are being built during the final 
+Peak memory usage currently occurs when haf table indexes are being built during the final
 stage of replay.
 
 To do this, first create a ramdisk:
-```
+
+```bash
 sudo mkdir /mnt/haf_shared_mem
 
 # then
@@ -152,15 +165,17 @@ sudo chown 1000:100 /mnt/haf_shared_mem
 ```
 
 Then, edit your `.env` file to tell it where to put the shared memory file:
-```
+
+```bash
 HAF_SHM_DIRECTORY="/mnt/haf_shared_mem"
 ```
 
 Now, when you resync / replay, your shared memory file will actually be in memory.  
 
-###### Moving Shared Memory back to disk
-Once your replay is finished, we suggest moving the shared_memory.bin file back to NVMe storage, 
+###### Moving Shared Memory back to disk
+
+Once your replay is finished, we suggest moving the shared_memory.bin file back to NVMe storage,
 because:
+
 - it doesn't make much performance difference once hived is in sync
 - you'll be able to have your zfs snapshots include your shared memory file
 - you won't be forced to replay if the power goes out
@@ -173,35 +188,37 @@ To do this:
 - update the `.env` file's location: `HAF_SHM_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}/blockchain"`
 - bring the stack back up (`docker compose up -d`)
 
-### Initializing from a snapshot
+#### Initializing from a snapshot
 
 If you're starting with one of our snapshots, the process of restoring the snapshots will create the correct
 datasets with the correct options set.
 
-First, download the snapshot file from: TODO: http://xxxxxx
+First, download the snapshot file from: TODO: [http://xxxxxx](http://xxxxxx)
 
 Since these snapshots are huge, it's best to download the snapshot file to a different disk (a magnetic
 HDD will be fine for this) that has enough free space for the snapshot first, then restore it to the ZFS pool.
 This lets you easily resume the download if your transfer is interrupted.  If you download directly to
 the ZFS pool, any interruption would require you to start the download from the beginning.
 
-```
+```bash
 wget -c https://whatever.net/snapshot_filename
 ```
+
 If the transfer gets interrupted, run the same command again to resume.
 
 Then, to restore the snapshot, run:
-```
+
+```bash
 sudo zfs recv -d -v haf-pool < snapshot_filename
 ```
 
-## Launch procedure
+### Launch procedure
 
 ---
 
 start/stop HAF instance based on profiles enabled in your `.env` file
 
-```SH
+```bash
 docker compose up -d
 
 docker compose logs -f hivemind-block-processing # tail the hivemind sync logs to the console
@@ -215,12 +232,13 @@ This will start or stop all services selected by the profiles you have
 enabled in the `.env` file's `COMPOSE_PROFILES` variable.
 
 Currently available profiles are:
+
 - `core`: the minimal HAF system of a database and hived
 - `admin`: useful tools for administrating HAF: pgadmin, pghero
 - `apps`: core HAF apps: hivemind, HAfAH, haf-block-explorer
 - `servers`: services for routing/caching API calls: haproxy, jussi, varnish
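+
+For example, a `.env` that enables the full stack could contain (a sketch; trim the list to the profiles you actually want to run):
+
+```bash
+COMPOSE_PROFILES="core,admin,apps,servers"
+```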
 
-# Observing node startup
+## Observing node startup
 
 After you start your HAF instance, hived will need some time to catch up to the head block
 of the Hive blockchain (typically a few minutes or less if you started from a snapshot,
@@ -231,7 +249,7 @@ If syncing or replaying for the first time, HAF will delay creating indexes on i
 
 If you enabled the "admin" profile, you can use pghero's "Live Queries" view to monitor this process (e.g https://your_server/admin/pghero/live_queries). If not, you can still observe the cpu and disk io usage by postgresql during this process if you run a tool like htop.
 
-# After startup: Monitoring services and troubleshooting failures on your API node
+## After startup: Monitoring services and troubleshooting failures on your API node
 
 Haproxy can be used to monitor the state of the various services on your HAF server:
 `https://your_server_name/admin/haproxy/`
@@ -245,23 +263,26 @@ You can diagnose API performance problems using pgAdmin and PgHero. pgAdmin is b
 
 https://your_server_name/admin/
 
-# Creating a ZFS snapshot to backup your node
+## Creating a ZFS snapshot to backup your node
+
 Creating snapshots is fast and easy:
 
-```
+```bash
 docker compose down  #shut down haf
 ./snapshot_zfs_datasets.sh 20231023T1831Z-haf-only # where 20231023T1831Z-haf-only is an example snapshot name
 docker compose up -d
 ```
+
 Note: snapshot_zfs_datasets.sh unmounts the HAF datasets, takes a snapshot, and remounts them. Since it unmounts the datasets, the script will fail if you have anything accessing the datasets. In particular, be sure you don't have any terminals open with a current working directory set to those datasets. In theory, the script shouldn't have to unmount the datasets before taking the snapshot, but we have occasionally encountered issues where the snapshots didn't get all needed data.
 
-# Deleting Hivemind data from your database (or a similar app's data)
+## Deleting Hivemind data from your database (or a similar app's data)
 
 You may want to remove the Hivemind app's data from your database -- either because you no longer
-need it and want to free the space, or because you want want to replay your Hivemind app from 
+need it and want to free the space, or because you want to replay your Hivemind app from
 scratch, which is required for some upgrades.
 
 To delete the data:
+
 - stop Hivemind, but leave the rest of the stack running: `docker compose down hivemind-install hivemind-block-processing hivemind-server`
 - run the uninstall script: `docker compose --profile=hivemind-uninstall up`
 - you'll see the results of a few sql statements scroll by, and it should exit after a few seconds
diff --git a/balance_tracker.yaml b/balance_tracker.yaml
index e50f12b..3f44ae0 100644
--- a/balance_tracker.yaml
+++ b/balance_tracker.yaml
@@ -74,6 +74,7 @@ services:
       PGRST_DB_POOL_ACQUISITION_TIMEOUT: 10
       PGRST_DB_EXTRA_SEARCH_PATH: ${BTRACKER_SCHEMA:-hafbe_bal}
       # PGRST_OPENAPI_SERVER_PROXY_URI: http://${PUBLIC_HOSTNAME}/btracker_api/
+      PGRST_LOG_LEVEL: ${BALANCE_TRACKER_SERVER_LOG_LEVEL}
     healthcheck:
       test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1:3001/ready || exit 1"]
       interval: 10s
diff --git a/ci/compose.yml b/ci/compose.yml
new file mode 100644
index 0000000..c9b7b4e
--- /dev/null
+++ b/ci/compose.yml
@@ -0,0 +1,95 @@
+name: 'haf_api_node'
+services:
+  docker:
+    image: registry.gitlab.syncad.com/hive/haf_api_node/dind:${DIND_TAG:-latest}
+    privileged: true
+    environment:
+      DOCKER_TLS_CERTDIR: /certs
+      TOP_LEVEL_DATASET_MOUNTPOINT: /cache/haf-datadir
+    volumes:
+      - type: volume
+        source: docker-certs-ca
+        target: /certs/ca
+      - type: volume
+        source: docker-certs-client
+        target: /certs/client
+      - type: volume
+        source: docker-certs-server
+        target: /certs/server
+      - type: volume
+        source: docker-lib
+        target: /var/lib/docker
+      - type: volume
+        source: haf-datadir
+        target: /cache/haf-datadir
+    networks:
+      - docker
+    ports:
+      - name: docker-tls
+        target: 2376
+        published: 2376
+      - name: http
+        target: 80
+        published: 8080
+      - name: https
+        target: 443
+        published: 8443
+    healthcheck:
+      test: [ "CMD", "docker", "version"]
+      interval: 5s
+      timeout: 5s
+  compose:
+    image: registry.gitlab.syncad.com/hive/haf_api_node/compose:${COMPOSE_TAG:-latest}
+    environment:
+      DOCKER_TLS_CERTDIR: /certs
+      DOCKER_HOST: 
+      ARGUMENTS: 
+      TOP_LEVEL_DATASET_MOUNTPOINT: /cache/haf-datadir
+      PUBLIC_HOSTNAME:
+      FAKETIME:
+      USE_ALTERNATE_HAPROXY_CONFIG:
+      HIVEMIND_SYNC_ARGS:
+      HIVE_API_NODE_VERSION:
+      HAF_IMAGE:
+      HIVEMIND_IMAGE:
+      HIVEMIND_REWRITER_IMAGE:
+      HAFAH_IMAGE:
+      HAFAH_REWRITER_IMAGE:
+      REPUTATION_TRACKER_IMAGE:
+      REPUTATION_TRACKER_REWRITER_IMAGE:
+      HAF_VERSION:
+      HIVEMIND_VERSION:
+      HAFAH_VERSION:
+      REPUTATION_TRACKER_VERSION:
+      
+    volumes:
+      - type: volume
+        source: docker-certs-client
+        target: /certs/client
+      - type: volume
+        source: haf-datadir
+        target: /cache/haf-datadir  
+    networks:
+      - docker
+    healthcheck:
+      test: [ "CMD-SHELL", "wget --timeout=2 -nv -t1 --spider http://127.0.0.1 || exit 1" ] 
+      interval: 5s
+      timeout: 5s
+    depends_on:
+      docker:
+        condition: service_healthy
+volumes:
+  docker-certs-ca:
+  docker-certs-client:
+    name: docker-certs-client
+  docker-certs-server:
+  docker-lib:
+  haf-datadir:
+    driver: local
+    driver_opts:
+      o: bind
+      type: none
+      device: ${HAF_DATA_DIRECTORY}/
+networks:
+  docker:    
+    name: docker
\ No newline at end of file
diff --git a/ci/compose/Dockerfile b/ci/compose/Dockerfile
new file mode 100644
index 0000000..de6dc81
--- /dev/null
+++ b/ci/compose/Dockerfile
@@ -0,0 +1,54 @@
+# syntax=docker/dockerfile:1.5
+FROM registry.gitlab.syncad.com/hive/haf_api_node/docker:26.1.4-cli
+
+ENV TOP_LEVEL_DATASET_MOUNTPOINT=/haf-pool/haf-datadir \
+    COMPOSE_PROFILES="core,admin,hafah,hivemind,servers" \
+    BALANCE_TRACKER_SERVER_LOG_LEVEL=info \
+    BLOCK_EXPLORER_SERVER_LOG_LEVEL=info \
+    HAFAH_SERVER_LOG_LEVEL=info \
+    HIVEMIND_SERVER_LOG_LEVEL=info \
+    REPUTATION_TRACKER_SERVER_LOG_LEVEL=info
+
+RUN <<EOF
+set -e
+
+apk add --no-cache tini busybox-extras curl bash jq
+EOF
+
+WORKDIR /haf-api-node
+
+COPY ci/scripts /haf-api-node/ci/scripts
+COPY ci/config.ini /haf-api-node/ci/config.ini
+COPY *.yaml /haf-api-node/
+COPY compose.yml compose.yml
+COPY .env.example .env
+
+# Disable Hivemind's healthcheck since it randomly breaks the stack and is not necessary
+# for CI
+RUN <<EOF
+sed -i.bak -e 's/test.*block-processing-healthcheck.*/test: ["CMD-SHELL","true"]/' hivemind.yaml
+EOF
+
+COPY --chmod=644 <<EOF index.html
+<!doctype html>
+<html><body><h1>A webpage served by netcat</h1></body></html>
+EOF
+
+COPY --chmod=644 <<EOF faketime.yaml
+services:
+  haf:
+    environment:
+      FAKETIME:
+      LD_PRELOAD: "/home/hived_admin/hive_base_config/faketime/src/libfaketime.so.1"
+      OVERRIDE_LD_PRELOAD: "/home/hived_admin/hive_base_config/faketime/src/libfaketime.so.1"
+      FAKETIME_DONT_FAKE_MONOTONIC: 1
+      FAKETIME_DONT_RESET: 1
+      TZ: "UTC"
+EOF
+
+COPY --chmod=755 ci/compose/entrypoint.sh entrypoint.sh
+
+EXPOSE 80
+
+ENTRYPOINT [ "/sbin/tini", "-g", "--", "/haf-api-node/entrypoint.sh" ]
+CMD [ "up", "--quiet-pull" ]
\ No newline at end of file
diff --git a/ci/compose/entrypoint.sh b/ci/compose/entrypoint.sh
new file mode 100755
index 0000000..171109e
--- /dev/null
+++ b/ci/compose/entrypoint.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+set -e
+
+if [[ -f "${TOP_LEVEL_DATASET_MOUNTPOINT}/logs/haf-api-node.log" ]]
+then
+  mv -v "${TOP_LEVEL_DATASET_MOUNTPOINT}/logs/haf-api-node.log" "${TOP_LEVEL_DATASET_MOUNTPOINT}/logs/haf-api-node.$(date +"%d-%m-%y-%H-%M-%S").log"
+fi
+
+# Create log file and make it readable for anyone
+touch "${TOP_LEVEL_DATASET_MOUNTPOINT}/logs/haf-api-node.log"
+chmod 666 "${TOP_LEVEL_DATASET_MOUNTPOINT}/logs/haf-api-node.log"
+chown 1000:100 "${TOP_LEVEL_DATASET_MOUNTPOINT}/logs/haf-api-node.log"
+
+{
+  /haf-api-node/ci/scripts/set-up-stack.sh
+
+  echo "Waiting for Docker to start..."
+  until docker-entrypoint.sh docker version &>/dev/null
+  do 
+    echo "Waiting for Docker to start..."
+    sleep 10
+  done
+
+  # Necessary for GitLab CI service healthcheck
+  httpd
+
+  docker-entrypoint.sh docker compose "$@"
+} 2>&1 | tee -a "${TOP_LEVEL_DATASET_MOUNTPOINT}/logs/haf-api-node.log"
diff --git a/ci/config.ini b/ci/config.ini
new file mode 100644
index 0000000..a817e1b
--- /dev/null
+++ b/ci/config.ini
@@ -0,0 +1,275 @@
+# Appender definition JSON. Obligatory attributes:
+# "appender" - name of appender
+# "stream" - target stream, mutually exclusive with "file"
+# "file" - target filename (including path), mutually exclusive with "stream"
+# Optional attributes:
+# "time_format" - see time_format enum values (default: "iso_8601_seconds")
+# Optional attributes (applicable to file appender only):
+# "delta_times" - whether times should be printed as deltas since previous message (default: false)
+# "flush" - whether each log write should end with flush (default: true)
+# "truncate" - whether to truncate the log file at startup (default: true)
+# "rotate" - whether the log files should be rotated (default: true)
+# "rotation_interval" - seconds between file rotation (default: 3600)
+# "rotation_limit" - seconds before rotated file is removed (default: 86400)
+log-appender = {"appender":"stderr","stream":"std_error","time_format":"iso_8601_microseconds"} {"appender":"p2p","file":"logs/hived/p2p/p2p.log","truncate":false,"time_format":"iso_8601_milliseconds", "rotation_interval": 86400, "rotation_limit": 2592000} {"appender": "default", "file": "logs/hived/default/default.log","truncate":false, "time_format": "iso_8601_milliseconds", "rotation_interval": 86400, "rotation_limit": 2592000}
+
+# log-console-appender = 
+
+# log-file-appender = 
+
+# Logger definition JSON:
+# "name" - name of logger
+# "level" - level of reporting, see log_level enum values
+# "appenders" - list of designated appenders
+log-logger = {"name":"default","level":"info","appenders":["stderr", "default"]} {"name":"user","level":"debug","appenders":["stderr", "default"]} {"name":"p2p","level":"warn","appenders":["p2p"]}
+
+# list of addresses, that will receive notification about in-chain events
+# notifications-endpoint = 
+
+# Whether to print backtrace on SIGSEGV
+backtrace = yes
+
+# Plugin(s) to enable, may be specified multiple times
+plugin = node_status_api account_by_key account_by_key_api block_api condenser_api database_api json_rpc market_history market_history_api network_broadcast_api p2p rc_api state_snapshot transaction_status transaction_status_api wallet_bridge_api webserver
+
+# The location of the rocksdb database for account history. By default it is $DATA_DIR/blockchain/account-history-rocksdb-storage
+account-history-rocksdb-path = "blockchain/account-history-rocksdb-storage"
+
+# Defines a range of accounts to track as a json pair ["from","to"] [from,to] Can be specified multiple times.
+# account-history-rocksdb-track-account-range = 
+
+# Defines a list of operations which will be explicitly logged.
+# account-history-rocksdb-whitelist-ops = 
+
+# Defines a list of operations which will be explicitly ignored.
+# account-history-rocksdb-blacklist-ops = 
+
+# Where to export data (NONE to discard)
+block-data-export-file = NONE
+
+# Skip producing when no factory is registered
+block-data-skip-empty = 0
+
+# How often to print out block_log_info (default 1 day)
+block-log-info-print-interval-seconds = 86400
+
+# Whether to defer printing until block is irreversible
+block-log-info-print-irreversible = 1
+
+# Where to print (filename or special sink ILOG, STDOUT, STDERR)
+block-log-info-print-file = ILOG
+
+# the location of the chain shared memory files (absolute path or relative to application data dir)
+shared-file-dir = "blockchain"
+
+# Size of the shared memory file. Default: 24G. If running with many plugins, increase this value to 28G.
+shared-file-size = 24G
+
+# A 2 precision percentage (0-10000) that defines the threshold for when to autoscale the shared memory file. Setting this to 0 disables autoscaling. Recommended value for consensus node is 9500 (95%).
+shared-file-full-threshold = 0
+
+# A 2 precision percentage (0-10000) that defines how quickly to scale the shared memory file. When autoscaling occurs the file's size will be increased by this percent. Setting this to 0 disables autoscaling. Recommended value is between 1000-2000 (10-20%)
+shared-file-scale-rate = 0
+
+# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.
+# checkpoint = 
+
+# flush shared memory changes to disk every N blocks
+# flush-state-interval = 
+
+# Compress blocks using zstd as they're added to the block log
+enable-block-log-compression = 1
+
+# If enabled, corrupted block_log will try to fix itself automatically.
+enable-block-log-auto-fixing = 1
+
+# Block log zstd compression level 0 (fast, low compression) - 22 (slow, high compression)
+block-log-compression-level = 15
+
+# Number of worker threads used to pre-validate transactions and blocks
+blockchain-thread-pool-size = 8
+
+# Level of detail of block stat reports: NONE, MINIMAL, REGULAR, FULL. Default FULL (recommended for API nodes).
+block-stats-report-type = FULL
+
+# Where to put block stat reports: DLOG, ILOG, NOTIFY, LOG_NOTIFY. Default ILOG.
+block-stats-report-output = ILOG
+
+# Level of detail of daily RC stat reports: NONE, MINIMAL, REGULAR, FULL. Default REGULAR.
+rc-stats-report-type = REGULAR
+
+# Where to put daily RC stat reports: DLOG, ILOG, NOTIFY, LOG_NOTIFY. Default ILOG.
+rc-stats-report-output = ILOG
+
+# Whether the block log should be single file (-1), not used at all & keeping only head block in memory (0), or split into files each containing 1M blocks & keeping N full million latest blocks (N). Default -1.
+# Since CI performs both replay and sync of the node and since HAF opens the block log in write mode after sync, this needs to be enabled to avoid block log access issues.
+block-log-split = 9999
+
+# WIF PRIVATE KEY to be used to sign each transaction.
+# colony-sign-with = 
+
+# Number of worker threads. Default is 4
+colony-threads = 4
+
+# Max number of transactions produced per block. When not set it will be sum of weights of individual types.
+# colony-transactions-per-block = 
+
+# Start producing transactions when block with given number becomes head block (or right at the start if the block already passed).
+colony-start-at-block = 0
+
+# Disables broadcasting of produced transactions - only local witness will include them in block.
+colony-no-broadcast = 0
+
+# Size and frequency parameters of article transactions.
+# colony-article = 
+
+# Size and frequency parameters of reply transactions.
+# colony-reply = 
+
+# Size and frequency parameters of vote transactions.
+# colony-vote = 
+
+# Size and frequency parameters of transfer transactions.
+# colony-transfer = 
+
+# Size and frequency parameters of custom_json transactions. If no other transaction type is requested, minimal custom jsons will be produced.
+# colony-custom = 
+
+# Starting block for comment cashout log
+# cashout-logging-starting-block = 
+
+# Ending block for comment cashout log
+# cashout-logging-ending-block = 
+
+# Path to log file
+# cashout-logging-log-path-dir = 
+
+# Database edits to apply on startup (may specify multiple times)
+# debug-node-edit-script = 
+
+# json-rpc log directory name.
+# log-json-rpc = 
+
+# Track market history by grouping orders into buckets of equal size measured in seconds specified as a JSON array of numbers
+market-history-bucket-size = [15,60,300,3600,86400]
+
+# How far back in time to track history for each bucket size, measured in the number of buckets (default: 5760)
+market-history-buckets-per-size = 5760
+
+# The local IP address and port to listen for incoming connections.
+# p2p-endpoint = 
+
+# Maximum number of incoming connections on P2P endpoint.
+# p2p-max-connections = 
+
+# The IP address and port of a remote peer to sync with.
+p2p-seed-node = seed.hive.blog:2001 seed.openhive.network:2001 hive-seed.roelandp.nl:2001 hive-seed.arcange.eu:2001 anyx.io:2001 hived.splinterlands.com:2001 hive-api.3speak.tv:2001 node.mahdiyari.info:2001 hive-seed.lukestokes.info:2001 seed.deathwing.me:2001 hive-seed.actifit.io:2001 seed.shmoogleosukami.co.uk:2001 hiveseed.rishipanthee.com:2001 
+
+# P2P network parameters. (Default: {"listen_endpoint":"0.0.0.0:0","accept_incoming_connections":true,"wait_if_endpoint_is_busy":true,"private_key":"0000000000000000000000000000000000000000000000000000000000000000","desired_number_of_connections":20,"maximum_number_of_connections":200,"peer_connection_retry_timeout":30,"peer_inactivity_timeout":5,"peer_advertising_disabled":false,"maximum_number_of_blocks_to_handle_at_one_time":200,"maximum_number_of_sync_blocks_to_prefetch":20000,"maximum_blocks_per_peer_during_syncing":200,"active_ignored_request_timeout_microseconds":6000000} )
+# p2p-parameters = 
+
+# path to block_log file - source of block emissions
+# pacemaker-source = 
+
+# minimum time of emission offset from block timestamp in milliseconds, default -300ms
+pacemaker-min-offset = -300
+
+# maximum time of emission offset from block timestamp in milliseconds, default 20000ms (when exceeded, node will be stopped)
+pacemaker-max-offset = 20000
+
+# postgres connection string
+# psql-url = 
+
+# indexes/constraints will be recreated if `psql_block_number + psql_index_threshold >= head_block_number`
+psql-index-threshold = 20000000
+
+# number of threads which dump operations to database during reindexing
+psql-operations-threads-number = 5
+
+# number of threads which dump transactions to database during reindexing
+psql-transactions-threads-number = 2
+
+# number of threads which dump account operations to database during reindexing
+psql-account-operations-threads-number = 2
+
+# enable collect data to account_operations table
+psql-enable-account-operations-dump = 1
+
+# force open database even when irreversible data are inconsistent
+psql-force-open-inconsistent = false
+
+# threshold to move synchronization state during start immediately to live
+psql-livesync-threshold = 0
+
+# Defines a range of accounts to track as a json pair ["from","to"] [from,to]. Can be specified multiple times.
+# psql-track-account-range = 
+
+# Defines operations' types to track. Can be specified multiple times.
+# psql-track-operations = 
+
+# For a given operation type, defines a regex that filters the operation body and decides whether it is excluded. Can be specified multiple times. A complex regex can cause slowdown, or processing can even be abandoned due to complexity.
+# psql-track-body-operations = 
+
+# enable filtering accounts and operations
+psql-enable-filter = 1
+
+# first synced block
+psql-first-block = 1
+
+# write-ahead log for data sent from hived to PostgreSQL
+# psql-wal-directory = 
+
+# The location (root-dir) of the snapshot storage, to save/read portable state dumps
+snapshot-root-dir = "snapshot"
+
+# Endpoint to send statsd messages to.
+# statsd-endpoint = 
+
+# Size to batch statsd messages.
+statsd-batchsize = 1
+
+# Whitelist of statistics to capture.
+# statsd-whitelist = 
+
+# Blacklist of statistics to capture.
+# statsd-blacklist = 
+
+# Defines the number of blocks from the head block that transaction statuses will be tracked.
+transaction-status-block-depth = 64000
+
+# Local http endpoint for webserver requests.
+# webserver-http-endpoint = 
+
+# Local https endpoint for webserver requests.
+# webserver-https-endpoint = 
+
+# Local unix http endpoint for webserver requests.
+# webserver-unix-endpoint = 
+
+# Local websocket endpoint for webserver requests.
+# webserver-ws-endpoint = 
+
+# Enable the RFC-7692 permessage-deflate extension for the WebSocket server (only used if the client requests it).  This may save bandwidth at the expense of CPU
+webserver-ws-deflate = 0
+
+# Number of threads used to handle queries. Default: 32.
+webserver-thread-pool-size = 32
+
+# File name with a server's certificate.
+# webserver-https-certificate-file-name = 
+
+# File name with a server's private key.
+# webserver-https-key-file-name = 
+
+# Enable block production, even if the chain is stale.
+enable-stale-production = 0
+
+# Percent of witnesses (0-99) that must be participating in order to produce blocks
+required-participation = 33
+
+# name of witness controlled by this node (e.g. initwitness )
+# witness = 
+
+# WIF PRIVATE KEY to be used by one or more witnesses or miners
+# private-key = 
+
diff --git a/ci/dind/Dockerfile b/ci/dind/Dockerfile
new file mode 100644
index 0000000..f3feb88
--- /dev/null
+++ b/ci/dind/Dockerfile
@@ -0,0 +1,41 @@
+# syntax=docker/dockerfile:1.7-labs
+FROM registry.gitlab.syncad.com/hive/haf_api_node/docker:26.1.4-dind
+
+ENV TOP_LEVEL_DATASET_MOUNTPOINT=/haf-pool/haf-datadir
+
+RUN <<-EOF
+    set -e
+
+    mkdir -p /haf-api-node/caddy/snippets
+    mkdir -p /haf-api-node/caddy/admin_html
+    mkdir -p /haf-api-node/jussi
+    mkdir -p /haf-api-node/pgadmin
+    mkdir -p /haf-api-node/varnish
+EOF
+
+WORKDIR /haf-api-node
+
+COPY --chmod=755 ci/dind/entrypoint.sh entrypoint.sh
+COPY --chmod=755 ci/scripts/prepare-stack-data-directory.sh prepare-stack-data-directory.sh
+
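+# Copy the rest of the repository into the image, excluding CI helpers, docs, shell scripts, compose files, and other files the in-container stack does not need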
+COPY --exclude=ci/* \
+     --exclude=*.md \
+     --exclude=*/*.md \
+     --exclude=*.sh \
+     --exclude=*/*.sh \
+     --exclude=*/*/*.sh \
+     --exclude=compose.yml \
+     --exclude=*/compose.yml \
+     --exclude=*/compose.*.yml \
+     --exclude=*.yaml \
+     --exclude=*/*.yaml \
+     --exclude=zfs.conf \
+     --exclude=.env.example \
+     --exclude=*/*.js . .
+
+COPY --parents drone/config.yaml .
+
+EXPOSE 2375 2376 80 443
+
+ENTRYPOINT ["/haf-api-node/entrypoint.sh"]
+CMD []
\ No newline at end of file
diff --git a/ci/dind/entrypoint.sh b/ci/dind/entrypoint.sh
new file mode 100755
index 0000000..4129a37
--- /dev/null
+++ b/ci/dind/entrypoint.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+/haf-api-node/prepare-stack-data-directory.sh "${TOP_LEVEL_DATASET_MOUNTPOINT}"
+
+# If PostgreSQL data directory already exists, reset its permissions
+[ -d "${TOP_LEVEL_DATASET_MOUNTPOINT}/haf_db_store"  ] && chown -R 105:109 "${TOP_LEVEL_DATASET_MOUNTPOINT}/haf_db_store"
+[ -d "${TOP_LEVEL_DATASET_MOUNTPOINT}/haf_postgresql_conf.d"  ] && chown -R 105:109 "${TOP_LEVEL_DATASET_MOUNTPOINT}/haf_postgresql_conf.d"
+
+echo "Starting dockerd..."
+
+exec dockerd-entrypoint.sh "$@"
\ No newline at end of file
diff --git a/ci/node-replay.gitlab-ci.yml b/ci/node-replay.gitlab-ci.yml
new file mode 100644
index 0000000..d338512
--- /dev/null
+++ b/ci/node-replay.gitlab-ci.yml
@@ -0,0 +1,89 @@
+variables:
+  API_NODE_TAG: ${CI_COMMIT_SHORT_SHA}
+
+.api-node-job:
+  extends: .job-defaults
+  variables:
+    HIVE_API_NODE_VERSION: "1.27.8"
+    HAF_VERSION: "${HIVE_API_NODE_VERSION}"
+    HIVEMIND_VERSION: "${HIVE_API_NODE_VERSION}"
+    HAFAH_VERSION: "${HIVE_API_NODE_VERSION}"
+    REPUTATION_TRACKER_VERSION: "${HIVE_API_NODE_VERSION}"
+    HIVE_API_NODE_REGISTRY: "registry.gitlab.syncad.com/hive"
+    HAF_IMAGE: "${HIVE_API_NODE_REGISTRY}/haf"
+    HAFAH_IMAGE: "${HIVE_API_NODE_REGISTRY}/hafah"
+    HAFAH_REWRITER_IMAGE: "${HIVE_API_NODE_REGISTRY}/hafah/postgrest-rewriter"
+    HIVEMIND_IMAGE: "${HIVE_API_NODE_REGISTRY}/hivemind"
+    HIVEMIND_REWRITER_IMAGE: "${HIVE_API_NODE_REGISTRY}/hivemind/postgrest-rewriter"
+    REPUTATION_TRACKER_IMAGE: "${HIVE_API_NODE_REGISTRY}/reputation_tracker"
+    REPUTATION_TRACKER_REWRITER_IMAGE: "${HIVE_API_NODE_REGISTRY}/reputation_tracker/postgrest-rewriter"
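+    # Keep CI runs short: hived stops replaying at LAST_BLOCK_NUMBER and Hivemind's community processing starts just before that block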
+    LAST_BLOCK_NUMBER: "5000000"
+    ARGUMENTS: "--replay-blockchain --stop-at-block ${LAST_BLOCK_NUMBER}"
+    HIVEMIND_SYNC_ARGS: "--community-start-block=4998000"
+    USE_ALTERNATE_HAPROXY_CONFIG: "true"
+    PUBLIC_HOSTNAME: "dind"
+    DOCKER_DRIVER: "overlay2"
+    DOCKER_HOST: "tcp://${PUBLIC_HOSTNAME}:2376"
+    DOCKER_TLS_SAN: "DNS:${PUBLIC_HOSTNAME}"
+    CI_DEBUG_SERVICES: "false" # Change to true to debug services in this job
+    GIT_STRATEGY: "none"
+    ADDITIONAL_CONFIGURATION_SCRIPT: "/haf-api-node/ci/scripts/copy-haf-config.sh"
+  image: 
+    name: registry.gitlab.syncad.com/hive/haf_api_node/compose:${API_NODE_TAG}
+    entrypoint: [ "" ]
+  services:
+    - name: registry.gitlab.syncad.com/hive/haf_api_node/dind:${API_NODE_TAG}
+      alias: dind
+      variables:
+        HEALTHCHECK_TCP_PORT: '2376'
+
+.haf-node-replay:
+  extends: .api-node-job
+  timeout: 2 hours
+  variables:
+    TOP_LEVEL_DATASET_MOUNTPOINT: "${REPLAY_DIRECTORY}"
+    HAF_DB_URL_HIVEMIND: "postgresql://hivemind@haf/haf_block_log"
+    HAF_DB_URL_REPTRACKER: "postgresql://reptracker_owner@haf/haf_block_log"
+    PSQL_COMMAND_HIVEMIND: "SELECT current_block_num FROM hafd.contexts WHERE name = 'hivemind_app'"
+    PSQL_COMMAND_REPTRACKER: "SELECT current_block_num FROM hafd.contexts WHERE name = 'reptracker_app'"
+  script:
+    - docker-entrypoint.sh /haf-api-node/ci/scripts/replay-api-node.sh
+  after_script:
+    - docker-entrypoint.sh /haf-api-node/ci/scripts/after-script.sh
+  artifacts:
+    when: always
+    expire_in: 1 week
+    paths:
+      - "*.txt"
+      - "*.log"
+      - "logs/"
+      - "*.json"
+
+.haf_api_node_replay_data_copy:
+  extends: .job-defaults
+  image: 
+    name: registry.gitlab.syncad.com/hive/haf_api_node/compose:${API_NODE_TAG}
+    entrypoint: [ "" ]
+  script:
+    - /haf-api-node/ci/scripts/copy-datadir.sh
+
+.haf_api_node_test:
+  extends: .api-node-job
+  variables:
+    TOP_LEVEL_DATASET_MOUNTPOINT: "${REPLAY_PIPELINE_DIRECTORY}"
+    FF_NETWORK_PER_BUILD: "true"
+  services:
+    - !reference [.api-node-job, services]
+    - name: registry.gitlab.syncad.com/hive/haf_api_node/compose:${API_NODE_TAG}
+  script:
+    - docker-entrypoint.sh /haf-api-node/ci/scripts/test-api-node.sh
+  after_script:
+    - docker-entrypoint.sh /haf-api-node/ci/scripts/after-script.sh
+  artifacts:
+    when: always
+    expire_in: 1 week
+    paths: # Must include paths from .haf-node-replay, !reference doesn't work
+      - "*.txt"
+      - "*.log"
+      - "logs/"
+      - "*.json"
\ No newline at end of file
diff --git a/ci/scripts/after-script.sh b/ci/scripts/after-script.sh
new file mode 100755
index 0000000..4284d30
--- /dev/null
+++ b/ci/scripts/after-script.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+set -e
+
+# If Docker CLI is available try and use it to obtain logs
+if command -v docker &> /dev/null
+then
+
+  cd /haf-api-node
+
+  echo "Getting a list of services..."
+  docker compose ps --all --format json > "${CI_PROJECT_DIR:?}/compose_processes.json"
+
+  echo "Fetching stack logs..."
+  docker compose logs --no-color > "${CI_PROJECT_DIR:?}/haf_api_node.log"
+ 
+  echo "Getting status of services..."
+  docker ps --all --format "{{.Names}}" | xargs -I {} bash -c "docker inspect --format \"{{json .State}}\" \"{}\" > \"{}.json\""
+
+  echo "Shutting down the stack..."
+  docker compose down --volumes
+fi
+
+cd "${CI_PROJECT_DIR:?}"
+
+cp -R "${TOP_LEVEL_DATASET_MOUNTPOINT:?}/logs" "${CI_PROJECT_DIR:?}/logs"
+rm -f "${REPLAY_DIRECTORY:?}/replay_running"
\ No newline at end of file
diff --git a/ci/scripts/copy-datadir.sh b/ci/scripts/copy-datadir.sh
new file mode 100755
index 0000000..c498919
--- /dev/null
+++ b/ci/scripts/copy-datadir.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+set -e
+
+echo "Copying replay data to ${REPLAY_PIPELINE_DIRECTORY:?}"
+cp -a "${REPLAY_DIRECTORY:?}" "${REPLAY_PIPELINE_DIRECTORY:?}"
\ No newline at end of file
diff --git a/ci/scripts/copy-haf-config.sh b/ci/scripts/copy-haf-config.sh
new file mode 100755
index 0000000..930895f
--- /dev/null
+++ b/ci/scripts/copy-haf-config.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -e
+
+echo "Performing additional configuration..."
+
+echo "Copying config.ini file..."
+cp "/haf-api-node/ci/config.ini" "${REPLAY_DIRECTORY:?}/config.ini"
+
+echo "Inspecting replay directory..."
+ls -lah "${REPLAY_DIRECTORY:?}"
+ls -lah "${REPLAY_DIRECTORY:?}/blockchain"
\ No newline at end of file
diff --git a/ci/scripts/prepare-stack-data-directory.sh b/ci/scripts/prepare-stack-data-directory.sh
new file mode 100755
index 0000000..95ed5aa
--- /dev/null
+++ b/ci/scripts/prepare-stack-data-directory.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+HAF_MOUNTPOINT="${1:?"Please pass a valid path for the stack's data directory to this script as its first argument"}"
+
+echo "Creating HAF's mountpoint at ${HAF_MOUNTPOINT}..."
+
+mkdir -p "${HAF_MOUNTPOINT}/blockchain"
+mkdir -p "${HAF_MOUNTPOINT}/shared_memory/haf_wal"
+mkdir -p "${HAF_MOUNTPOINT}/logs/caddy"
+mkdir -p "${HAF_MOUNTPOINT}/logs/pgbadger"
+mkdir -p "${HAF_MOUNTPOINT}/logs/postgresql"
+
+chown -R 1000:100 "${HAF_MOUNTPOINT}"
\ No newline at end of file
diff --git a/ci/scripts/replay-api-node.sh b/ci/scripts/replay-api-node.sh
new file mode 100755
index 0000000..ca0e4b5
--- /dev/null
+++ b/ci/scripts/replay-api-node.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+
+set -e
+     
+echo -e "\e[0Ksection_start:$(date +%s):check[collapsed=true]\r\e[0KChecking replay status..."
+
+echo "Replay directory is: ${REPLAY_DIRECTORY:?}"
+while [[ -f "${REPLAY_DIRECTORY:?}/replay_running" ]]
+do
+    echo "Another replay is running in ${REPLAY_DIRECTORY:?}. Waiting for it to end..."
+    sleep 60
+done
+if [[ -f "${REPLAY_DIRECTORY:?}/status" ]]
+then
+    echo "Previous replay exit code"
+    status=$(cat "${REPLAY_DIRECTORY:?}/status")
+    echo "$status"
+    if [[ -d "${REPLAY_DIRECTORY:?}/blockchain" ]]
+    then
+        cd "${REPLAY_DIRECTORY:?}/blockchain"
+        echo "Checking block_log checksum..."
+        # Capture sha256sum's exit code without letting `set -e` abort the script
+        checksum_check=0
+        sha256sum -c sha256s || checksum_check=$?
+        cd -
+        if [[ "$status" -eq 0 && "$checksum_check" -eq 0 ]]
+        then
+            echo "Previous replay datadir is valid, exiting"
+            exit 0
+        fi
+    fi
+fi
+
+echo "Didn't find a valid replay, performing a fresh one..."
+ls "${REPLAY_DIRECTORY:?}" -lath
+# Delete an invalid replay if it exists
+# The asterisk has to be outside quotes because quotes disable all but the environment variable expansion
+rm "${REPLAY_DIRECTORY:?}/"* -rf
+ls "${REPLAY_DIRECTORY:?}" -lath
+
+# Create directories needed by the stack and set their permissions
+/haf-api-node/ci/scripts/prepare-stack-data-directory.sh "${REPLAY_DIRECTORY:?}"
+
+touch "${REPLAY_DIRECTORY:?}/replay_running"
+echo -e "\e[0Ksection_end:$(date +%s):check\r\e[0K"
+
+echo -e "\e[0Ksection_start:$(date +%s):prepare[collapsed=true]\r\e[0KPreparing replay directory and configuring stack..."
+echo "Hardlinking the block_log..."
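+# Copy the source block_log to a staging path beside the replay directory, then hardlink it into blockchain/ below (hardlinks require both paths to be on the same filesystem)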
+mkdir -p "${REPLAY_DIRECTORY:?}/..${BLOCK_LOG_SOURCE_DIR:?}/"
+chown 1000:1000 "${REPLAY_DIRECTORY:?}/..${BLOCK_LOG_SOURCE_DIR:?}/" 
+mkdir -p "${REPLAY_DIRECTORY:?}/blockchain"
+cp -u "${BLOCK_LOG_SOURCE_DIR:?}/block_log" "${REPLAY_DIRECTORY:?}/..${BLOCK_LOG_SOURCE_DIR:?}/block_log"
+ln -f "${REPLAY_DIRECTORY:?}/..${BLOCK_LOG_SOURCE_DIR:?}/block_log" "${REPLAY_DIRECTORY:?}/blockchain/block_log"
+if [[ -e "${BLOCK_LOG_SOURCE_DIR:?}/block_log.artifacts" ]]
+then
+    echo "Copying the artifacts file..." 
+    cp "${BLOCK_LOG_SOURCE_DIR:?}/block_log.artifacts" "${REPLAY_DIRECTORY:?}/blockchain/block_log.artifacts"
+fi
+cd "${REPLAY_DIRECTORY:?}/blockchain"
+sha256sum block_log > sha256s
+cd -
+chown -R 1000:100 "${REPLAY_DIRECTORY:?}/blockchain"
+/haf-api-node/ci/scripts/set-up-stack.sh
+if [[ -x "${ADDITIONAL_CONFIGURATION_SCRIPT}" ]]
+then
+    "${ADDITIONAL_CONFIGURATION_SCRIPT}"
+fi
+echo -e "\e[0Ksection_end:$(date +%s):prepare\r\e[0K"
+
+echo -e "\e[0Ksection_start:$(date +%s):replay[collapsed=true]\r\e[0KReplaying HAF API node..."
+docker version
+
+cd /haf-api-node
+docker compose up --detach --quiet-pull
+
+cd "${CI_PROJECT_DIR:?}"
+
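+# Poll the given app's HAF context until it reaches LAST_BLOCK_NUMBER, giving up after REPLAY_TIMEOUT (default 6000) seconds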
+function wait-for-replay-to-finish() {
+    local service="$1"
+    local db_url="$2"
+    local command="$3"
+    local count=0
+    until [[ $(docker exec --env LC_ALL="C" "${service}" psql "${db_url}" --quiet --tuples-only --no-align --command "${command}") == "${LAST_BLOCK_NUMBER:?}" ]]
+    do
+        CURRENT_BLOCK=$(docker exec --env LC_ALL="C" "${service}" psql "${db_url}" --quiet --tuples-only --no-align --command "${command}")
+        echo -e "Waiting for ${service} replay to finish...\n Current block: ${CURRENT_BLOCK:?}"
+        count=$((count+10))
+        [[ $count -eq "${REPLAY_TIMEOUT:-6000}" ]] && exit 1
+        sleep 10s
+    done
+}
+
+wait-for-replay-to-finish "haf-world-reputation-tracker-block-processing-1" "${HAF_DB_URL_REPTRACKER:?}" "${PSQL_COMMAND_REPTRACKER:?}"
+wait-for-replay-to-finish "haf-world-hivemind-block-processing-1" "${HAF_DB_URL_HIVEMIND:?}" "${PSQL_COMMAND_HIVEMIND:?}"
+
+cd /haf-api-node
+docker compose stop
+
+# Remove the split block log and reconfigure HAF to use the monolithic one
+rm -f "${REPLAY_DIRECTORY:?}/blockchain/block_log_part."*
+sed -i 's/block-log-split = 9999/block-log-split = -1/g' "${REPLAY_DIRECTORY:?}/config.ini"
+
+cd "${CI_PROJECT_DIR:?}"
+status=$(docker inspect haf-world-haf-1 --format="{{.State.ExitCode}}")
+echo "${status}" > "${REPLAY_DIRECTORY:?}/status"
+echo -e "\e[0Ksection_end:$(date +%s):replay\r\e[0K"
\ No newline at end of file
diff --git a/ci/scripts/set-up-stack.sh b/ci/scripts/set-up-stack.sh
new file mode 100755
index 0000000..517941a
--- /dev/null
+++ b/ci/scripts/set-up-stack.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -e
+
+if [[ "${USE_ALTERNATE_HAPROXY_CONFIG:-}" == "true" ]]; then
+  echo "Enabling alternate HAproxy configuration..."
+  sed -i.bak -e 's#source: ./haproxy/haproxy.cfg#source: ./haproxy/haproxy-alternate.cfg#' /haf-api-node/haproxy.yaml
+else
+  echo "Enabling default HAproxy configuration..."
+  [[ -f /haf-api-node/haproxy.yaml.bak ]] && mv -v /haf-api-node/haproxy.yaml.bak /haf-api-node/haproxy.yaml
+fi
+
+if [[ -n "${FAKETIME:-}" ]]; then
+  echo "Enabling faketime for HAF..."
+  [[ ! -e /haf-api-node/compose.override.yml ]] && mv /haf-api-node/faketime.yaml /haf-api-node/compose.override.yml
+else
+  echo "Disabling faketime for HAF..."
+  [[ -f /haf-api-node/compose.override.yml ]] && mv -v /haf-api-node/compose.override.yml /haf-api-node/faketime.yaml
+fi
+
+# Without explicit exit command, the code returned by the script is the return code 
+# of the last command executed, which might be non-zero even
+# if the command executed successfully
+exit 0
\ No newline at end of file
diff --git a/ci/scripts/test-api-node.sh b/ci/scripts/test-api-node.sh
new file mode 100755
index 0000000..408858e
--- /dev/null
+++ b/ci/scripts/test-api-node.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+set -e
+
+echo -e "\e[0Ksection_start:$(date +%s):docker[collapsed=true]\r\e[0KWaiting for Docker to start..."
+count=0
+until docker version &>/dev/null
+do
+    echo "Waiting for Docker to start..."
+    count=$((count+5))
+    [[ $count -eq 600 ]] && exit 1
+    sleep 5s
+done
+      
+echo "Docker info (saved to docker-info.txt)"
+docker info > docker-info.txt
+
+echo -e "\nDocker processes"
+docker ps --all
+
+echo -e "\nDocker networks"
+docker network ls
+echo -e "\e[0Ksection_end:$(date +%s):docker\r\e[0K"
+
+echo -e "\e[0Ksection_start:$(date +%s):haproxy[collapsed=true]\r\e[0KWaiting for certain services to start..."
+
+function wait-for-service(){
+    local service="$1"
+    local format="$2"
+    local expected_status="$3"
+    echo "Waiting for ${service} to start..."
+    count=0
+    until [[ $(docker inspect --format "${format}" "${service}") == "${expected_status}" ]]
+    do
+        echo "Waiting for ${service} to start..."
+        count=$((count+10))
+        [[ $count -eq 600 ]] && exit 1
+        sleep 10s
+    done
+    echo "Done! ${service} has started successfully."
+}
+
+wait-for-service "haf-world-haf-1" "{{.State.Health.Status}}" "healthy"
+wait-for-service "haf-world-hafah-postgrest-1" "{{.State.Status}}" "running"
+wait-for-service "haf-world-hivemind-postgrest-server-1" "{{.State.Status}}" "running"
+wait-for-service "haf-world-haproxy-1" "{{.State.Health.Status}}" "healthy"
+
+# Sleep for additional 30s to ensure HAproxy has time to connect to all the servers
+sleep 30s
+
+echo -e "\e[0Ksection_end:$(date +%s):haproxy\r\e[0K"
+
+echo -e "\e[0Ksection_start:$(date +%s):haproxy_state[collapsed=true]\r\e[0KChecking HAproxy state..."
+docker exec haf-world-haproxy-1 sh -c "echo 'show servers conn' | socat stdio unix-connect:/run/haproxy/admin.sock"
+docker exec haf-world-haproxy-1 sh -c "echo 'show servers state' | socat stdio unix-connect:/run/haproxy/admin.sock"
+echo -e "\e[0Ksection_end:$(date +%s):haproxy_state\r\e[0K"
+
+echo -e "\e[0Ksection_start:$(date +%s):caddy[collapsed=true]\r\e[0KCaddy configuration... (saved to caddy-autosave.json)"
+docker exec haf-world-caddy-1 sh -c "cat /config/caddy/autosave.json" | jq | tee caddy-autosave.json
+echo -e "\e[0Ksection_end:$(date +%s):caddy\r\e[0K"
+
+echo -e "\e[0Ksection_start:$(date +%s):hive_link[collapsed=true]\r\e[0KTesting endpoints... Hive (via container link, simulating CI service)..."
+docker run --rm --link "haf-world-caddy-1:${PUBLIC_HOSTNAME:?}" --network haf curlimages/curl:8.8.0 -vk -X POST --data '{"jsonrpc":"2.0", "method":"condenser_api.get_block", "params":[1], "id":1}' "https://${PUBLIC_HOSTNAME:?}/"
+echo -e "\e[0Ksection_end:$(date +%s):hive_link\r\e[0K"
+
+echo -e "\e[0Ksection_start:$(date +%s):hive[collapsed=true]\r\e[0KHive directly..."
+curl -k --data '{"jsonrpc":"2.0", "method":"condenser_api.get_block", "params":[1], "id":1}' --trace-ascii hive-output.log "https://${PUBLIC_HOSTNAME:?}/"
+cat hive-output.log
+echo -e "\e[0Ksection_end:$(date +%s):hive\r\e[0K"
+
+echo -e "\e[0Ksection_start:$(date +%s):hafah[collapsed=true]\r\e[0KHAfAH..."
+curl -k --data '{"jsonrpc":"2.0", "method":"block_api.get_block", "params":{"block_num":1}, "id":1}' --trace-ascii hafah-output.log "https://${PUBLIC_HOSTNAME:?}/"
+cat hafah-output.log
+echo -e "\e[0Ksection_end:$(date +%s):hafah\r\e[0K"
+
+echo -e "\e[0Ksection_start:$(date +%s):hivemind[collapsed=true]\r\e[0KHivemind..."
+curl -k --data '{"jsonrpc":"2.0", "method":"condenser_api.get_blog", "params":["steem", 0, 1], "id":1}' --trace-ascii hivemind-output.log "https://${PUBLIC_HOSTNAME:?}/"
+
+cat hivemind-output.log
+echo -e "\e[0Ksection_end:$(date +%s):hivemind\r\e[0K"
+
+echo -e "\e[0Ksection_start:$(date +%s):check[collapsed=true]\r\e[0KChecking test results..."
+function check-log-for-errors() {
+    local logfile="$1"
+    echo "Checking file ${logfile} for errors..."
+    grep -i '"error"' "${logfile}" && exit 1 || echo "No errors found!"
+}
+check-log-for-errors hive-output.log
+check-log-for-errors hafah-output.log
+check-log-for-errors hivemind-output.log
+echo -e "\e[0Ksection_end:$(date +%s):check\r\e[0K"
\ No newline at end of file
diff --git a/docker-bake.hcl b/docker-bake.hcl
new file mode 100644
index 0000000..9566176
--- /dev/null
+++ b/docker-bake.hcl
@@ -0,0 +1,81 @@
+variable "CI_REGISTRY_IMAGE" {
+    default = "registry.gitlab.syncad.com/hive/haf_api_node"
+}
+variable "TAG" {
+    default = "latest"
+}
+variable "CI_COMMIT_TAG" {
+  default = ""
+}
+
+function "notempty" {
+  params = [variable]
+  result = notequal("", variable)
+}
+
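+# Builds an image reference under CI_REGISTRY_IMAGE, appending the suffix only when it is
+# non-empty, e.g. registry-name("compose", "") yields "${CI_REGISTRY_IMAGE}/compose"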
+function "registry-name" {
+  params = [name, suffix]
+  result = notempty(suffix) ? "${CI_REGISTRY_IMAGE}/${name}/${suffix}" : "${CI_REGISTRY_IMAGE}/${name}"
+}
+
+group "default" {
+  targets = ["compose", "dind"]
+}
+
+group "ci" {
+  targets = ["compose-ci", "dind-ci"]
+}
+
+target "compose" {
+  dockerfile = "ci/compose/Dockerfile"
+  tags = [
+    "${registry-name("compose", "")}:${TAG}",
+    notempty(CI_COMMIT_TAG) ? "${registry-name("compose", "")}:${CI_COMMIT_TAG}": ""
+  ]
+  cache-to = [
+    "type=inline"
+  ]
+  cache-from = [
+    "${registry-name("compose", "")}:${TAG}",
+  ]
+  platforms = [
+    "linux/amd64"
+  ]
+  output = [
+    "type=docker"
+  ]
+}
+
+target "dind" {
+  dockerfile = "ci/dind/Dockerfile"
+  tags = [
+    "${registry-name("dind", "")}:${TAG}",
+    notempty(CI_COMMIT_TAG) ? "${registry-name("dind", "")}:${CI_COMMIT_TAG}": "",
+  ]
+  cache-to = [
+    "type=inline"
+  ]
+  cache-from = [
+    "type=registry,ref=${registry-name("dind", "")}:${TAG}",
+  ]
+  platforms = [
+    "linux/amd64"
+  ]
+  output = [
+    "type=docker"
+  ]
+}
+
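+# The *-ci targets reuse the build definitions above but push the result to the registry
+# ("type=registry") instead of loading it into the local Docker daemon ("type=docker")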
+target "compose-ci" {
+  inherits = ["compose"]
+  output = [
+    "type=registry"
+  ]
+}
+
+target "dind-ci" {
+  inherits = ["dind"]
+  output = [
+    "type=registry"
+  ]
+}
\ No newline at end of file
diff --git a/haf_base.yaml b/haf_base.yaml
index 2f9d54f..b83f8ca 100644
--- a/haf_base.yaml
+++ b/haf_base.yaml
@@ -80,9 +80,9 @@ services:
       # PGCTLTIMEOUT sets how long we allow PostgreSQL to startup before giving up and shutting down the container.
       # If you have an unclean shutdown, and postgres needs to do startup recovery, working through all of the old
       # write-ahead logs, this can take longer than the default timeout, which is something like 60 seconds.
-      # 5 minutes is probably long enough that anyone using NVMes will be able to recover their database at
+      # 10 minutes is probably long enough that anyone using NVMes will be able to recover their database at
       # startup.  If you're on slower drives or older hardware, you may need to increase this.
-      PGCTLTIMEOUT: 300
+      PGCTLTIMEOUT: 600
       # HACK, do not commit
       OVERRIDE_LD_PRELOAD: ""
     # when mounting volumes, declare the directories you want to bind-mount here,
diff --git a/haf_block_explorer.yaml b/haf_block_explorer.yaml
index ff20cc3..f120ccb 100644
--- a/haf_block_explorer.yaml
+++ b/haf_block_explorer.yaml
@@ -121,6 +121,7 @@ services:
       PGRST_DB_ROOT_SPEC: root
       PGRST_DB_EXTRA_SEARCH_PATH: hafbe_bal, reptracker_app
       # PGRST_OPENAPI_SERVER_PROXY_URI: http://${PUBLIC_HOSTNAME}/hafbe_api/
+      PGRST_LOG_LEVEL: ${BLOCK_EXPLORER_SERVER_LOG_LEVEL}
     networks:
       haf-network:
     healthcheck:
diff --git a/hafah.yaml b/hafah.yaml
index 3ceabd2..43ab4c9 100644
--- a/hafah.yaml
+++ b/hafah.yaml
@@ -44,6 +44,7 @@ services:
       PGRST_DB_POOL: 20
       PGRST_DB_POOL_ACQUISITION_TIMEOUT: 10
       PGRST_OPENAPI_SERVER_PROXY_URI: http://127.0.0.1:3000
+      PGRST_LOG_LEVEL: ${HAFAH_SERVER_LOG_LEVEL}
     networks:
       haf-network:
     healthcheck:
diff --git a/haproxy/haproxy-alternate.cfg b/haproxy/haproxy-alternate.cfg
new file mode 100644
index 0000000..729d34b
--- /dev/null
+++ b/haproxy/haproxy-alternate.cfg
@@ -0,0 +1,122 @@
+# Alternate configuration for HAproxy, to be used when the HAF API node is not
+# meant to serve the full block history (e.g. in CI, where it only has the first 5,000,000 blocks).
+global
+  daemon
+  log stdout format raw local0 debug
+  stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
+  ca-base /etc/ssl/certs
+  presetenv SYNC_BROADCAST_BACKEND_SERVER api.hive.blog
+  presetenv SYNC_BROADCAST_BACKEND_PORT 443
+  presetenv SYNC_BROADCAST_BACKEND_SSL ssl
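+  # presetenv only sets these variables when they are not already defined in the environment,
+  # so the sync-hived backend defaults to api.hive.blog over SSL unless overridden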
+
+defaults
+  log     global
+  mode    http
+  option  httplog
+  option  dontlognull
+  option  forwardfor
+  option  http-server-close
+  option  log-health-checks
+  timeout connect 5s
+  timeout client  30s
+  timeout server  30s
+  timeout tunnel  1h
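+  # init-addr last,libc,none lets HAproxy start even when a backend hostname does not resolve
+  # yet; the docker resolver fills the address in once the container is up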
+  default-server init-addr last,libc,none resolvers docker check
+  #errorfile 400 /etc/haproxy/errors/400.http
+  #errorfile 403 /etc/haproxy/errors/403.http
+  #errorfile 408 /etc/haproxy/errors/408.http
+  #errorfile 500 /etc/haproxy/errors/500.http
+  #errorfile 502 /etc/haproxy/errors/502.http
+  #errorfile 503 /etc/haproxy/errors/503.http
+  #errorfile 504 /etc/haproxy/errors/504.http
+
+resolvers docker
+  parse-resolv-conf
+
+frontend stats
+  bind *:8000
+  stats enable
+  stats uri /admin/haproxy/
+  stats refresh 10s
+  stats admin if TRUE
+
+frontend health
+  bind 127.0.0.1:8001
+  mode http
+  http-request return status 200 if { src 127.0.0.0/8 }
+
+####
+#### Hive Frontends
+####
+
+frontend hived-in-7001
+  bind *:7001
+  option http-server-close
+  default_backend hived
+
+frontend hivemind-in-7002
+  bind *:7002
+  option http-server-close
+  default_backend hivemind
+
+frontend hafah-in-7003
+  bind *:7003
+  option http-server-close
+  default_backend hafah
+
+frontend balance-tracker-in-7004
+  bind *:7004
+  option http-server-close
+  default_backend balance-tracker
+
+frontend block-explorer-in-7005
+  bind *:7005
+  option http-server-close
+  default_backend block-explorer
+
+frontend sync-hived-in-7006
+  bind *:7006
+  option http-server-close
+  default_backend sync-hived
+
+frontend hived-in-http-7008
+  bind *:7008
+  option http-server-close
+  default_backend hived-http
+
+frontend reputation-tracker-in-7009
+  bind *:7009
+  option http-server-close
+  default_backend reputation-tracker
+
+backend hived
+  balance roundrobin
+  server haf haf:8090 check
+
+backend hived-http
+  balance roundrobin
+  server haf haf:8091 check
+
+backend balance-tracker
+  balance roundrobin
+  server balance-tracker balance-tracker-postgrest-rewriter:80
+
+backend reputation-tracker
+  balance roundrobin
+  server reputation-tracker reputation-tracker-postgrest-rewriter:80
+
+backend hafah
+  balance roundrobin
+  server hafah-postgrest hafah-postgrest-rewriter:80
+
+backend hivemind
+  balance roundrobin
+  server hivemind hivemind-postgrest-rewriter:80
+
+backend block-explorer
+  balance roundrobin
+  server block-explorer block-explorer-postgrest-rewriter:80
+
+backend sync-hived
+  balance roundrobin
+  server sync-hived "$SYNC_BROADCAST_BACKEND_SERVER":"$SYNC_BROADCAST_BACKEND_PORT" check "$SYNC_BROADCAST_BACKEND_SSL" ca-file ca-certificates.crt
diff --git a/hivemind.yaml b/hivemind.yaml
index ac96fde..db3ffff 100644
--- a/hivemind.yaml
+++ b/hivemind.yaml
@@ -20,12 +20,15 @@ services:
     profiles:
       - apps
       - hivemind
+    environment:
+      HIVEMIND_SYNC_ARGS:
     networks:
       haf-network:
     command:
       - "sync"
       - "--database-url=postgresql://hivemind@haf/haf_block_log"
       - "--database-admin-url=postgresql://haf_admin@haf/haf_block_log"
+      - "${HIVEMIND_SYNC_ARGS:-}"
     healthcheck:
       test: ["CMD-SHELL","/home/hivemind/block-processing-healthcheck.sh || exit 1"]
       interval: 10s
@@ -82,6 +85,7 @@ services:
       PGRST_OPENAPI_MODE: "disabled" # unclear why this is set, I guess because we currently only support json-rpc?
       # when debugging, you can enable this
       # PGRST_DB_PLAN_ENABLED: true
+      PGRST_LOG_LEVEL: ${HIVEMIND_SERVER_LOG_LEVEL}
     healthcheck:
       test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1:3001/ready || exit 1"]
       interval: 10s
diff --git a/reputation_tracker.yaml b/reputation_tracker.yaml
index bab526b..08d6ba7 100644
--- a/reputation_tracker.yaml
+++ b/reputation_tracker.yaml
@@ -71,6 +71,7 @@ services:
       PGRST_DB_POOL_ACQUISITION_TIMEOUT: 10
       PGRST_DB_EXTRA_SEARCH_PATH: reptracker_app
       # PGRST_OPENAPI_SERVER_PROXY_URI: http://${PUBLIC_HOSTNAME}/reptracker_user/
+      PGRST_LOG_LEVEL: ${REPUTATION_TRACKER_SERVER_LOG_LEVEL}
     healthcheck:
       test: ["CMD-SHELL", "wget --timeout=2 -nv -t1 --spider 127.0.0.1:3001/ready || exit 1"]
       interval: 10s
diff --git a/swagger.yaml b/swagger.yaml
index f679823..c18fbe8 100644
--- a/swagger.yaml
+++ b/swagger.yaml
@@ -14,12 +14,13 @@ services:
       haf-network:
     #healthcheck:
     #  test: ["CMD-SHELL","curl -f localhost:8080"]
+    # Only HAfAH is started by default
     depends_on:
-      balance-tracker-postgrest:
-        condition: service_started
-      reputation-tracker-postgrest:
-        condition: service_started
+    #   balance-tracker-postgrest:
+    #     condition: service_started
+    #   reputation-tracker-postgrest:
+    #     condition: service_started
       hafah-postgrest:
         condition: service_started
-      block-explorer-postgrest:
-        condition: service_started
+    #   block-explorer-postgrest:
+    #     condition: service_started
-- 
GitLab