From f8460eed4fa26b059e7769552dc0f28f9d57e1c0 Mon Sep 17 00:00:00 2001
From: feruzm <fmiboy@gmail.com>
Date: Wed, 3 Jul 2024 11:40:54 +0300
Subject: [PATCH] titles, translations fix, more string to translation

---
 _config.yml                        |   4 +-
 _i18n/en.yml                       |  24 ++-
 _layouts/default.html              |  11 +-
 _nodeop/clive.md                   | 242 ++++++++++++++++++++++++
 _nodeop/drone.md                   |  66 +++++++
 _nodeop/exchange-node.md           |   5 +-
 _nodeop/get-transaction-node.md    |   5 +-
 _nodeop/haf-api.md                 | 287 +++++++++++++++++++++++++++++
 _nodeop/index.md                   |   5 +-
 _nodeop/jussi-multiplexer.md       |   4 +-
 _nodeop/monit.md                   |   5 +-
 _nodeop/node-cli.md                |  14 +-
 _nodeop/node-config.md             |  71 +------
 _nodeop/plugin-and-api-list.md     |   4 +-
 _nodeop/seed-node.md               |   5 +-
 _nodeop/setting-up-a-testnet.md    |   5 +-
 _nodeop/using-hivemind.md          |   4 +-
 _services/jussi.md                 |   2 +-
 _testnet/tools.md                  |   2 +-
 _tutorials/index.md                |   2 +-
 apidefinitions.html                |   4 +-
 index.html                         |   2 +-
 layer2.html                        |   4 +-
 quickstart.html                    |   4 +-
 resources.html                     |   4 +-
 services.html                      |   4 +-
 unused/accounts.md                 |   0
 unused/authority_and_validation.md |   0
 unused/blocks_and_transactions.md  |   0
 unused/database_api.md             |   2 +-
 unused/docker.md                   |   2 +-
 unused/globals.md                  |   0
 unused/keys.md                     |   0
 unused/market.md                   |   0
 unused/steemd.md                   |   4 -
 unused/tags.md                     |   2 +-
 unused/tutorials.md                |   2 +-
 37 files changed, 667 insertions(+), 134 deletions(-)
 create mode 100644 _nodeop/clive.md
 create mode 100644 _nodeop/drone.md
 create mode 100644 _nodeop/haf-api.md
 delete mode 100644 unused/accounts.md
 delete mode 100644 unused/authority_and_validation.md
 delete mode 100644 unused/blocks_and_transactions.md
 delete mode 100644 unused/globals.md
 delete mode 100644 unused/keys.md
 delete mode 100644 unused/market.md
 delete mode 100644 unused/steemd.md

diff --git a/_config.yml b/_config.yml
index 875d1cdf..cefa718d 100644
--- a/_config.yml
+++ b/_config.yml
@@ -1,7 +1,7 @@
 # ----
 # Site
 
-title: Hive Developer
+title: Hive Developers
 description: Hive Developer Documentation.
 baseurl: ''
 url: https://developers.hive.io
@@ -118,8 +118,8 @@ collections:
 
 plugins:
   - jekyll-sitemap
-  - jekyll-seo-tag
   - jekyll-multiple-languages-plugin
+  - jekyll-seo-tag
 
 exclude:
   - readme.md
diff --git a/_i18n/en.yml b/_i18n/en.yml
index 7f4ffe1d..350f67ec 100644
--- a/_i18n/en.yml
+++ b/_i18n/en.yml
@@ -5,7 +5,9 @@ global:
   de: Deutsch
   fr: Français
   zh: 汉语
+Index: index
 titles:
+  portal: Hive Developer Portal
   home: API Docs
   api-def: API Definitions
   introduction: Introduction
@@ -14,6 +16,7 @@ titles:
   layer2: Layer 2
   resources: Resources
   services: Services
+  tools: Tools
   glossary: Glossary
   search: Search
   quickstart: Quickstart
@@ -68,6 +71,9 @@ titles:
   api: API
   market: Market
   exchange_node: Exchange Node
+  clive: Command Line Hive
+  docker: Docker
+  tags: Tags
   get_transaction_node: Get Transaction Node
   jussi_multiplexer: Using jussi as a Multiplexer
   mira_performance_tuning: MIRA Performance Tuning
@@ -142,7 +148,23 @@ titles:
   php: PHP
   recipes: Recipes
   node_operation: Node Operation
-
+  haf_api: Setup HAF API node
+  drone: Drone middleware
+descriptions:
+  jussi: A reverse proxy that forwards json-rpc requests.
+  node_config: All `config.ini` options available to `hived`
+  node_cli: All options available to `hived` at command line interface
+  clive: Interactive command line application for interacting with the Hive
+  plug_and_api_list: Run a `hived` node with your preferred APIs
+  haf_api: Setup full or light Hive nodes with HAF API
+  using_hivemind: Hivemind setup and API functionality
+  jussi_multiplexer: Optimize your local applications with jussi
+  drone: API caching layer application, Jussi replacement
+  monit: Use monit as a utility for managing and monitoring hived
+  seed_node: Setting up a seed node
+  exchange_node: Setting up a node for exchanges
+  get_transaction_node: Setting up a node that supports `*.get_transaction`
+  setting_up_testnet: Quick-start for deploying a Hive-based Testnet.
 
 
 
diff --git a/_layouts/default.html b/_layouts/default.html
index b11a0314..fc07f434 100644
--- a/_layouts/default.html
+++ b/_layouts/default.html
@@ -1,5 +1,5 @@
 <!DOCTYPE html>
-<html lang="en">
+<html lang="{{ site.lang }}">
 <head>
   <meta charset="utf-8">
   <meta name="viewport" content="initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, user-scalable=no" >
@@ -8,6 +8,11 @@
   <meta name="apple-mobile-web-app-capable" content="yes" />
   <meta name="apple-mobile-web-app-status-bar-style" content="black" />
 
+  <title>{% t page.title %}</title>
+  <meta property="og:title" content="{% t page.title %}" />
+  <meta name="twitter:card" content="summary" />
+  <meta property="twitter:title" content="{% t page.title %}" />
+
   <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.4.1/jquery.min.js" integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo=" crossorigin="anonymous"></script>
   <link rel="shortcut icon" type="image/png" href="{{ '/favicon.png' | relative_url }}" />
   <link rel="apple-touch-icon" type="image/png" href="{{ '/favicon.png' | relative_url }}" />
@@ -22,7 +27,7 @@
   <script src="{{ '/js/main.js' | relative_url }}"></script>
   <script src="{{ '/js/adjust.js' | relative_url }}"></script>
 
-  {% seo %}
+  {% seo title=false %}
 
   {% if jekyll.environment == 'production' %}
 <!-- Global site tag (gtag.js) - Google Analytics -->
@@ -59,7 +64,7 @@
 </header>
 <div class="main" id="top">
   <div class="hero">
-    <h1 class="hero__h1">Hive Developer Portal {% if page.section %} - {{ page.section }}{% endif %}</h1>
+    <h1 class="hero__h1">{% t titles.portal %} {% if page.section %} - {% t page.section %}{% endif %}</h1>
     <img class="hero__img" src="{{ '/images/honey-comb-92.png' | relative_url }}" style="position: relative; top: 10px;" />
   </div>
   {{ content }}
diff --git a/_nodeop/clive.md b/_nodeop/clive.md
new file mode 100644
index 00000000..e82051ff
--- /dev/null
+++ b/_nodeop/clive.md
@@ -0,0 +1,242 @@
+---
+title: titles.clive
+position: 3
+description: descriptions.clive
+exclude: true
+layout: full
+canonical_url: clive.html
+---
+
+Command line options are typically expressed with double-dash (e.g., `--replay-blockchain`):
+
+```bash
+hived --data-dir=. --replay-blockchain
+```
+
+... or ...
+
+```bash
+hived --replay-blockchain --p2p-seed-node=hiveseed-se.privex.io:2001
+```
+
+Note, as the above example shows, options like `p2p-seed-node` are available as both a `config.ini` option as well as a command-line option.  Nearly all options available as `config.ini` options are also available as command-line options.  See: [Node Config]({{ '/nodeop/node-config.html' | relative_url }})
+
+The following are *only* available as command-line options.
+
+### Sections
+
+* [`disable-get-block`](#disable-get-block)
+* [`statsd-record-on-replay`](#statsd-record-on-replay)
+* [`transaction-status-rebuild-state`](#transaction-status-rebuild-state)
+* [`p2p-force-validate`](#p2p-force-validate)
+* [`replay-blockchain`](#replay-blockchain)
+* [`force-open`](#force-open)
+* [`resync-blockchain`](#resync-blockchain)
+* [`stop-replay-at-block`](#stop-replay-at-block)
+* [`advanced-benchmark`](#advanced-benchmark)
+* [`set-benchmark-interval`](#set-benchmark-interval)
+* [`dump-memory-details`](#dump-memory-details)
+* [`check-locks`](#check-locks)
+* [`validate-database-invariants`](#validate-database-invariants)
+* [`account-history-rocksdb-immediate-import`](#account-history-rocksdb-immediate-import)
+* [`exit-after-replay`](#exit-after-replay)
+* [`force-replay`](#force-replay)
+* [`account-history-rocksdb-immediate-import`](#account-history-rocksdb-immediate-import)
+* [`account-history-rocksdb-stop-import-at-block`](#account-history-rocksdb-stop-import-at-block)
+* [`load-snapshot`](#load-snapshot)
+* [`dump-snapshot`](#dump-snapshot)
+
+* Testnet Only
+    * [`chain-id`](#chain-id)
+
+### `disable-get-block`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Disable `get_block` API call.
+
+```bash
+--disable-get-block
+```
+
+### `statsd-record-on-replay`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Records statsd events during replay
+
+Used by plugin: `statsd`
+
+See: [#2276]({{ 'https://github.com/steemit/steem/issues/2276' | archived_url }})
+
+```bash
+--statsd-record-on-replay
+```
+
+### `transaction-status-rebuild-state`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Indicates that the transaction status plugin must re-build its state upon startup.
+
+Used by plugin: `transaction_status`
+
+See: [Plugin & API List]({{ '/nodeop/plugin-and-api-list.html#transaction_status_api' | relative_url }}), [#2458]({{ 'https://github.com/steemit/steem/issues/2458' | archived_url }})
+
+```bash
+--transaction-status-rebuild-state
+```
+
+### `p2p-force-validate`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Force validation of all transactions.
+
+```bash
+--p2p-force-validate
+```
+
+### `replay-blockchain`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Clear chain database and replay all blocks.
+
+```bash
+--replay-blockchain
+```
+
+### `force-open`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Force open the database, skipping the environment check.  If the binary or configuration has changed, replay the blockchain explicitly using `--replay-blockchain`.  If you know what you are doing you can skip this check and force open the database using `--force-open`.
+
+**WARNING: THIS MAY CORRUPT YOUR DATABASE. FORCE OPEN AT YOUR OWN RISK.**
+
+See: [#3446]({{ 'https://github.com/steemit/steem/issues/3446' | archived_url }})
+
+```bash
+--force-open
+```
+
+### `resync-blockchain`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Clear chain database and block log.
+
+```bash
+--resync-blockchain
+```
+
+### `stop-replay-at-block`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Stop and exit after reaching given block number
+
+See: [#1590]({{ 'https://github.com/steemit/steem/issues/1590' | archived_url }})
+
+```bash
+--stop-replay-at-block=1234
+```
+
+### `advanced-benchmark`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Make profiling for every plugin.
+
+See: [#1996]({{ 'https://github.com/steemit/steem/issues/1996' | archived_url }})
+
+```bash
+--advanced-benchmark
+```
+
+### `set-benchmark-interval`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Print time and memory usage every given number of blocks.
+
+See: [#1590]({{ 'https://github.com/steemit/steem/issues/1590' | archived_url }})
+
+```bash
+--set-benchmark-interval
+```
+
+### `dump-memory-details`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Dump database objects memory usage info. Use `set-benchmark-interval` to set dump interval.
+
+See: [#1985]({{ 'https://github.com/steemit/steem/issues/1985' | archived_url }})
+
+```bash
+--dump-memory-details
+```
+
+### `check-locks`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Check correctness of *chainbase* locking.
+
+```bash
+--check-locks
+```
+
+### `validate-database-invariants`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Validate all supply invariants check out.
+
+See: [#1477]({{ 'https://github.com/steemit/steem/issues/1477' | archived_url }}), [#1649]({{ 'https://github.com/steemit/steem/issues/1649' | archived_url }})
+
+```bash
+--validate-database-invariants
+```
+
+### `account-history-rocksdb-immediate-import`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Allows to force immediate data import at plugin startup.  By default storage is supplied during reindex process.
+
+See: [#1987]({{ 'https://github.com/steemit/steem/issues/1987' | archived_url }})
+
+```bash
+--account-history-rocksdb-immediate-import
+```
+
+### `account-history-rocksdb-stop-import-at-block`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Allows you to specify the block number that the data import process should stop at.
+
+See: [#1987]({{ 'https://github.com/steemit/steem/issues/1987' | archived_url }})
+
+```bash
+--account-history-rocksdb-stop-import-at-block=1234
+```
+
+### `exit-after-replay`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Exit after reaching given block number
+
+```bash
+--exit-after-replay
+```
+
+### `force-replay`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Before replaying clean all old files
+
+```bash
+--force-replay
+```
+
+### `load-snapshot`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Allows to force immediate snapshot import at plugin startup.  All data in state storage are overwritten.
+
+```bash
+--load-snapshot=snapshot.json
+```
+
+See: [v1.24.2](https://gitlab.syncad.com/hive/hive/-/releases/v1.24.2)
+
+### `dump-snapshot`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Allows to force immediate snapshot dump at plugin startup.  All data in the snapshot storage are overwritten.
+
+```bash
+--dump-snapshot=snapshot.json
+```
+
+See: [v1.24.2](https://gitlab.syncad.com/hive/hive/-/releases/v1.24.2)
+
+### `chain-id`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
+
+Chain ID to connect to.  **Testnet only.**
+
+See: [PR#1631]({{ 'https://github.com/steemit/steem/pull/1631' | archived_url }}), [#2827]({{ 'https://github.com/steemit/steem/issues/2827' | archived_url }})
+
+```bash
+--chain-id=d043ab83d223f25f37e1876fe48a240d49d8e4b1daa2342064990a8036a8bb5b
+```
diff --git a/_nodeop/drone.md b/_nodeop/drone.md
new file mode 100644
index 00000000..c60090b3
--- /dev/null
+++ b/_nodeop/drone.md
@@ -0,0 +1,66 @@
+---
+title: titles.drone
+position: 8
+description: descriptions.drone
+exclude: true
+layout: full
+canonical_url: drone.html
+---
+
+### Drone
+
+Drone is an API caching layer application for the Hive blockchain. It is built using Rust with Actix Web, and its primary purpose is to cache and serve API requests for a specific set of methods.
+Drone is intended as a Jussi replacement; it aims to improve API node performance.
+
+#### Features
+
+* Written in Rust for optimal performance and reliability.
+* Actix Web for high-performance, asynchronous HTTP handling.
+* LRU cache with time-based expiration to store API responses.
+* Multiple API endpoints support for seamless request handling with HAF apps.
+* Caching support for select Hive API methods to reduce strain on API nodes.
+
+
+#### Cached API Methods
+
+The list of which methods are cached and their cache TTL is configured in the config.yaml file.  The keys used to specify the method names in the config file follow Jussi's rules for parsing
+method names, so you should be able to port your existing Jussi config.json easily.
+
+
+#### Endpoints
+
+The application has the following two primary endpoints:
+
+`GET /`: Health check endpoint that returns the application status, version, and operator message in JSON format.
+`POST /`: API call endpoint that takes the JSON-RPC request, caches the response (if supported), and returns the response data.
+
+
+#### Configuration
+
+Drone comes with pre-determined settings, however, you will have to edit ENDPOINT settings in `drone` section of `config.yaml`
+before starting the application (or building the Docker image)
+
+```
+port: The port on which the application will listen for incoming connections (default: 8999).
+hostname: The hostname/IP address the application will bind to (default: "0.0.0.0").
+cache_max_capacity: The approximate max size of the cache, in bytes.  Memory usage may slightly exceed this
+                    limit, due to lazy eviction, but not by much.
+operator_message: Customizable message from the operator (default: "Drone by Deathwing").
+middleware_connection_threads: Specifies the number of HTTP connections to Hive endpoints kept alive (default: 8).
+```
+
+#### Usage
+
+#### Native
+
+To start the application after altering necessary configuration parameters execute the following command:
+
+`cargo run --release`
+
+If you are advanced and have knowledge about Rust, you can also build the binary using `cargo build --release` and then run it using `./target/release/drone`.
+
+#### Docker (Recommended)
+
+You can use docker-compose to build and run Drone.
+
+`docker-compose up --build -d`
diff --git a/_nodeop/exchange-node.md b/_nodeop/exchange-node.md
index 4694538a..9cc91816 100644
--- a/_nodeop/exchange-node.md
+++ b/_nodeop/exchange-node.md
@@ -1,8 +1,7 @@
 ---
 title: titles.exchange_node
-position: 7
-description: |
-  Setting up a node for exchanges.
+position: 11
+description: descriptions.exchange_node
 exclude: true
 layout: full
 canonical_url: exchange-node.html
diff --git a/_nodeop/get-transaction-node.md b/_nodeop/get-transaction-node.md
index e5493a9c..7a66cea2 100644
--- a/_nodeop/get-transaction-node.md
+++ b/_nodeop/get-transaction-node.md
@@ -1,8 +1,7 @@
 ---
 title: titles.get_transaction_node
-position: 8
-description: |
-  Setting up a node that supports `*.get_transaction`.
+position: 12
+description: descriptions.get_transaction_node
 exclude: true
 layout: full
 canonical_url: get-transaction-node.html
diff --git a/_nodeop/haf-api.md b/_nodeop/haf-api.md
new file mode 100644
index 00000000..0ca18def
--- /dev/null
+++ b/_nodeop/haf-api.md
@@ -0,0 +1,287 @@
+---
+title: titles.haf_api
+position: 5
+description: descriptions.haf_api
+exclude: true
+layout: full
+canonical_url: haf-api.html
+---
+
+
+### Using docker compose to install and maintain a HAF server and HAF apps
+
+#### System Requirements
+
+We assume the base system will be running at least Ubuntu 22.04 (jammy).  Everything will likely work with later versions of Ubuntu. IMPORTANT UPDATE: experiments have shown 20% better API performance when running U23.10, so this latter version is recommended over Ubuntu 22 as a hosting OS.
+
+For a mainnet API node, we recommend:
+- at least 32GB of memory.  If you have 64GB, it will improve the time it takes to sync from scratch, but
+  it should make less of a difference if you're starting from a mostly-synced HAF node (i.e.,
+  restoring a recent ZFS snapshot) (TODO: quantify this?)
+- 4TB of NVMe storage
+    - Hive block log & shared memory: 500GB
+    - Base HAF database: 3.5T (before 2x lz4 compression)
+    - Hivemind database: 0.65T (before 2x lz4 compression)
+    - base HAF + Hivemind:  2.14T (compressed)
+    - HAF Block Explorer: xxx
+
+# Install prerequisites
+
+## Install ZFS support
+
+We strongly recommend running your HAF instance on a ZFS filesystem, and this documentation assumes
+you will be running ZFS.  Its compression and snapshot features are particularly useful when running a HAF node.
+
+We intend to publish ZFS snapshots of fully-synced HAF nodes that can be downloaded to get a HAF node
+up & running quickly, avoiding multi-day replay times.
+
+```
+sudo apt install zfsutils-linux
+```
+
+## Install Docker
+Install the latest docker.  If you're running Ubuntu 22.04, the version provided by the
+native docker.io package is too old to work with the compose scripts.  Install the latest
+version from docker.com, following the instructions here:
+
+https://docs.docker.com/engine/install/ubuntu/
+
+Which are:
+```
+sudo apt-get update
+sudo apt-get install ca-certificates curl gnupg
+sudo install -m 0755 -d /etc/apt/keyrings
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+sudo chmod a+r /etc/apt/keyrings/docker.gpg
+echo \
+  "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
+  "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
+  sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+```
+
+## Create a ZFS pool
+
+Create your ZFS pool if necessary.  HAF requires at least 4TB of space, and 2TB NVMe drives are
+readily available, so we typically construct a pool striping data across several 2TB drives.
+If you have three or four drives, you will get somewhat better read/write performance, and
+the extra space can come in handy.
+
+To create a pool named "haf-pool" using the first two NVMe drives in your system,
+use a command like:
+```
+sudo zpool create haf-pool /dev/nvme0n1 /dev/nvme1n1
+```
+If you name your ZFS pool something else, configure the name in the environment file,
+as described in the next section.
+
+Note: By default, ZFS tries to detect your disk's actual sector size, but it often gets it wrong
+for modern NVMe drives, which will degrade performance due to having to write the same sector multiple
+times.  If you don't know the actual sector size, we recommend forcing the sector size to 8k by
+specifying ashift=13 (e.g., `zfs create -o ashift=13 haf-pool /dev....`)
+
+## Configure your environment
+
+Make a copy of the file `.env.example` and customize it for your system.  This file contains
+configurable parameters for things like
+- directories
+- versions of hived, HAF, and associated tools
+
+The `docker compose` command will automatically read the file named `.env`.  If you want to
+keep multiple configurations, you can give your environment files different names like
+`.env.dev` and `.env.prod`, then explicitly specify the filename when running `docker compose`:
+`docker compose --env-file=.env.dev ...`
+
+## Set up ZFS filesystems
+
+The HAF installation is spread across multiple ZFS datasets, which allows us to set different
+ZFS options for different portions of the data. We recommend that most nodes keep the default
+datasets in order to enable easy sharing of snapshots.
+
+### Initializing from scratch
+
+If you're starting from scratch, after you've created your zpool and configured its name in the .env file
+as described above, run:
+```
+sudo ./create_zfs_datasets.sh
+```
+to create and mount the datasets.
+
+By default, the dataset holding most of the database storage uses zfs compression. The dataset for
+the blockchain data directory (which holds the block_log for hived and the shared_memory.bin file)
+is not compressed because hived directly manages compression of the block_log file.
+
+If you have a LOT of nvme storage (e.g. 6TB+), you can get better API performance at the cost of disk
+storage by disabling ZFS compression on the database dataset, but for most nodes this isn't recommended.
+
+#### Speeding up the initial sync
+
+Following the instructions above will get you a working HAF node, but there are some things you can
+do to speed up the initial sync.
+
+##### Replaying
+If you already have a recent block_log file (e.g., you're already running another instance of hived
+somewhere else on your local network), you can copy the block_log and block_log.artifacts files
+from that node into your /haf-pool/haf-datadir/blockchain directory.  After copying the files,
+make sure the ownership is set to the same owner as the /haf-pool/haf-datadir/blockchain directory
+so hived can read/write them: `chown 1000:100 block_log block_log.artifacts`
+
+Before bringing up the haf service, you will also need to add the `--replay-blockchain` argument to
+hived to tell it you want to replay.  Edit the `.env` file's `ARGUMENTS` line like so:
+```
+ARGUMENTS="--replay-blockchain"
+```
+Once the replay has finished, you can revert the `ARGUMENTS` line to the empty string
+
+##### Shared Memory on Ramdisk
+If you have enough spare memory on your system, you can speed up the initial replay by placing the
+`shared_memory.bin` file on a ramdisk.
+
+The current default shared memory filesize is 24G, so this will only work if you have 24G free
+(that's in addition to the memory you expect to be used by hived and HAF's integrated PostgreSQL
+instance).
+
+If you have a 64GB system, ensure you have a big enough swapfile (32GB is recommended
+and 8GB is known to not be sufficient) to handle peak memory usage needs during the replay.
+Peak memory usage currently occurs when haf table indexes are being built during the final
+stage of replay.
+
+To do this, first create a ramdisk:
+```
+sudo mkdir /mnt/haf_shared_mem
+
+# then
+sudo mount -t tmpfs -o size=25g tmpfs /mnt/haf_shared_mem
+# - or -
+sudo mount -t ramfs ramfs /mnt/haf_shared_mem
+
+# then
+sudo chown 1000:100 /mnt/haf_shared_mem
+```
+
+Then, edit your `.env` file to tell it where to put the shared memory file:
+```
+HAF_SHM_DIRECTORY="/mnt/haf_shared_mem"
+```
+
+Now, when you resync / replay, your shared memory file will actually be in memory.
+
+###### Moving Shared Memory back to disk
+Once your replay is finished, we suggest moving the shared_memory.bin file back to NVMe storage,
+because:
+- it doesn't make much performance difference once hived is in sync
+- you'll be able to have your zfs snapshots include your shared memory file
+- you won't be forced to replay if the power goes out
+
+To do this:
+
+- take down the stack (`docker compose down`).
+- copy the shared memory: `sudo cp /mnt/haf_shared_mem/shared_memory.bin /haf-pool/haf-datadir/blockchain`
+- destroy the ramdisk: `sudo umount /mnt/haf_shared_mem`
+- update the `.env` file's location: `HAF_SHM_DIRECTORY="${TOP_LEVEL_DATASET_MOUNTPOINT}/blockchain"`
+- bring the stack back up (`docker compose up -d`)
+
+### Initializing from a snapshot
+
+If you're starting with one of our snapshots, the process of restoring the snapshots will create the correct
+datasets with the correct options set.
+
+First, download the snapshot file from: TODO: http://xxxxxx
+
+Since these snapshots are huge, it's best to download the snapshot file to a different disk (a magnetic
+HDD will be fine for this) that has enough free space for the snapshot first, then restore it to the ZFS pool.
+This lets you easily resume the download if your transfer is interrupted.  If you download directly to
+the ZFS pool, any interruption would require you to start the download from the beginning.
+
+```
+wget -c https://whatever.net/snapshot_filename
+```
+If the transfer gets interrupted, run the same command again to resume.
+
+Then, to restore the snapshot, run:
+```
+sudo zfs recv -d -v haf-pool < snapshot_filename
+```
+
+## Launch procedure
+
+---
+
+start/stop HAF instance based on profiles enabled in your `.env` file
+
+```
+docker compose up -d
+
+docker compose logs -f hivemind-block-processing # tail the hivemind sync logs to the console
+docker compose down hivemind-block-processing # shut down just the hivemind sync process
+docker compose up -d hivemind-block-processing # bring hivemind sync process back up
+
+docker compose down # shut down all containers
+```
+
+This will start or stop all services selected by the profiles you have
+enabled in the `.env` file's `COMPOSE_PROFILES` variable.
+
+Currently available profiles are:
+- `core`: the minimal HAF system of a database and hived
+- `admin`: useful tools for administrating HAF: pgadmin, pghero
+- `apps`: core HAF apps: hivemind, HAfAH, haf-block-explorer
+- `servers`: services for routing/caching API calls: haproxy, jussi, varnish
+
+# Observing node startup
+
+After you start your HAF instance, hived will need some time to catch up to the head block
+of the Hive blockchain (typically a few minutes or less if you started from a snapshot,
+otherwise it will take many hours or even days depending on your hardware). You can monitor
+this process using: `docker compose logs -f haf`
+
+If syncing or replaying for the first time, HAF will delay creating indexes on its tables until the blockchain data has mostly been added to the database. This means there will be a noticeable delay near the end of the catchup period while these indexes get created. Even on a fast machine this post-sync/replay process currently takes over 2 hours to create the indexes, and another two hours to cluster the account_operations table, so be patient. Do not interrupt the process or your database will be left in an invalid state and might require another full replay.
+
+If you enabled the "admin" profile, you can use pghero's "Live Queries" view to monitor this process (e.g https://your_server/admin/pghero/live_queries). If not, you can still observe the cpu and disk io usage by postgresql during this process if you run a tool like htop.
+
+# After startup: Monitoring services and troubleshooting failures on your API node
+
+Haproxy can be used to monitor the state of the various services on your HAF server:
+`https://your_server_name/admin/haproxy/`
+
+If you see a service is down, you can use an appropriate `docker compose log` command to
+diagnose the issue. When diagnosing issues, keep in mind that several services depend on other services
+(for example, all haf apps depend on the hived service) so start by checking the health of the lowest level
+services.
+
+You can diagnose API performance problems using pgAdmin and PgHero. pgAdmin is best for diagnosing severe problems (e.g. locked tables, etc) whereas PgHero is typically best for profiling to determine what queries are loading down your server and can potentially be optimized.
+
+https://your_server_name/admin/
+
+# Creating a ZFS snapshot to backup your node
+Creating snapshots is fast and easy:
+
+```
+docker compose down  #shut down haf
+./snapshot_zfs_datasets.sh 20231023T1831Z-haf-only # where 20231023T1831Z-haf-only is an example snapshot name
+docker compose up -d
+```
+Note: snapshot_zfs_datasets.sh unmounts the HAF datasets, takes a snapshot, and remounts them. Since it unmounts the datasets, the script will fail if you have anything accessing the datasets. In particular, be sure you don't have any terminals open with a current working directory set to those datasets. In theory, the script shouldn't have to unmount the datasets before taking the snapshot, but we have occasionally encountered issues where the snapshots didn't get all needed data.
+
+# Deleting Hivemind data from your database (or a similar app's data)
+
+You may want to remove the Hivemind app's data from your database -- either because you no longer
+need it and want to free the space, or because you want to replay your Hivemind app from
+scratch, which is required for some upgrades.
+
+To delete the data:
+- stop Hivemind, but leave the rest of the stack running: `docker compose down hivemind-install hivemind-block-processing hivemind-server`
+- run the uninstall script: `docker compose --profile=hivemind-uninstall up`
+- you'll see the results of a few sql statements scroll by, and it should exit after a few seconds
+
+The Hivemind data is now gone.
+
+If you're uninstalling Hivemind permanently, then remember to remove the `hivemind` profile from your `.env` file's `COMPOSE_PROFILES` line so it doesn't start automatically next time you do a `docker compose up -d`.
+
+If you're upgrading to a new version of hivemind:
+- if you're upgrading to a pre-release version, you'll need to set `HIVEMIND_INSTANCE_VERSION` in your `.env` file to the correct tag for the version you want to run.  If you're just upgrading to a new release version (the ones tagged `haf_api_node`), you can leave this alone.
+- run `docker compose pull` to grab the new version
+- run `docker compose up -d` to bring up all services.  This should run hivemind's install, then launch the block processing container.
+- you can monitor Hivemind's sync process by watching the logs from `docker compose logs -f hivemind-block-processing`.  In a few short days, your Hivemind app should be fully synced and ready to handle API requests.
diff --git a/_nodeop/index.md b/_nodeop/index.md
index a7aea1c5..8cc29bf1 100644
--- a/_nodeop/index.md
+++ b/_nodeop/index.md
@@ -1,6 +1,6 @@
 ---
 title: titles.node_operation
-section: Node Operation
+section: titles.node_operation
 exclude: true
 exclude_in_index: true
 canonical_url: .
@@ -15,7 +15,8 @@ canonical_url: .
         {% unless doc.exclude_in_index %}
           <li>
             <a href="{{ doc.id | relative_url }}.html">{% t doc.title %}</a>
-            <span class="overview">{{ doc.description | markdownify }}</span>
+            {% capture description %}{% t doc.description %}{% endcapture %}
+            <span class="overview">{{ description | markdownify }}</span>
           </li>
         {% endunless %}
       {% endfor %}
diff --git a/_nodeop/jussi-multiplexer.md b/_nodeop/jussi-multiplexer.md
index 87e712b9..7ae16dee 100644
--- a/_nodeop/jussi-multiplexer.md
+++ b/_nodeop/jussi-multiplexer.md
@@ -1,7 +1,7 @@
 ---
 title: titles.jussi_multiplexer
-position: 5
-description: Optimize your local applications with jussi
+position: 7
+description: descriptions.jussi_multiplexer
 exclude: true
 layout: full
 canonical_url: jussi-multiplexer.html
diff --git a/_nodeop/monit.md b/_nodeop/monit.md
index 1639a6e4..265dcddd 100644
--- a/_nodeop/monit.md
+++ b/_nodeop/monit.md
@@ -1,8 +1,7 @@
 ---
 title: titles.monit
-position: 6
-description: |
-  Use monit as a utility for managing and monitoring hived.
+position: 9
+description: descriptions.monit
 exclude: true
 layout: full
 canonical_url: monit.html
diff --git a/_nodeop/node-cli.md b/_nodeop/node-cli.md
index 92ae49b7..dcbf1c06 100644
--- a/_nodeop/node-cli.md
+++ b/_nodeop/node-cli.md
@@ -1,7 +1,7 @@
 ---
 title: titles.node_cli
 position: 2
-description: All options available to `hived` at command line interface
+description: descriptions.node_cli
 exclude: true
 layout: full
 canonical_url: node-cli.html
@@ -29,7 +29,6 @@ The following are *only* available as command-line options.
 * [`statsd-record-on-replay`](#statsd-record-on-replay)
 * [`transaction-status-rebuild-state`](#transaction-status-rebuild-state)
 * [`p2p-force-validate`](#p2p-force-validate)
-* ~~[`force-validate`](#force-validate)~~
 * [`replay-blockchain`](#replay-blockchain)
 * [`force-open`](#force-open)
 * [`resync-blockchain`](#resync-blockchain)
@@ -39,7 +38,6 @@ The following are *only* available as command-line options.
 * [`dump-memory-details`](#dump-memory-details)
 * [`check-locks`](#check-locks)
 * [`validate-database-invariants`](#validate-database-invariants)
-* ~~[`database-cfg`](#database-cfg)~~
 * [`account-history-rocksdb-immediate-import`](#account-history-rocksdb-immediate-import)
 * [`exit-after-replay`](#exit-after-replay)
 * [`force-replay`](#force-replay)
@@ -91,16 +89,6 @@ Force validation of all transactions.
 --p2p-force-validate
 ```
 
-### `force-validate`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
-
-Force validation of all transactions.
-
-**Deprecated in favor of:** `p2p-force-validate`
-
-```bash
---force-validate
-```
-
 ### `replay-blockchain`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
 
 Clear chain database and replay all blocks.
diff --git a/_nodeop/node-config.md b/_nodeop/node-config.md
index 5b4cf237..162ef568 100644
--- a/_nodeop/node-config.md
+++ b/_nodeop/node-config.md
@@ -1,7 +1,7 @@
 ---
 title: titles.node_config
 position: 1
-description: All `config.ini` options available to `hived`
+description: descriptions.node_config
 exclude: true
 layout: full
 canonical_url: node-config.html
@@ -30,11 +30,8 @@ Also refer to:
 * [`backtrace`](#backtrace)
 * [`plugin`](#plugin)
 * [`account-history-track-account-range`](#account-history-track-account-range)
-* ~~[`track-account-range`](#track-account-range)~~
 * [`account-history-whitelist-ops`](#account-history-whitelist-ops)
-* ~~[`history-whitelist-ops`](#history-whitelist-ops)~~
 * [`account-history-blacklist-ops`](#account-history-blacklist-ops)
-* ~~[`history-blacklist-ops`](#history-blacklist-ops)~~
 * [`history-disable-pruning`](#history-disable-pruning)
 * [`account-history-rocksdb-path`](#account-history-rocksdb-path)
 * [`account-history-rocksdb-track-account-range`](#account-history-rocksdb-track-account-range)
@@ -52,7 +49,6 @@ Also refer to:
 * [`checkpoint`](#checkpoint)
 * [`flush-state-interval`](#flush-state-interval)
 * [`debug-node-edit-script`](#debug-node-edit-script)
-* ~~[`edit-script`](#edit-script)~~
 * [`follow-max-feed-size`](#follow-max-feed-size)
 * [`follow-start-feeds`](#follow-start-feeds)
 * [`log-json-rpc`](#log-json-rpc)
@@ -60,7 +56,6 @@ Also refer to:
 * [`market-history-buckets-per-size`](#market-history-buckets-per-size)
 * [`p2p-endpoint`](#p2p-endpoint)
 * [`p2p-max-connections`](#p2p-max-connections)
-* ~~[`seed-node`](#seed-node)~~
 * [`p2p-seed-node`](#p2p-seed-node)
 * [`p2p-parameters`](#p2p-parameters)
 * [`p2p-user-agent`](#p2p-user-agent)
@@ -79,7 +74,6 @@ Also refer to:
 * [`webserver-http-endpoint`](#webserver-http-endpoint)
 * [`webserver-ws-endpoint`](#webserver-ws-endpoint)
 * [`webserver-unix-endpoint`](#webserver-unix-endpoint)
-* ~~[`rpc-endpoint`](#rpc-endpoint)~~
 * [`webserver-thread-pool-size`](#webserver-thread-pool-size)
 * [`enable-stale-production`](#enable-stale-production)
 * [`required-participation`](#required-participation)
@@ -212,17 +206,6 @@ account-history-track-account-range = ["c", "f"]
 account-history-track-account-range =
 ```
 
-### `track-account-range`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
-
-Defines a range of accounts to track as a JSON pair `["from","to"]`.  Can be specified multiple times.
-
-**Deprecated in favor of:** `account-history-track-account-range`
-
-```ini
-# default
-track-account-range =
-```
-
 ### `account-history-whitelist-ops`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
 
 Defines a list of operations which will be explicitly logged.
@@ -239,17 +222,6 @@ account-history-whitelist-ops =
 account-history-whitelist-ops = transfer_operation transfer_to_vesting_operation withdraw_vesting_operation interest_operation transfer_to_savings_operation transfer_from_savings_operation cancel_transfer_from_savings_operation escrow_transfer_operation escrow_approve_operation escrow_dispute_operation escrow_release_operation fill_convert_request_operation fill_order_operation claim_reward_balance_operation author_reward_operation curation_reward_operation fill_vesting_withdraw_operation fill_transfer_from_savings_operation delegate_vesting_shares_operation return_vesting_delegation_operation comment_benefactor_reward_operation
 ```
 
-### `history-whitelist-ops`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
-
-Defines a list of operations which will be explicitly logged.
-
-**Deprecated in favor of:** `account-history-whitelist-ops`
-
-```ini
-# default
-history-whitelist-ops =
-```
-
 ### `account-history-blacklist-ops`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
 
 Defines a list of operations which will be explicitly ignored.
@@ -261,17 +233,6 @@ See: [#301]({{ 'https://github.com/steemit/steem/issues/301' | archived_url }}),
 account-history-blacklist-ops =
 ```
 
-### `history-blacklist-ops`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
-
-Defines a list of operations which will be explicitly ignored.
-
-**Deprecated in favor of:** `account-history-blacklist-ops`
-
-```ini
-# 
-history-blacklist-ops =
-```
-
 ### `history-disable-pruning`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
 
 Disables automatic account history trimming.
@@ -589,16 +550,6 @@ See: [`debug_node_plugin.md`](https://gitlab.syncad.com/hive/hive/-/blob/master/
 debug-node-edit-script =
 ```
 
-### `edit-script`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
-
-Database edits to apply on startup (may specify multiple times).
-
-**Deprecated in favor of:** `debug-node-edit-script`, see: [#1297]({{ 'https://github.com/steemit/steem/issues/1297' | archived_url }})
-
-```ini
-edit-script =
-```
-
 ### `follow-max-feed-size`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
 
 Set the maximum size of cached feed for an account.
@@ -679,16 +630,6 @@ Maxmimum number of incoming connections on P2P endpoint.
 p2p-max-connections =
 ```
 
-### `seed-node`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
-
-The IP address and port of a remote peer to sync with.
-
-**Deprecated in favor of:** `p2p-seed-node`, see: [#1314]({{ 'https://github.com/steemit/steem/issues/1314' | archived_url }})
-
-```ini
-seed-node =
-```
-
 ### `p2p-seed-node`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
 
 The IP address and port of a remote peer to sync with.  Multiple allowed.
@@ -978,16 +919,6 @@ See: [#3205]({{ 'https://github.com/steemit/steem/pull/3205' | archived_url }})
 webserver-unix-endpoint = /tmp/hived.sock
 ```
 
-### `rpc-endpoint`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
-
-Local http and websocket endpoint for webserver requests.
-
-**Deprecated in favor of:** `webserver-http-endpoint` and `webserver-ws-endpoint`, see: [#1297]({{ 'https://github.com/steemit/steem/issues/1297' | archived_url }})
-
-```ini
-rpc-endpoint =
-```
-
 ### `webserver-thread-pool-size`<a style="float: right" href="#sections"><i class="fas fa-chevron-up fa-sm" /></a>
 
 Number of threads used to handle queries.  Default: 32.
diff --git a/_nodeop/plugin-and-api-list.md b/_nodeop/plugin-and-api-list.md
index 8d3c20ba..c49d2546 100644
--- a/_nodeop/plugin-and-api-list.md
+++ b/_nodeop/plugin-and-api-list.md
@@ -1,7 +1,7 @@
 ---
 title: titles.plug_and_api_list
-position: 3
-description: Run a `hived` node with your preferred APIs.
+position: 4
+description: descriptions.plug_and_api_list
 exclude: true
 layout: full
 canonical_url: plugin-and-api-list.html
diff --git a/_nodeop/seed-node.md b/_nodeop/seed-node.md
index be1cf0ac..164a767a 100644
--- a/_nodeop/seed-node.md
+++ b/_nodeop/seed-node.md
@@ -1,8 +1,7 @@
 ---
 title: titles.seed_node
-position: 7
-description: |
-  Setting up a seed node.
+position: 10
+description: descriptions.seed_node
 exclude: true
 layout: full
 canonical_url: seed-node.html
diff --git a/_nodeop/setting-up-a-testnet.md b/_nodeop/setting-up-a-testnet.md
index 9f05ea8c..80a91858 100644
--- a/_nodeop/setting-up-a-testnet.md
+++ b/_nodeop/setting-up-a-testnet.md
@@ -1,8 +1,7 @@
 ---
 title: titles.setting_up_testnet
-position: 9
-description: |
-  "Quick-start" for deploying a Hive-based Testnet.
+position: 13
+description: descriptions.setting_up_testnet
 exclude: true
 layout: full
 canonical_url: setting-up-a-testnet.html
diff --git a/_nodeop/using-hivemind.md b/_nodeop/using-hivemind.md
index 163869de..91f820fe 100644
--- a/_nodeop/using-hivemind.md
+++ b/_nodeop/using-hivemind.md
@@ -1,7 +1,7 @@
 ---
 title: titles.using_hivemind
-position: 4
-description: Hivemind setup and API functionality
+position: 6
+description: descriptions.using_hivemind
 exclude: true
 layout: full
 canonical_url: using-hivemind.html
diff --git a/_services/jussi.md b/_services/jussi.md
index 2d25d968..1a321242 100644
--- a/_services/jussi.md
+++ b/_services/jussi.md
@@ -1,7 +1,7 @@
 ---
 title: titles.jussi
 position: 6
-description: A reverse proxy that forwards json-rpc requests.
+description: descriptions.jussi
 canonical_url: jussi.html
 ---
 
diff --git a/_testnet/tools.md b/_testnet/tools.md
index 4d7143aa..a256df37 100644
--- a/_testnet/tools.md
+++ b/_testnet/tools.md
@@ -1,5 +1,5 @@
 ---
-title: Tools
+title: titles.tools
 position: 1
 description: Directory of community testnet tools
 ---
diff --git a/_tutorials/index.md b/_tutorials/index.md
index 3dfc148c..4079a6a9 100644
--- a/_tutorials/index.md
+++ b/_tutorials/index.md
@@ -1,6 +1,6 @@
 ---
 title: titles.tutorials
-section: Tutorials
+section: titles.tutorials
 exclude: true
 canonical_url: .
 ---
diff --git a/apidefinitions.html b/apidefinitions.html
index 6576647e..f4107ad9 100644
--- a/apidefinitions.html
+++ b/apidefinitions.html
@@ -1,6 +1,6 @@
 ---
 title: titles.api-def
-section: API Definitions
+section: titles.api-def
 namespace: apidefinitions
 permalink: /apidefinitions/
 ---
@@ -19,7 +19,7 @@ permalink: /apidefinitions/
 				</a>
 			</h3>
 			{% if doc.description %}
-				<p class="description">{{doc.description}}</p>
+				<p class="description">{% t doc.description %}</p>
 			{% endif %}
 
 			{{ doc.content | replace: "<dl>", "<h6>Parameters</h6><dl>" }}
diff --git a/index.html b/index.html
index 4b77c42e..6a9cdc93 100644
--- a/index.html
+++ b/index.html
@@ -20,7 +20,7 @@ title: titles.home
 								</a>
 							</h3>
 							{% if doc.description %}
-								<p class="description">{{doc.description}}</p>
+								<p class="description">{% t doc.description %}</p>
 							{% endif %}
 
 							{{ doc.content | replace: "<dl>", "<h6>Parameters</h6><dl>" }}
diff --git a/layer2.html b/layer2.html
index c2c54cb4..88e3c3e2 100644
--- a/layer2.html
+++ b/layer2.html
@@ -1,6 +1,6 @@
 ---
 title: titles.layer2
-section: Layer2
+section: titles.layer2
 layout: default
 namespace: layer2
 permalink: /layer2/
@@ -20,7 +20,7 @@ permalink: /layer2/
 				</a>
 			</h3>
 			{% if doc.description %}
-				<p class="description">{{doc.description}}</p>
+				<p class="description">{% t doc.description %}</p>
 			{% endif %}
 
 			{{ doc.content | replace: "<dl>", "<h6>Parameters</h6><dl>" }}
diff --git a/quickstart.html b/quickstart.html
index 1d9f0762..da5591c3 100644
--- a/quickstart.html
+++ b/quickstart.html
@@ -1,6 +1,6 @@
 ---
 title: titles.quickstart
-section: Quickstart
+section: titles.quickstart
 layout: default
 namespace: quickstart
 permalink: /quickstart/
@@ -20,7 +20,7 @@ permalink: /quickstart/
 				</a>
 			</h3>
 			{% if doc.description %}
-				<p class="description">{{doc.description}}</p>
+				<p class="description">{% t doc.description %}</p>
 			{% endif %}
 
 			{{ doc.content | replace: "<dl>", "<h6>Parameters</h6><dl>" }}
diff --git a/resources.html b/resources.html
index 7664dc21..7a736808 100644
--- a/resources.html
+++ b/resources.html
@@ -1,6 +1,6 @@
 ---
 title: titles.resources
-section: Resources
+section: titles.resources
 layout: default
 namespace: resources
 permalink: /resources/
@@ -20,7 +20,7 @@ permalink: /resources/
 				</a>
 			</h3>
 			{% if doc.description %}
-				<p class="description">{{doc.description}}</p>
+				<p class="description">{% t doc.description %}</p>
 			{% endif %}
 
 			{{ doc.content | replace: "<dl>", "<h6>Parameters</h6><dl>" }}
diff --git a/services.html b/services.html
index 70b75bd0..c8733efd 100644
--- a/services.html
+++ b/services.html
@@ -1,6 +1,6 @@
 ---
 title: titles.services
-section: Services
+section: titles.services
 layout: default
 namespace: services
 permalink: /services/
@@ -20,7 +20,7 @@ permalink: /services/
 				</a>
 			</h3>
 			{% if doc.description %}
-				<p class="description">{{doc.description}}</p>
+				<p class="description">{% t doc.description %}</p>
 			{% endif %}
 
 			{{ doc.content | replace: "<dl>", "<h6>Parameters</h6><dl>" }}
diff --git a/unused/accounts.md b/unused/accounts.md
deleted file mode 100644
index e69de29b..00000000
diff --git a/unused/authority_and_validation.md b/unused/authority_and_validation.md
deleted file mode 100644
index e69de29b..00000000
diff --git a/unused/blocks_and_transactions.md b/unused/blocks_and_transactions.md
deleted file mode 100644
index e69de29b..00000000
diff --git a/unused/database_api.md b/unused/database_api.md
index 6af85b83..33e9398a 100644
--- a/unused/database_api.md
+++ b/unused/database_api.md
@@ -1,5 +1,5 @@
 ---
-title: Database API
+title: titles.database_api
 position: 2
 ---
 
diff --git a/unused/docker.md b/unused/docker.md
index 68d035ac..1dbe0142 100644
--- a/unused/docker.md
+++ b/unused/docker.md
@@ -1,5 +1,5 @@
 ---
-title: Docker
+title: titles.docker
 position: 1
 ---
 
diff --git a/unused/globals.md b/unused/globals.md
deleted file mode 100644
index e69de29b..00000000
diff --git a/unused/keys.md b/unused/keys.md
deleted file mode 100644
index e69de29b..00000000
diff --git a/unused/market.md b/unused/market.md
deleted file mode 100644
index e69de29b..00000000
diff --git a/unused/steemd.md b/unused/steemd.md
deleted file mode 100644
index d8acf2d2..00000000
--- a/unused/steemd.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-title: Hived
-position: 3
----
\ No newline at end of file
diff --git a/unused/tags.md b/unused/tags.md
index 8c5884b6..7b89fd09 100644
--- a/unused/tags.md
+++ b/unused/tags.md
@@ -1,5 +1,5 @@
 ---
-title: Tags
+title: titles.tags
 position: 3
 ---
 
diff --git a/unused/tutorials.md b/unused/tutorials.md
index 99c03090..faa2108f 100644
--- a/unused/tutorials.md
+++ b/unused/tutorials.md
@@ -1,5 +1,5 @@
 ---
-title: Tutorials
+title: titles.tutorials
 position: 2
 ---
 
-- 
GitLab