From 859f9321c9d9eaaf0b268afc803ebe5007271f94 Mon Sep 17 00:00:00 2001
From: Marcin Ickiewicz <mickiewicz@syncad.com>
Date: Tue, 7 Jan 2025 15:25:44 +0100
Subject: [PATCH] Move all update functions into one file in schema hive_update

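Consolidate the SQL sources that are used only during the update procedure:
hive_fork_manager_save_restore_views.sql and table_schema_verification.sql
are merged into a single update.sql, and all of their objects now live in
the dedicated schema hive_update, which is dropped and recreated at the
start of every update. References in cmake, the update script generator,
the Readme, the authorization comments, and the functional tests are
adjusted accordingly.

After this change the update flow loads one file and verifies the schema
through the hive_update functions; an illustrative invocation (paths and
helpers as configured by the generator script):

    psql_do -d "$DB_NAME" -q -t -A -f "$SCRIPTPATH/update.sql"
    BEFORE_UPDATE=$(psql_do -d "$DB_NAME" -t -A -c \
        "SELECT schema_hash FROM hive_update.create_database_hash()")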
---
 cmake/sql_extension.cmake                     |  14 +-
 src/hive_fork_manager/Readme.md               |   2 +-
 src/hive_fork_manager/authorization.sql       |  10 +-
 .../context_rewind/table_schema.sql           |   5 +-
 .../hive_fork_manager_save_restore_views.sql  | 165 --------
 ...fork_manager_update_script_generator.sh.in |  22 +-
 src/hive_fork_manager/hived_api.sql           |   4 +-
 .../table_schema_verification.sql             | 189 ---------
 src/hive_fork_manager/update.sql              | 364 ++++++++++++++++++
 .../shared_lib/calculate_schema_hash.sql      |   2 +-
 .../state_providers/hash.sql                  |  20 +-
 tests/integration/functional/tools/common.sh  |   2 +-
 tests/integration/functional/tools/test.sh    |   2 +-
 13 files changed, 399 insertions(+), 402 deletions(-)
 delete mode 100644 src/hive_fork_manager/hive_fork_manager_save_restore_views.sql
 delete mode 100644 src/hive_fork_manager/table_schema_verification.sql
 create mode 100644 src/hive_fork_manager/update.sql

diff --git a/cmake/sql_extension.cmake b/cmake/sql_extension.cmake
index c12776811..71f470e49 100644
--- a/cmake/sql_extension.cmake
+++ b/cmake/sql_extension.cmake
@@ -72,10 +72,8 @@ CONFIGURE_FILE( "${CMAKE_CURRENT_SOURCE_DIR}/hive_fork_manager_update_script_gen
   "${extension_path}/hive_fork_manager_update_script_generator.sh" @ONLY)
 
 # Only needed to be able to run update script from ${CMAKE_CURRENT_SOURCE_DIR} dir
-CONFIGURE_FILE( "${CMAKE_CURRENT_SOURCE_DIR}/hive_fork_manager_save_restore_views.sql"
-  "${extension_path}/hive_fork_manager_save_restore_views.sql" @ONLY)
-CONFIGURE_FILE( "${CMAKE_CURRENT_SOURCE_DIR}/table_schema_verification.sql"
-        "${extension_path}/table_schema_verification.sql" @ONLY)
+CONFIGURE_FILE( "${CMAKE_CURRENT_SOURCE_DIR}/update.sql"
+        "${extension_path}/update.sql" @ONLY)
 
 MESSAGE( STATUS "CONFIGURING the control file: ${CMAKE_BINARY_DIR}/extensions/${EXTENSION_NAME}/hive_fork_manager.control" )
 
@@ -104,13 +102,7 @@ INSTALL ( FILES "${extension_path}/hive_fork_manager_update_script_generator.sh"
           GROUP_EXECUTE GROUP_READ
           WORLD_EXECUTE WORLD_READ
         )
-INSTALL ( FILES "${CMAKE_CURRENT_SOURCE_DIR}/hive_fork_manager_save_restore_views.sql"
-          DESTINATION ${POSTGRES_SHAREDIR}/extension
-          PERMISSIONS OWNER_WRITE OWNER_READ
-          GROUP_EXECUTE GROUP_READ
-          WORLD_EXECUTE WORLD_READ
-        )
-INSTALL ( FILES "${CMAKE_CURRENT_SOURCE_DIR}/table_schema_verification.sql"
+INSTALL ( FILES "${CMAKE_CURRENT_SOURCE_DIR}/update.sql"
         DESTINATION ${POSTGRES_SHAREDIR}/extension
         PERMISSIONS OWNER_WRITE OWNER_READ
         GROUP_EXECUTE GROUP_READ
diff --git a/src/hive_fork_manager/Readme.md b/src/hive_fork_manager/Readme.md
index 3b14e197d..4ae6357a9 100644
--- a/src/hive_fork_manager/Readme.md
+++ b/src/hive_fork_manager/Readme.md
@@ -594,7 +594,7 @@ Disables triggers attached to a register table. It is useful for processing irre
 ##### hive.get_impacted_accounts( operation_body )
 Returns list of accounts ( their names ) impacted by the operation. 
 
-###### hive.calculate_schema_hash()
+###### hive_update.calculate_schema_hash()
 Calculates hash for group of tables in hafd schema, used by hive.create_database_hash.
 ###### hive.create_database_hash()
 Used in update procedure, creates database hash.
diff --git a/src/hive_fork_manager/authorization.sql b/src/hive_fork_manager/authorization.sql
index eabef59f6..ca665b1f0 100644
--- a/src/hive_fork_manager/authorization.sql
+++ b/src/hive_fork_manager/authorization.sql
@@ -194,12 +194,12 @@ GRANT EXECUTE ON FUNCTION
     , hafd._operation_from_jsonb(jsonb)
     , hafd.operation_to_jsontext(hafd.operation)
     , hafd.operation_from_jsontext(TEXT)
-    --, hive.create_database_hash()
-    --, hive.calculate_schema_hash()
+    --, hive_update.create_database_hash()
+    --, hive_update.calculate_schema_hash()
     , hive.all_indexes_have_status(_status hafd.index_status)
-    --, hive.calculate_table_schema_hash(schema_name TEXT,_table_name TEXT)
-    --, hive.calculate_state_provider_hash(_provider hafd.state_providers )
-    --, hive.calculate_state_provider_hashes()
+    --, hive_update.calculate_table_schema_hash(schema_name TEXT,_table_name TEXT)
+    --, hive_update.calculate_state_provider_hash(_provider hafd.state_providers )
+    --, hive_update.calculate_state_provider_hashes()
     , hive.are_any_indexes_missing()
     , hive.are_indexes_restored()
     , hive.are_fk_dropped()
diff --git a/src/hive_fork_manager/context_rewind/table_schema.sql b/src/hive_fork_manager/context_rewind/table_schema.sql
index 6bc4afa30..b0224f319 100644
--- a/src/hive_fork_manager/context_rewind/table_schema.sql
+++ b/src/hive_fork_manager/context_rewind/table_schema.sql
@@ -5,8 +5,5 @@ CREATE TABLE IF NOT EXISTS hafd.table_schema(
 
 SELECT pg_catalog.pg_extension_config_dump('hafd.table_schema', '');
 
-CREATE TYPE hafd.state_provider_and_hash AS(
-    provider hafd.state_providers,
-    hash TEXT
-);
+
 
diff --git a/src/hive_fork_manager/hive_fork_manager_save_restore_views.sql b/src/hive_fork_manager/hive_fork_manager_save_restore_views.sql
deleted file mode 100644
index 4e489b87f..000000000
--- a/src/hive_fork_manager/hive_fork_manager_save_restore_views.sql
+++ /dev/null
@@ -1,165 +0,0 @@
-/**
-Easy way to save and recreate table or view dependencies, when you need to alter
-something in them.
-See http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/.
-Enhanced by Wojciech Barcik wbarcik@syncad.com (handling of rules).
-Modified to also store and restore object definition itself.
-*/
-DROP SCHEMA IF EXISTS hive_update CASCADE;
-CREATE SCHEMA hive_update;
-
-CREATE OR REPLACE FUNCTION hive_update.deps_save_and_drop_dependencies(
-    p_view_schema character varying,
-    p_view_name character varying,
-    drop_relation BOOLEAN DEFAULT true
-  )
-  RETURNS void
-  LANGUAGE 'plpgsql'
-  COST 100
-  VOLATILE
-AS $BODY$
-/**
-From http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/
-@wojtek added DDL for rules.
-
-Saves view and it's dependencies in table `deps_saved_ddl`, for
-future restoration. Use function `deps_restore_dependencies` to restore them.
-*/
-declare
-  v_curr record;
-begin
-for v_curr in
-(
-  select obj_schema, obj_name, obj_type from
-  (
-  with recursive recursive_deps(obj_schema, obj_name, obj_type, depth) as
-  (
-    SELECT p_view_schema COLLATE "C", p_view_name COLLATE "C", relkind::VARCHAR, 0
-      FROM pg_class
-      JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
-      WHERE pg_namespace.nspname = p_view_schema AND pg_class.relname = p_view_name
-    union
-    select dep_schema::varchar, dep_name::varchar, dep_type::varchar,
-        recursive_deps.depth + 1 from
-    (
-      select ref_nsp.nspname ref_schema, ref_cl.relname ref_name,
-          rwr_cl.relkind dep_type, rwr_nsp.nspname dep_schema,
-          rwr_cl.relname dep_name
-      from pg_depend dep
-      join pg_class ref_cl on dep.refobjid = ref_cl.oid
-      join pg_namespace ref_nsp on ref_cl.relnamespace = ref_nsp.oid
-      join pg_rewrite rwr on dep.objid = rwr.oid
-      join pg_class rwr_cl on rwr.ev_class = rwr_cl.oid
-      join pg_namespace rwr_nsp on rwr_cl.relnamespace = rwr_nsp.oid
-      where dep.deptype = 'n'
-      and dep.classid = 'pg_rewrite'::regclass
-    ) deps
-    join recursive_deps on deps.ref_schema = recursive_deps.obj_schema
-        and deps.ref_name = recursive_deps.obj_name
-    where (deps.ref_schema != deps.dep_schema or deps.ref_name != deps.dep_name)
-  )
-  select obj_schema, obj_name, obj_type, depth
-  from recursive_deps
-  --  where depth > 0
-  ) t
-  group by obj_schema, obj_name, obj_type
-  order by max(depth) desc
-) loop
-
-  insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
-  select p_view_schema, p_view_name, 'COMMENT ON ' ||
-  case
-    when c.relkind = 'v' then 'VIEW'
-    when c.relkind = 'm' then 'MATERIALIZED VIEW'
-  else ''
-  end
-  || ' ' || n.nspname || '.' || c.relname || ' IS '''
-      || replace(d.description, '''', '''''') || ''';'
-  from pg_class c
-  join pg_namespace n on n.oid = c.relnamespace
-  join pg_description d on d.objoid = c.oid and d.objsubid = 0
-  where n.nspname = v_curr.obj_schema and c.relname = v_curr.obj_name
-      and d.description is not null;
-
-  insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
-  select p_view_schema, p_view_name, 'COMMENT ON COLUMN ' || n.nspname || '.'
-      || c.relname || '.' || a.attname || ' IS '''
-      || replace(d.description, '''', '''''') || ''';'
-  from pg_class c
-  join pg_attribute a on c.oid = a.attrelid
-  join pg_namespace n on n.oid = c.relnamespace
-  join pg_description d on d.objoid = c.oid and d.objsubid = a.attnum
-  where n.nspname = v_curr.obj_schema and c.relname = v_curr.obj_name
-      and d.description is not null;
-
-  insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
-  select p_view_schema, p_view_name, 'GRANT ' || privilege_type || ' ON '
-      || table_schema || '.' || table_name || ' TO ' || grantee
-  from information_schema.role_table_grants
-  where table_schema = v_curr.obj_schema and table_name = v_curr.obj_name;
-
-  if v_curr.obj_type = 'v' then
-
-    insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
-    select p_view_schema, p_view_name, definition
-    from pg_catalog.pg_rules
-    where schemaname = v_curr.obj_schema and tablename = v_curr.obj_name;
-
-    insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
-    select p_view_schema, p_view_name, 'CREATE VIEW '
-        || v_curr.obj_schema || '.' || v_curr.obj_name || ' AS ' || view_definition
-    from information_schema.views
-    where table_schema = v_curr.obj_schema and table_name = v_curr.obj_name;
-
-  elsif v_curr.obj_type = 'm' then
-    insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
-    select p_view_schema, p_view_name, 'CREATE MATERIALIZED VIEW '
-        || v_curr.obj_schema || '.' || v_curr.obj_name || ' AS ' || definition
-    from pg_matviews
-    where schemaname = v_curr.obj_schema and matviewname = v_curr.obj_name;
-  end if;
-
-  if drop_relation = true then
-    execute 'DROP ' ||
-    case
-      when v_curr.obj_type = 'v' then 'VIEW'
-      when v_curr.obj_type = 'm' then 'MATERIALIZED VIEW'
-    end
-    || ' ' || v_curr.obj_schema || '.' || v_curr.obj_name;
-  end if;
-
-end loop;
-end;
-$BODY$;
-
-
-CREATE OR REPLACE FUNCTION hive_update.deps_restore_dependencies(
-    p_view_schema character varying,
-    p_view_name character varying
-  )
-  RETURNS void
-  LANGUAGE 'plpgsql'
-  COST 100
-  VOLATILE
-AS $BODY$
-/**
-From http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/
-
-Restores dependencies dropped by function `deps_save_and_drop_dependencies`.
-*/
-declare
-  v_curr record;
-begin
-for v_curr in
-(
-  select deps_ddl_to_run
-  from hafd.deps_saved_ddl
-  where deps_view_schema = p_view_schema and deps_view_name = p_view_name
-  order by deps_id desc
-) loop
-  execute v_curr.deps_ddl_to_run;
-end loop;
-delete from hafd.deps_saved_ddl
-where deps_view_schema = p_view_schema and deps_view_name = p_view_name;
-end;
-$BODY$;
diff --git a/src/hive_fork_manager/hive_fork_manager_update_script_generator.sh.in b/src/hive_fork_manager/hive_fork_manager_update_script_generator.sh.in
index a091517b1..a68016827 100755
--- a/src/hive_fork_manager/hive_fork_manager_update_script_generator.sh.in
+++ b/src/hive_fork_manager/hive_fork_manager_update_script_generator.sh.in
@@ -81,14 +81,14 @@ POSTGRES_EXTENSION_DIR='@POSTGRES_SHAREDIR@/extension'
 DB_NAME_AFTER_UPDATE="upd_${DB_NAME}" # truncate before postgres will do this
 DB_NAME_AFTER_UPDATE="${DB_NAME_AFTER_UPDATE:0:63}"
 save_table_schema() {
-  psql_do -d "$DB_NAME" -o before_update_columns.txt -q -t -A -c "SELECT table_name, table_columns FROM hive.calculate_schema_hash()"
-  psql_do -d "$DB_NAME" -o before_update_constraints.txt -q -t -A -c "SELECT table_name, table_constraints FROM hive.calculate_schema_hash()"
-  psql_do -d "$DB_NAME" -o before_update_indexes.txt -q -t -A -c "SELECT table_name, table_indexes FROM hive.calculate_schema_hash()"
+  psql_do -d "$DB_NAME" -o before_update_columns.txt -q -t -A -c "SELECT table_name, table_columns FROM hive-update.calculate_schema_hash()"
+  psql_do -d "$DB_NAME" -o before_update_constraints.txt -q -t -A -c "SELECT table_name, table_constraints FROM hive-update.calculate_schema_hash()"
+  psql_do -d "$DB_NAME" -o before_update_indexes.txt -q -t -A -c "SELECT table_name, table_indexes FROM hive-update.calculate_schema_hash()"
   psql_do -d "$DB_NAME" -o before_update_providers.txt -q -t -A -c "SELECT provider, hash FROM hive.calculate_state_provider_hashes()"
 
-  psql_do -d "$DB_NAME_AFTER_UPDATE" -o after_update_columns.txt -q -t -A -c "SELECT table_name, table_columns FROM hive.calculate_schema_hash()"
-  psql_do -d "$DB_NAME_AFTER_UPDATE" -o after_update_constraings.txt -q -t -A -c "SELECT table_name, table_constraints FROM hive.calculate_schema_hash()"
-  psql_do -d "$DB_NAME_AFTER_UPDATE" -o after_update_indexes.txt -q -t -A -c "SELECT table_name, table_indexes FROM hive.calculate_schema_hash()"
+  psql_do -d "$DB_NAME_AFTER_UPDATE" -o after_update_columns.txt -q -t -A -c "SELECT table_name, table_columns FROM hive-update.calculate_schema_hash()"
+  psql_do -d "$DB_NAME_AFTER_UPDATE" -o after_update_constraings.txt -q -t -A -c "SELECT table_name, table_constraints FROM hive-update.calculate_schema_hash()"
+  psql_do -d "$DB_NAME_AFTER_UPDATE" -o after_update_indexes.txt -q -t -A -c "SELECT table_name, table_indexes FROM hive-update.calculate_schema_hash()"
   psql_do -d "$DB_NAME_AFTER_UPDATE" -o after_update_providers.txt -q -t -A -c "SELECT provider, hash FROM hive.calculate_state_provider_hashes()"
 }
 
@@ -96,10 +96,10 @@ verify_table_schema() {
   echo "Attempting to verify if existing table schema is correct..."
   psql_do -a -d postgres -c "CREATE DATABASE $DB_NAME_AFTER_UPDATE WITH OWNER $DB_ADMIN;"
   psql_do -a -d "$DB_NAME_AFTER_UPDATE" -c 'CREATE EXTENSION hive_fork_manager CASCADE;'
-  psql_do -d "$DB_NAME_AFTER_UPDATE" -q -t -A -f "$SCRIPTPATH/table_schema_verification.sql"
+  psql_do -d "$DB_NAME_AFTER_UPDATE" -q -t -A -f "$SCRIPTPATH/update.sql"
 
-  BEFORE_UPDATE=$(psql_do -d "$DB_NAME" -t -A -c "SELECT schema_hash FROM hive.create_database_hash()")
-  AFTER_UPDATE=$(psql_do -d "$DB_NAME_AFTER_UPDATE" -t -A -c "SELECT schema_hash FROM hive.create_database_hash()")
+  BEFORE_UPDATE=$(psql_do -d "$DB_NAME" -t -A -c "SELECT schema_hash FROM hive_update.create_database_hash()")
+  AFTER_UPDATE=$(psql_do -d "$DB_NAME_AFTER_UPDATE" -t -A -c "SELECT schema_hash FROM hive_update.create_database_hash()")
   if [ "$BEFORE_UPDATE" = "$AFTER_UPDATE" ]; then
     echo "The table schema is correct, verification completed."
     echo "Dropping temporary database"
@@ -248,10 +248,8 @@ check_functions_were_updated() {
   echo "End of Checking that all C functions were properly updated..."
 }
 
-psql_do -d "$DB_NAME" -q -t -A -f "$SCRIPTPATH/hive_fork_manager_save_restore_views.sql"
-
 # inject newer version of hash computation
-psql_do -d "$DB_NAME" -q -t -A -f "$SCRIPTPATH/table_schema_verification.sql"
+psql_do -d "$DB_NAME" -q -t -A -f "$SCRIPTPATH/update.sql"
 
 verify_table_schema
 
diff --git a/src/hive_fork_manager/hived_api.sql b/src/hive_fork_manager/hived_api.sql
index 0f6374743..965efbe4b 100644
--- a/src/hive_fork_manager/hived_api.sql
+++ b/src/hive_fork_manager/hived_api.sql
@@ -451,7 +451,7 @@ BEGIN
     IF EXISTS ( SELECT 1 FROM hafd.events_queue WHERE id = hive.unreachable_event_id() LIMIT 1 ) THEN
         SELECT MAX(eq.id) + 1 FROM hafd.events_queue eq WHERE eq.id != hive.unreachable_event_id() INTO __events_id;
         PERFORM SETVAL( 'hafd.events_queue_id_seq', __events_id, false );
-        -- PERFORM hive.create_database_hash();
+        -- PERFORM hive_update.create_database_hash();
         RETURN;
     END IF;
 
@@ -463,7 +463,7 @@ BEGIN
 
     INSERT INTO hafd.fork(block_num, time_of_fork) VALUES( 1, '2016-03-24 16:05:00'::timestamp ) ON CONFLICT DO NOTHING;
 
-    -- PERFORM hive.create_database_hash();
+    -- PERFORM hive_update.create_database_hash();
 END;
 $BODY$
 ;
diff --git a/src/hive_fork_manager/table_schema_verification.sql b/src/hive_fork_manager/table_schema_verification.sql
deleted file mode 100644
index 54002a595..000000000
--- a/src/hive_fork_manager/table_schema_verification.sql
+++ /dev/null
@@ -1,189 +0,0 @@
-CREATE TYPE hive.verify_table_schema AS(
-   table_name TEXT,
-   table_schema TEXT,
-   table_schema_hash UUID,
-   columns_hash UUID,
-   constraints_hash UUID,
-   indexes_hash UUID,
-   table_columns TEXT,
-   table_constraints TEXT,
-   table_indexes TEXT
-);
-
-
-DROP FUNCTION IF EXISTS hive.calculate_table_schema_hash;
-CREATE FUNCTION hive.calculate_table_schema_hash(schema_name TEXT,_table_name TEXT)
-    RETURNS hive.verify_table_schema
-    LANGUAGE plpgsql
-    STABLE
-AS
-$BODY$
-DECLARE
-    schemarow    hive.verify_table_schema;
-    _columns   TEXT;
-    _constraints   TEXT;
-    _indexes    TEXT;
-BEGIN
-    SELECT string_agg(c.agg_columns, ' | ') AS columns INTO _columns
-    FROM
-        (SELECT
-             array_to_string(
-                     ARRAY[
-                         column_name, data_type], ', ', '* ') as agg_columns
-         FROM
-             information_schema.columns isc
-         WHERE table_name=_table_name AND
-             table_schema=schema_name
-         ORDER BY
-             column_name ASC) c;
-    IF _columns IS NULL THEN
-        _columns = 'EMPTY';
-    END IF;
-
--- concatenation of constraints
-
-    SELECT string_agg(cc.agg_constraints, ' | ') AS columns INTO _constraints
-    FROM
-        (SELECT
-             array_to_string(
-                     ARRAY[
-                         constraint_name, constraint_type, is_deferrable, initially_deferred, enforced], ', ', '* ') as agg_constraints
-         FROM
-             information_schema.table_constraints
-         WHERE table_name=_table_name AND
-             constraint_schema=schema_name AND
-             NOT constraint_type='CHECK'
-         ORDER BY
-             constraint_name ASC) cc;
-    IF _constraints IS NULL THEN
-        _constraints = 'EMPTY';
-    END IF;
-
--- concatenation of indexes
-
-    SELECT string_agg(idx.agg_indexes, ' | ') AS indexes INTO _indexes
-    FROM
-        (SELECT
-             array_to_string(
-                     ARRAY[
-                         t.relname,
-                         i.relname,
-                         a.attname], ', ', '* ') as agg_indexes
-         from
-             pg_class t,
-             pg_class i,
-             pg_index ix,
-             pg_attribute a
-         where
-             t.oid = ix.indrelid
-           and i.oid = ix.indexrelid
-           and a.attrelid = t.oid
-           and a.attnum = ANY(ix.indkey)
-           and t.relkind = 'r'
-           and t.relname like _table_name
-           and t.relnamespace = (SELECT oid from pg_catalog.pg_namespace where nspname=schema_name)
-         order by
-             t.relname,
-             i.relname,
-             a.attname ASC) idx;
-    IF _indexes IS NULL THEN
-        _indexes = 'EMPTY';
-    END IF;
-
-    schemarow.table_name := _table_name;
-    --    schemarow.table_schema := (_columns || _constraints || _indexes);
-    schemarow.table_schema := (_columns);
-    schemarow.table_schema_hash := MD5(_columns || _constraints || _indexes)::uuid;
-    schemarow.columns_hash := MD5(_columns)::uuid;
-    schemarow.constraints_hash := MD5(_constraints)::uuid;
-    schemarow.indexes_hash := MD5(_indexes)::uuid;
-    schemarow.table_columns := _columns;
-    schemarow.table_constraints := _constraints;
-    schemarow.table_indexes := _indexes;
-    RETURN schemarow;
-END;
-$BODY$;
-
-DROP FUNCTION IF EXISTS hive.calculate_state_provider_hash;
-CREATE FUNCTION hive.calculate_state_provider_hash(_provider hafd.state_providers )
-    RETURNS TEXT --md5 of start_provider function
-    LANGUAGE plpgsql
-    STABLE
-AS
-$BODY$
-DECLARE
-    __md5     TEXT;
-BEGIN
-       -- to avoid complications with establish state providers and their tables ( it wil require
-       -- to create artificial context and register state providers into it ), only hash of code
-       -- which creates sp tables is taken into consideration
-       EXECUTE format( 'SELECT MD5(pg_get_functiondef(''hive.start_provider_%s''::regproc))', _provider )
-       INTO __md5;
-       RETURN __md5;
-END;
-$BODY$;
-
-DROP FUNCTION IF EXISTS hive.calculate_state_provider_hashes;
-CREATE FUNCTION hive.calculate_state_provider_hashes()
-    RETURNS SETOF hafd.state_provider_and_hash
-    LANGUAGE plpgsql
-    STABLE
-AS
-$BODY$
-BEGIN
-    RETURN QUERY
-        SELECT
-              sp.* as provider
-            , hive.calculate_state_provider_hash(sp.*) as hash
-        FROM unnest(enum_range(NULL::hafd.state_providers)) as sp;
-END;
-$BODY$;
-
-
-
--- calculate hafd schema hash
-DROP FUNCTION IF EXISTS hive.calculate_schema_hash;
-CREATE FUNCTION hive.calculate_schema_hash()
-    RETURNS SETOF hive.verify_table_schema
-    LANGUAGE plpgsql
-    STABLE
-AS
-$BODY$
-BEGIN
-
-    RETURN QUERY SELECT (hive.calculate_table_schema_hash( 'hafd', table_name)).*
-    FROM information_schema.tables
-    WHERE table_schema = 'hafd'
-    AND table_type = 'BASE TABLE';
-END;
-$BODY$
-;
-
-DROP FUNCTION IF EXISTS hive.create_database_hash();
-CREATE FUNCTION hive.create_database_hash()
-    RETURNS SETOF hafd.table_schema
-    LANGUAGE plpgsql
-    VOLATILE
-AS
-$BODY$
-DECLARE
-    ts hafd.table_schema%ROWTYPE;
-    _tmp TEXT;
-    _provider_hashes TEXT;
-BEGIN
-    TRUNCATE hafd.table_schema;
-
-    SELECT string_agg(table_schema, ' | ') FROM hive.calculate_schema_hash() INTO _tmp;
-
-    SELECT string_agg(provider || hash, ' | ') FROM hive.calculate_state_provider_hashes() INTO _provider_hashes;
-
-    _tmp = _tmp || _provider_hashes;
-    INSERT INTO hafd.table_schema VALUES ('hafd', MD5(_tmp)::uuid);
-
-    ts.schema_name := 'hafd';
-    ts.schema_hash := MD5(_tmp)::uuid;
-RETURN NEXT ts;
-END;
-$BODY$
-;
-
diff --git a/src/hive_fork_manager/update.sql b/src/hive_fork_manager/update.sql
new file mode 100644
index 000000000..4e3d5ba70
--- /dev/null
+++ b/src/hive_fork_manager/update.sql
@@ -0,0 +1,364 @@
+/**
+  This file defines everything that is used only during an update.
+ */
+
+DROP SCHEMA IF EXISTS hive_update CASCADE;
+CREATE SCHEMA hive_update;
+
+/**
+An easy way to save and recreate table or view dependencies when you need to
+alter something in them.
+See http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/.
+Enhanced by Wojciech Barcik wbarcik@syncad.com (handling of rules).
+Modified to also store and restore the object definition itself.
+*/
+
+CREATE OR REPLACE FUNCTION hive_update.deps_save_and_drop_dependencies(
+    p_view_schema character varying,
+    p_view_name character varying,
+    drop_relation BOOLEAN DEFAULT true
+)
+    RETURNS void
+    LANGUAGE 'plpgsql'
+    COST 100
+    VOLATILE
+AS $BODY$
+/**
+From http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/
+@wojtek added DDL for rules.
+
+Saves a view and its dependencies in the table `deps_saved_ddl` for
+future restoration. Use the function `deps_restore_dependencies` to restore them.
+*/
+declare
+    v_curr record;
+begin
+    for v_curr in
+        (
+            select obj_schema, obj_name, obj_type from
+                (
+                    with recursive recursive_deps(obj_schema, obj_name, obj_type, depth) as
+                                       (
+                                           SELECT p_view_schema COLLATE "C", p_view_name COLLATE "C", relkind::VARCHAR, 0
+                                           FROM pg_class
+                                                    JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
+                                           WHERE pg_namespace.nspname = p_view_schema AND pg_class.relname = p_view_name
+                                           union
+                                           select dep_schema::varchar, dep_name::varchar, dep_type::varchar,
+                                                  recursive_deps.depth + 1 from
+                                               (
+                                                   select ref_nsp.nspname ref_schema, ref_cl.relname ref_name,
+                                                          rwr_cl.relkind dep_type, rwr_nsp.nspname dep_schema,
+                                                          rwr_cl.relname dep_name
+                                                   from pg_depend dep
+                                                            join pg_class ref_cl on dep.refobjid = ref_cl.oid
+                                                            join pg_namespace ref_nsp on ref_cl.relnamespace = ref_nsp.oid
+                                                            join pg_rewrite rwr on dep.objid = rwr.oid
+                                                            join pg_class rwr_cl on rwr.ev_class = rwr_cl.oid
+                                                            join pg_namespace rwr_nsp on rwr_cl.relnamespace = rwr_nsp.oid
+                                                   where dep.deptype = 'n'
+                                                     and dep.classid = 'pg_rewrite'::regclass
+                                               ) deps
+                                                   join recursive_deps on deps.ref_schema = recursive_deps.obj_schema
+                                                   and deps.ref_name = recursive_deps.obj_name
+                                           where (deps.ref_schema != deps.dep_schema or deps.ref_name != deps.dep_name)
+                                       )
+                    select obj_schema, obj_name, obj_type, depth
+                    from recursive_deps
+                    --  where depth > 0
+                ) t
+            group by obj_schema, obj_name, obj_type
+            order by max(depth) desc
+        ) loop
+
+            insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+            select p_view_schema, p_view_name, 'COMMENT ON ' ||
+                                               case
+                                                   when c.relkind = 'v' then 'VIEW'
+                                                   when c.relkind = 'm' then 'MATERIALIZED VIEW'
+                                                   else ''
+                                                   end
+                                                   || ' ' || n.nspname || '.' || c.relname || ' IS '''
+                                                   || replace(d.description, '''', '''''') || ''';'
+            from pg_class c
+                     join pg_namespace n on n.oid = c.relnamespace
+                     join pg_description d on d.objoid = c.oid and d.objsubid = 0
+            where n.nspname = v_curr.obj_schema and c.relname = v_curr.obj_name
+              and d.description is not null;
+
+            insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+            select p_view_schema, p_view_name, 'COMMENT ON COLUMN ' || n.nspname || '.'
+                                                   || c.relname || '.' || a.attname || ' IS '''
+                                                   || replace(d.description, '''', '''''') || ''';'
+            from pg_class c
+                     join pg_attribute a on c.oid = a.attrelid
+                     join pg_namespace n on n.oid = c.relnamespace
+                     join pg_description d on d.objoid = c.oid and d.objsubid = a.attnum
+            where n.nspname = v_curr.obj_schema and c.relname = v_curr.obj_name
+              and d.description is not null;
+
+            insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+            select p_view_schema, p_view_name, 'GRANT ' || privilege_type || ' ON '
+                                                   || table_schema || '.' || table_name || ' TO ' || grantee
+            from information_schema.role_table_grants
+            where table_schema = v_curr.obj_schema and table_name = v_curr.obj_name;
+
+            if v_curr.obj_type = 'v' then
+
+                insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+                select p_view_schema, p_view_name, definition
+                from pg_catalog.pg_rules
+                where schemaname = v_curr.obj_schema and tablename = v_curr.obj_name;
+
+                insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+                select p_view_schema, p_view_name, 'CREATE VIEW '
+                                                       || v_curr.obj_schema || '.' || v_curr.obj_name || ' AS ' || view_definition
+                from information_schema.views
+                where table_schema = v_curr.obj_schema and table_name = v_curr.obj_name;
+
+            elsif v_curr.obj_type = 'm' then
+                insert into hafd.deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+                select p_view_schema, p_view_name, 'CREATE MATERIALIZED VIEW '
+                                                       || v_curr.obj_schema || '.' || v_curr.obj_name || ' AS ' || definition
+                from pg_matviews
+                where schemaname = v_curr.obj_schema and matviewname = v_curr.obj_name;
+            end if;
+
+            if drop_relation = true then
+                execute 'DROP ' ||
+                        case
+                            when v_curr.obj_type = 'v' then 'VIEW'
+                            when v_curr.obj_type = 'm' then 'MATERIALIZED VIEW'
+                            end
+                            || ' ' || v_curr.obj_schema || '.' || v_curr.obj_name;
+            end if;
+
+        end loop;
+end;
+$BODY$;
+
+
+CREATE OR REPLACE FUNCTION hive_update.deps_restore_dependencies(
+    p_view_schema character varying,
+    p_view_name character varying
+)
+    RETURNS void
+    LANGUAGE 'plpgsql'
+    COST 100
+    VOLATILE
+AS $BODY$
+/**
+From http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/
+
+Restores dependencies dropped by function `deps_save_and_drop_dependencies`.
+*/
+declare
+    v_curr record;
+begin
+    for v_curr in
+        (
+            select deps_ddl_to_run
+            from hafd.deps_saved_ddl
+            where deps_view_schema = p_view_schema and deps_view_name = p_view_name
+            order by deps_id desc
+        ) loop
+            execute v_curr.deps_ddl_to_run;
+        end loop;
+    delete from hafd.deps_saved_ddl
+    where deps_view_schema = p_view_schema and deps_view_name = p_view_name;
+end;
+$BODY$;
+
+CREATE TYPE hive_update.verify_table_schema AS(
+   table_name TEXT,
+   table_schema TEXT,
+   table_schema_hash UUID,
+   columns_hash UUID,
+   constraints_hash UUID,
+   indexes_hash UUID,
+   table_columns TEXT,
+   table_constraints TEXT,
+   table_indexes TEXT
+);
+
+CREATE TYPE hive_update.state_provider_and_hash AS(
+   provider hafd.state_providers,
+   hash TEXT
+);
+
+DROP FUNCTION IF EXISTS hive_update.calculate_table_schema_hash;
+CREATE FUNCTION hive_update.calculate_table_schema_hash(schema_name TEXT,_table_name TEXT)
+    RETURNS hive_update.verify_table_schema
+    LANGUAGE plpgsql
+    STABLE
+AS
+$BODY$
+DECLARE
+    schemarow    hive_update.verify_table_schema;
+    _columns   TEXT;
+    _constraints   TEXT;
+    _indexes    TEXT;
+BEGIN
+    SELECT string_agg(c.agg_columns, ' | ') AS columns INTO _columns
+    FROM
+        (SELECT
+             array_to_string(
+                     ARRAY[
+                         column_name, data_type], ', ', '* ') as agg_columns
+         FROM
+             information_schema.columns isc
+         WHERE table_name=_table_name AND
+             table_schema=schema_name
+         ORDER BY
+             column_name ASC) c;
+    IF _columns IS NULL THEN
+        _columns = 'EMPTY';
+    END IF;
+
+-- concatenation of constraints
+
+    SELECT string_agg(cc.agg_constraints, ' | ') AS columns INTO _constraints
+    FROM
+        (SELECT
+             array_to_string(
+                     ARRAY[
+                         constraint_name, constraint_type, is_deferrable, initially_deferred, enforced], ', ', '* ') as agg_constraints
+         FROM
+             information_schema.table_constraints
+         WHERE table_name=_table_name AND
+             constraint_schema=schema_name AND
+             NOT constraint_type='CHECK'
+         ORDER BY
+             constraint_name ASC) cc;
+    IF _constraints IS NULL THEN
+        _constraints = 'EMPTY';
+    END IF;
+
+-- concatenation of indexes
+
+    SELECT string_agg(idx.agg_indexes, ' | ') AS indexes INTO _indexes
+    FROM
+        (SELECT
+             array_to_string(
+                     ARRAY[
+                         t.relname,
+                         i.relname,
+                         a.attname], ', ', '* ') as agg_indexes
+         from
+             pg_class t,
+             pg_class i,
+             pg_index ix,
+             pg_attribute a
+         where
+             t.oid = ix.indrelid
+           and i.oid = ix.indexrelid
+           and a.attrelid = t.oid
+           and a.attnum = ANY(ix.indkey)
+           and t.relkind = 'r'
+           and t.relname like _table_name
+           and t.relnamespace = (SELECT oid from pg_catalog.pg_namespace where nspname=schema_name)
+         order by
+             t.relname,
+             i.relname,
+             a.attname ASC) idx;
+    IF _indexes IS NULL THEN
+        _indexes = 'EMPTY';
+    END IF;
+
+    schemarow.table_name := _table_name;
+    --    schemarow.table_schema := (_columns || _constraints || _indexes);
+    schemarow.table_schema := (_columns);
+    schemarow.table_schema_hash := MD5(_columns || _constraints || _indexes)::uuid;
+    schemarow.columns_hash := MD5(_columns)::uuid;
+    schemarow.constraints_hash := MD5(_constraints)::uuid;
+    schemarow.indexes_hash := MD5(_indexes)::uuid;
+    schemarow.table_columns := _columns;
+    schemarow.table_constraints := _constraints;
+    schemarow.table_indexes := _indexes;
+    RETURN schemarow;
+END;
+$BODY$;
+
+DROP FUNCTION IF EXISTS hive_update.calculate_state_provider_hash;
+CREATE FUNCTION hive_update.calculate_state_provider_hash(_provider hafd.state_providers )
+    RETURNS TEXT --md5 of start_provider function
+    LANGUAGE plpgsql
+    STABLE
+AS
+$BODY$
+DECLARE
+    __md5     TEXT;
+BEGIN
+       -- To avoid complications with established state providers and their tables (it would require
+       -- creating an artificial context and registering state providers into it), only the hash of the
+       -- code which creates the state-provider tables is taken into consideration.
+       EXECUTE format( 'SELECT MD5(pg_get_functiondef(''hive.start_provider_%s''::regproc))', _provider )
+       INTO __md5;
+       RETURN __md5;
+END;
+$BODY$;
+
+DROP FUNCTION IF EXISTS hive_update.calculate_state_provider_hashes;
+CREATE FUNCTION hive_update.calculate_state_provider_hashes()
+    RETURNS SETOF hive_update.state_provider_and_hash
+    LANGUAGE plpgsql
+    STABLE
+AS
+$BODY$
+BEGIN
+    RETURN QUERY
+        SELECT
+              sp.* as provider
+            , hive_update.calculate_state_provider_hash(sp.*) as hash
+        FROM unnest(enum_range(NULL::hafd.state_providers)) as sp;
+END;
+$BODY$;
+
+
+
+-- calculate hafd schema hash
+DROP FUNCTION IF EXISTS hive_update.calculate_schema_hash;
+CREATE FUNCTION hive_update.calculate_schema_hash()
+    RETURNS SETOF hive_update.verify_table_schema
+    LANGUAGE plpgsql
+    STABLE
+AS
+$BODY$
+BEGIN
+
+    RETURN QUERY SELECT (hive_update.calculate_table_schema_hash( 'hafd', table_name)).*
+    FROM information_schema.tables
+    WHERE table_schema = 'hafd'
+    AND table_type = 'BASE TABLE';
+END;
+$BODY$
+;
+
+DROP FUNCTION IF EXISTS hive_update.create_database_hash;
+CREATE FUNCTION hive_update.create_database_hash()
+    RETURNS SETOF hafd.table_schema
+    LANGUAGE plpgsql
+    VOLATILE
+AS
+$BODY$
+DECLARE
+    ts hafd.table_schema%ROWTYPE;
+    _tmp TEXT;
+    _provider_hashes TEXT;
+BEGIN
+    TRUNCATE hafd.table_schema;
+
+    SELECT string_agg(table_schema, ' | ') FROM hive_update.calculate_schema_hash() INTO _tmp;
+
+    SELECT string_agg(provider || hash, ' | ') FROM hive_update.calculate_state_provider_hashes() INTO _provider_hashes;
+
+    _tmp = _tmp || _provider_hashes;
+    INSERT INTO hafd.table_schema VALUES ('hafd', MD5(_tmp)::uuid);
+
+    ts.schema_name := 'hafd';
+    ts.schema_hash := MD5(_tmp)::uuid;
+    RETURN NEXT ts;
+END;
+$BODY$
+;
+
diff --git a/tests/integration/functional/hive_fork_manager/shared_lib/calculate_schema_hash.sql b/tests/integration/functional/hive_fork_manager/shared_lib/calculate_schema_hash.sql
index 5cfb0baf2..d0ec7c073 100644
--- a/tests/integration/functional/hive_fork_manager/shared_lib/calculate_schema_hash.sql
+++ b/tests/integration/functional/hive_fork_manager/shared_lib/calculate_schema_hash.sql
@@ -6,7 +6,7 @@ DECLARE
   _row TEXT;
 BEGIN
 FOR _row in
-  SELECT (ROW(f.table_name, f.table_schema_hash, f.columns_hash, f.constraints_hash, f.indexes_hash)::text) FROM hive.calculate_schema_hash() AS f
+  SELECT (ROW(f.table_name, f.table_schema_hash, f.columns_hash, f.constraints_hash, f.indexes_hash)::text) FROM hive_update.calculate_schema_hash() AS f
   EXCEPT SELECT unnest(ARRAY[
     '(blocks,6943f52d-ec57-ed27-b2e3-d8ba4b3288ca,4397b404-c56c-84e1-952e-a73d29745394,4c7b832d-5d52-83fe-fd2b-7e7a69416fae,2b354f61-618a-da7d-3380-3e12c45a3f30)',
     '(irreversible_data,dd1812c6-cabd-4382-a4bf-c355276b3839,53114e1c-c6e5-867b-6c67-1d55865180fe,77ed7932-7dab-20e3-b506-4a2d3fccfe75,f40cac4c-2fae-a597-11c8-8cc0f329e18f)',
diff --git a/tests/integration/functional/hive_fork_manager/state_providers/hash.sql b/tests/integration/functional/hive_fork_manager/state_providers/hash.sql
index d8b64bd0a..52032d01f 100644
--- a/tests/integration/functional/hive_fork_manager/state_providers/hash.sql
+++ b/tests/integration/functional/hive_fork_manager/state_providers/hash.sql
@@ -12,22 +12,22 @@ DECLARE
     __database_hash_before TEXT;
     __database_hash_after TEXT;
 BEGIN
-    ASSERT ( SELECT 1 FROM hive.calculate_state_provider_hashes() WHERE provider = 'ACCOUNTS' ) IS NOT NULL
+    ASSERT ( SELECT 1 FROM hive_update.calculate_state_provider_hashes() WHERE provider = 'ACCOUNTS' ) IS NOT NULL
         , 'ACCOUNTS not hashed';
 
-    ASSERT ( SELECT 1 FROM hive.calculate_state_provider_hashes() WHERE provider = 'KEYAUTH' ) IS NOT NULL
+    ASSERT ( SELECT 1 FROM hive_update.calculate_state_provider_hashes() WHERE provider = 'KEYAUTH' ) IS NOT NULL
         , 'KEYAUTH not hashed';
 
-    ASSERT ( SELECT 1 FROM hive.calculate_state_provider_hashes() WHERE provider = 'METADATA' ) IS NOT NULL
+    ASSERT ( SELECT 1 FROM hive_update.calculate_state_provider_hashes() WHERE provider = 'METADATA' ) IS NOT NULL
         , 'METADATA not hashed';
 
-    ASSERT ( SELECT COUNT(*) FROM hive.calculate_state_provider_hashes() ) = 3
+    ASSERT ( SELECT COUNT(*) FROM hive_update.calculate_state_provider_hashes() ) = 3
         , 'More   than 3 known providers are hashed';
 
-    SELECT STRING_AGG( hash, '|') FROM hive.calculate_state_provider_hashes() INTO __all_before_hashes;
-    SELECT * FROM hive.calculate_state_provider_hash( 'KEYAUTH'::hafd.state_providers ) INTO __keyauth_before_hash;
+    SELECT STRING_AGG( hash, '|') FROM hive_update.calculate_state_provider_hashes() INTO __all_before_hashes;
+    SELECT * FROM hive_update.calculate_state_provider_hash( 'KEYAUTH'::hafd.state_providers ) INTO __keyauth_before_hash;
 
-    SELECT schema_hash FROM hive.create_database_hash()  INTO __database_hash_before;
+    SELECT schema_hash FROM hive_update.create_database_hash()  INTO __database_hash_before;
 
     EXECUTE format( 'CREATE OR REPLACE FUNCTION hive.start_provider_keyauth( _context hafd.context_name )
     RETURNS TEXT[]
@@ -41,9 +41,9 @@ BEGIN
     $$
     ;');
 
-    SELECT STRING_AGG( hash, '|') FROM hive.calculate_state_provider_hashes() INTO __all_after_hashes;
-    SELECT * FROM hive.calculate_state_provider_hash( 'KEYAUTH'::hafd.state_providers ) INTO __keyauth_after_hash;
-    SELECT schema_hash FROM hive.create_database_hash()  INTO __database_hash_after;
+    SELECT STRING_AGG( hash, '|') FROM hive_update.calculate_state_provider_hashes() INTO __all_after_hashes;
+    SELECT * FROM hive_update.calculate_state_provider_hash( 'KEYAUTH'::hafd.state_providers ) INTO __keyauth_after_hash;
+    SELECT schema_hash FROM hive_update.create_database_hash()  INTO __database_hash_after;
 
     ASSERT __all_after_hashes != __all_before_hashes, 'Hashes not changed after modification';
     ASSERT __keyauth_after_hash != __keyauth_before_hash, 'Hash not changed after modification';
diff --git a/tests/integration/functional/tools/common.sh b/tests/integration/functional/tools/common.sh
index 846db08a2..2388d5e23 100755
--- a/tests/integration/functional/tools/common.sh
+++ b/tests/integration/functional/tools/common.sh
@@ -61,7 +61,7 @@ setup_test_database() {
 
   # ATTENTION: normally the extension does not contain hash functions
   # so the db is little different than production state, but these are functional tests so IMO it is acceptable
-  psql -p "${postgres_port}" -d "${DB_NAME}" -a -v ON_ERROR_STOP=on -f "${extension_path}/table_schema_verification.sql"
+  psql -p "${postgres_port}" -d "${DB_NAME}" -a -v ON_ERROR_STOP=on -f "${extension_path}/update.sql"
 
   # TODO(mickiewicz@syncad.com): remove when releasing on pg16 where 'public' schema is not accessible by default
   if ! psql -p "${postgres_port}" -d "${DB_NAME}" -a -v ON_ERROR_STOP=on -c "REVOKE CREATE ON SCHEMA public FROM PUBLIC;";
diff --git a/tests/integration/functional/tools/test.sh b/tests/integration/functional/tools/test.sh
index eefde30ca..23858e79a 100755
--- a/tests/integration/functional/tools/test.sh
+++ b/tests/integration/functional/tools/test.sh
@@ -70,7 +70,7 @@ for testfun in ${tests}; do
     psql ${pg_call} -c "UPDATE pg_extension SET extversion = '1.0' WHERE extname = 'hive_fork_manager';"
     sudo "${script_to_execute_after_testfun}" --haf-db-name="$DB_NAME";
     # for testing hash functions we ned to add them after update which remove them
-    psql ${pg_call} -f "${extension_path}/table_schema_verification.sql"
+    psql ${pg_call} -f "${extension_path}/update.sql"
   fi
 done
 
-- 
GitLab