diff --git a/hive b/hive
index e266447c601c89f16a94cea69b4ca59249811897..9e0f071eae283cdb98fcb3af5277d3889b9c4495 160000
--- a/hive
+++ b/hive
@@ -1 +1 @@
-Subproject commit e266447c601c89f16a94cea69b4ca59249811897
+Subproject commit 9e0f071eae283cdb98fcb3af5277d3889b9c4495
diff --git a/tests/integration/system/haf/local_tools.py b/tests/integration/system/haf/local_tools.py
index 46a84549190588204713a0463bd93f970d9414c1..d553732abe13092d632a0efcfa8673738ee894c6 100644
--- a/tests/integration/system/haf/local_tools.py
+++ b/tests/integration/system/haf/local_tools.py
@@ -101,36 +101,23 @@ def get_irreversible_block(node):
     return irreversible_block_num


-def get_time_offset_from_file(name):
+def get_timestamp_from_file(name):
     timestamp = ''
     with open(name, 'r') as f:
         timestamp = f.read()
     timestamp = timestamp.strip()
-    current_time = datetime.now(timezone.utc)
-    new_time = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc)
-    difference = round(new_time.timestamp()-current_time.timestamp()) - 10 # circa 10 seconds is needed for nodes to startup
-    time_offset = str(difference) + 's'
-    return time_offset
+    return timestamp


 def run_networks(world, blocklog_directory):
-    time_offset = get_time_offset_from_file(blocklog_directory/'timestamp')
+    timestamp = ''
+    with open(blocklog_directory/'timestamp', 'r') as f:
+        timestamp = f.read()

     block_log = BlockLog(None, blocklog_directory/'block_log', include_index=False)

     logger.info('Running nodes...')
-
-    nodes = world.nodes()
-    nodes[0].run(wait_for_live=False, replay_from=block_log, time_offset=time_offset)
-    endpoint = nodes[0].get_p2p_endpoint()
-    for node in nodes[1:]:
-        node.config.p2p_seed_node.append(endpoint)
-        node.run(wait_for_live=False, replay_from=block_log, time_offset=time_offset)
-
-    for network in world.networks():
-        network.is_running = True
-    for node in nodes:
-        node.wait_for_live()
+    world.run_all_nodes(block_log, timestamp=timestamp, speedup=3, wait_for_live=True)


 def create_node_with_database(network, url):
diff --git a/tests/integration/system/haf/test_event_massive_sync.py b/tests/integration/system/haf/test_event_massive_sync.py
index 6941ddc549bdef681f23e02c296a83e5d9389a7b..d9fd0d290231e40d47a198983f32447a45f7d277 100644
--- a/tests/integration/system/haf/test_event_massive_sync.py
+++ b/tests/integration/system/haf/test_event_massive_sync.py
@@ -3,7 +3,7 @@ from sqlalchemy.orm.exc import NoResultFound
 from sqlalchemy.orm.exc import MultipleResultsFound
 from test_tools import logger, BlockLog

-from local_tools import get_time_offset_from_file
+from local_tools import get_timestamp_from_file

 MASSIVE_SYNC_BLOCK_NUM = 105

@@ -16,14 +16,14 @@ def test_event_massive_sync(world_with_witnesses_and_database):
     world, session, Base = world_with_witnesses_and_database
     node_under_test = world.network('Beta').node('NodeUnderTest')

-    time_offset = get_time_offset_from_file(Path().resolve()/'timestamp')
+    timestamp = get_timestamp_from_file(Path().resolve()/'timestamp')
     block_log = BlockLog(None, Path().resolve()/'block_log', include_index=False)
     events_queue = Base.classes.events_queue

     # WHEN
     logger.info('Running node...')
-    node_under_test.run(wait_for_live=False, replay_from=block_log, time_offset=time_offset)
+    world.run_all_nodes(block_log, timestamp=timestamp, speedup=1, wait_for_live=False, nodes=[node_under_test])

     # TODO get_p2p_endpoint is workaround to check if replay is finished
     node_under_test.get_p2p_endpoint()