stages: - build_and_test_phase_1 - build_and_test_phase_2 - docker_build - deploy - cleanup - publish variables: PYTEST_NUMBER_OF_PROCESSES: 8 CTEST_NUMBER_OF_JOBS: 4 GIT_DEPTH: 1 GIT_SUBMODULE_DEPTH: 1 GIT_SUBMODULE_STRATEGY: recursive FF_ENABLE_JOB_CLEANUP: 1 GIT_STRATEGY: clone # uses registry.gitlab.syncad.com/hive/haf/ci-base-image:ubuntu22.04-17 BUILDER_IMAGE_TAG: "@sha256:234d3592e53d4cd7cc6df8e61366e8cbe69ac439355475c34fb2b0daf40e7a26" CI_DEBUG_SERVICES: "true" SETUP_SCRIPTS_PATH: "$CI_PROJECT_DIR/scripts" TEST_TOOLS_NODE_DEFAULT_WAIT_FOR_LIVE_TIMEOUT: 60 DATA_CACHE_HAF_PREFIX: "/cache/replay_data_haf" PIPELINE_DATA_CACHE_HAF_DIRECTORY: "${DATA_CACHE_HAF_PREFIX}_pipeline_${CI_PIPELINE_ID}" BLOCK_LOG_SOURCE_DIR_5M: /blockchain/block_log_5m SNAPSHOTS_PATH: /cache/snapshots_pipeline_${CI_PIPELINE_ID} BLOCK_LOG_SOURCE_DIR_MIRRORNET_5M: /cache/block_log_5m_mirrornet include: - template: Workflows/Branch-Pipelines.gitlab-ci.yml - local: '/scripts/ci-helpers/prepare_data_image_job.yml' - project: 'hive/common-ci-configuration' ref: e74d7109838ff05fdc239bced6a726aa7ad46a9b file: - '/templates/python_projects.gitlab-ci.yml' - '/templates/cache_cleanup.gitlab-ci.yml' - '/templates/docker_image_jobs.gitlab-ci.yml' verify_poetry_lock_sanity: extends: .verify_poetry_lock_sanity_template stage: build_and_test_phase_1 variables: PYPROJECT_DIR: "$CI_PROJECT_DIR/tests/integration/haf-local-tools" tags: - public-runner-docker .haf_image_build: extends: .prepare_haf_image stage: build_and_test_phase_1 tags: - public-runner-docker - hived-for-tests haf_image_build: extends: .haf_image_build variables: BINARY_CACHE_PATH: "$CI_PROJECT_DIR/haf-binaries" HIVE_NETWORK_TYPE: mainnet haf_image_build_testnet: extends: .haf_image_build variables: BINARY_CACHE_PATH: "$CI_PROJECT_DIR/haf-testnet-binaries" HIVE_NETWORK_TYPE: testnet haf_image_build_mirrornet: extends: .haf_image_build variables: BINARY_CACHE_PATH: "$CI_PROJECT_DIR/haf-mirrornet-binaries" HIVE_NETWORK_TYPE: mirrornet .haf-service: &haf-service name: $HAF_IMAGE_NAME alias: haf-instance variables: # Allow access from any network to eliminate CI IP addressing problems when hfm runs as service PG_ACCESS: | "host all haf_admin 0.0.0.0/0 trust" DATA_SOURCE: "${PIPELINE_DATA_CACHE_HAF_DIRECTORY}" LOG_FILE: $CI_JOB_NAME.log command: ["--replay-blockchain", "--stop-at-block=5000000"] .hfm-only-service: &hfm-only-service name: $HAF_IMAGE_NAME alias: hfm-only-instance variables: # Allow access from any network to eliminate CI IP addressing problems when hfm runs as service PG_ACCESS: | "host all haf_admin 0.0.0.0/0 trust" "host all test_app_owner 0.0.0.0/0 trust" "host all hived 0.0.0.0/0 trust" "host all all 0.0.0.0/0 scram-sha-256" command: [ "--execute-maintenance-script=${HAF_SOURCE_DIR}/scripts/maintenance-scripts/sleep_infinity.sh" ] .hfm_functional_tests: extends: .job-defaults image: name: $HAF_IMAGE_NAME entrypoint: [""] stage: build_and_test_phase_1 variables: PG_ACCESS: "host all all 127.0.0.1/32 trust" script: - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/scripts/maintenance-scripts/run_hfm_functional_tests.sh artifacts: paths: - "**/*.log" interruptible: true tags: - public-runner-docker - hived hfm_functional_tests: extends: .hfm_functional_tests needs: - job: haf_image_build artifacts: true hfm_functional_tests_testnet: extends: .hfm_functional_tests needs: - job: haf_image_build_testnet artifacts: true .pytest_based: extends: .job-defaults before_script: - | echo -e "\e[0Ksection_start:$(date +%s):python_venv[collapsed=true]\r\e[0KCreating Python virtual environment..." python3 -m venv --system-site-packages venv/ . venv/bin/activate (cd $CI_PROJECT_DIR/tests/integration/haf-local-tools && poetry install) echo -e "\e[0Ksection_end:$(date +%s):python_venv\r\e[0K" haf_system_tests: extends: .job-defaults stage: build_and_test_phase_2 needs: - job: haf_image_build_testnet artifacts: true image: name: $HAF_IMAGE_NAME entrypoint: [""] variables: PG_ACCESS: "host all all 127.0.0.1/32 trust" script: - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/scripts/maintenance-scripts/run_haf_system_tests.sh artifacts: paths: - "haf_system_tests.log" - "**/generated_during_*" - "**/generated_by_package_fixtures" exclude: - "**/generated_during_*/**/block_log" - "**/generated_during_*/**/block_log.artifacts" - "**/generated_during_*/**/shared_memory.bin" - "**/generated_during_*/**/*.sst" reports: junit: tests/integration/system/haf/report.xml when: always expire_in: 1 week interruptible: true tags: - public-runner-docker dump_snapshot_5m_mirrornet: extends: .job-defaults stage: build_and_test_phase_1 needs: - job: haf_image_build_mirrornet artifacts: true image: "$CI_REGISTRY_IMAGE/ci-base-image$BUILDER_IMAGE_TAG" variables: MIRRORNET_WORKING_DIR: "$CI_PROJECT_DIR/mirrornet_witness_node" BINARY_CACHE_PATH: "$CI_PROJECT_DIR/haf-mirrornet-binaries" HIVED_PATH: "$BINARY_CACHE_PATH/hived" script: #Prepare environment for hived run - cd $CI_PROJECT_DIR/docker - mkdir -vp $MIRRORNET_WORKING_DIR - cd $MIRRORNET_WORKING_DIR - mkdir blockchain - cd blockchain - cp $BLOCK_LOG_SOURCE_DIR_MIRRORNET_5M/block_log . #Prepare snapshot storage - mkdir -vp "$SNAPSHOTS_PATH" - cd "$SNAPSHOTS_PATH" - mkdir -vp 5m_mirrornet/snapshot #Prepare snapshot - $HIVED_PATH -d $MIRRORNET_WORKING_DIR --exit-before-sync --replay --block-log-split=-1 - echo "plugin = state_snapshot" >> $MIRRORNET_WORKING_DIR/config.ini - $HIVED_PATH -d $MIRRORNET_WORKING_DIR --dump-snapshot=snapshot --exit-before-sync --block-log-split=-1 #Store snapshot in cache - mv $MIRRORNET_WORKING_DIR/blockchain "$SNAPSHOTS_PATH/5m_mirrornet" - mv $MIRRORNET_WORKING_DIR/snapshot/snapshot "$SNAPSHOTS_PATH/5m_mirrornet/snapshot" tags: - data-cache-storage haf_system_tests_mirrornet: stage: build_and_test_phase_2 extends: .pytest_based timeout: 2h needs: - job: haf_image_build_mirrornet artifacts: true - job: dump_snapshot_5m_mirrornet artifacts: false image: "$CI_REGISTRY_IMAGE/ci-base-image$BUILDER_IMAGE_TAG" services: - *hfm-only-service variables: BINARY_CACHE_PATH: "$CI_PROJECT_DIR/haf-mirrornet-binaries" HIVED_PATH: "$$BINARY_CACHE_PATH/hived" COMPRESS_BLOCK_LOG_PATH: "$BINARY_CACHE_PATH/compress_block_log" BLOCK_LOG_UTIL_PATH: "$BINARY_CACHE_PATH/block_log_util" GET_DEV_KEY_PATH: "$BINARY_CACHE_PATH/get_dev_key" CLI_WALLET_PATH: "$BINARY_CACHE_PATH/cli_wallet" DB_NAME: haf_block_log DB_URL: "postgresql://haf_admin@hfm-only-instance:5432/$DB_NAME" script: # check that postgres service is ready - psql "$DB_URL" -c "SELECT 1" - mkdir $CI_PROJECT_DIR/tests/integration/system/haf/mirrornet_tests/tmp_block_log # This cp and call to compress_block_log is to keep backcompatibility with pipelines on master branch (where still 1.27.X line is present until next HF will apply), # where nodes does not support new format of block_log.artifacts file; # Issue: https://gitlab.syncad.com/hive/haf/-/issues/151 # copy block_log to tmp location - cp $BLOCK_LOG_SOURCE_DIR_MIRRORNET_5M/block_log $CI_PROJECT_DIR/tests/integration/system/haf/mirrornet_tests/tmp_block_log # copy block_log and generate new artifacts with compress_block_log util - time $COMPRESS_BLOCK_LOG_PATH --input-block-log $CI_PROJECT_DIR/tests/integration/system/haf/mirrornet_tests/tmp_block_log/block_log --output-block-log $CI_PROJECT_DIR/tests/integration/system/haf/mirrornet_tests/block_log --decompress # drop tmp location - rm -r $CI_PROJECT_DIR/tests/integration/system/haf/mirrornet_tests/tmp_block_log # prepare environment and run tests - cd $CI_PROJECT_DIR/tests/integration/system/haf/mirrornet_tests && ls -la - pytest -vvv --junitxml report.xml --timeout=3600 --block-log-dir-path=$CI_PROJECT_DIR/tests/integration/system/haf/mirrornet_tests --snapshot-path="$SNAPSHOTS_PATH/5m_mirrornet/snapshot/snapshot" -n ${PYTEST_NUMBER_OF_PROCESSES} -m mirrornet - du -Sah . | sort -rh artifacts: paths: - "**/generated_during_*" - "**/generated_by_package_fixtures" exclude: - "**/generated_during_*/**/block_log" - "**/generated_during_*/**/block_log.artifacts" - "**/generated_during_*/**/shared_memory.bin" - "**/generated_during_*/**/*.sst" reports: junit: tests/integration/system/haf/mirrornet_tests/report.xml when: always expire_in: 1 week interruptible: true tags: - data-cache-storage applications_system_tests: extends: .job-defaults stage: build_and_test_phase_2 needs: - job: haf_image_build_testnet artifacts: true image: name: $HAF_IMAGE_NAME entrypoint: [""] variables: PG_ACCESS: "host all all 127.0.0.1/32 trust" script: - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/scripts/maintenance-scripts/run_application_system_tests.sh artifacts: paths: - "applications_system_tests.log" - "**/generated_during_*" - "**/generated_by_package_fixtures" exclude: - "**/generated_during_*/**/block_log" - "**/generated_during_*/**/block_log.artifacts" - "**/generated_during_*/**/shared_memory.bin" - "**/generated_during_*/**/*.sst" reports: junit: tests/integration/system/applications/report.xml when: always expire_in: 1 week interruptible: true tags: - public-runner-docker .replay_step: extends: .job-defaults stage: build_and_test_phase_2 needs: - job: haf_image_build artifacts: true image: "$CI_REGISTRY_IMAGE/ci-base-image$BUILDER_IMAGE_TAG" services: - *hfm-only-service variables: PATTERNS_PATH: "" #Should be overriden in derived jobs RO_BLOCK_LOG_DIRECTORY: "/blockchain/block_log_5m/" BINARY_CACHE_PATH: "$CI_PROJECT_DIR/haf-testnet-binaries" HIVED_PATH: "$CI_PROJECT_DIR/haf-binaries/hived" DB_NAME: haf_block_log DB_ADMIN: "haf_admin" POSTGRES_HOST: "hfm-only-instance" POSTGRES_PORT: "5432" DB_URL: "postgresql://haf_admin@hfm-only-instance:5432/haf_block_log" REPLAY: "--force-replay" before_script: - !reference [.pytest_based, before_script] - ls -lath $CI_PROJECT_DIR # replay - | echo -e "\e[0Ksection_start:$(date +%s):replay[collapsed=true]\r\e[0KExecuting replay..." test -n "$PATTERNS_PATH" mkdir -vp "${PATTERNS_PATH}/blockchain" cp "${RO_BLOCK_LOG_DIRECTORY}/block_log" "${PATTERNS_PATH}/blockchain/" cd $CI_PROJECT_DIR/tests/integration/replay $HIVED_PATH --data-dir $PATTERNS_PATH $REPLAY --exit-before-sync --psql-url $DB_URL 2>&1 | tee -i node_logs.log echo -e "\e[0Ksection_end:$(date +%s):replay\r\e[0K" script: - pytest --junitxml report.xml artifacts: paths: - "**/node_logs.log" - "**/node_logs1.log" - "**/generated_during_*" - "**/generated_by_package_fixtures" - "**/*.out.csv" reports: junit: tests/integration/replay/report.xml when: always expire_in: 1 week interruptible: true tags: - public-runner-docker - hived-for-tests replay_with_haf: extends: .replay_step variables: PATTERNS_PATH: "$CI_PROJECT_DIR/tests/integration/replay/patterns/no_filter" replay_with_haf_from_4_9m: extends: .replay_step variables: PATTERNS_PATH: "$CI_PROJECT_DIR/tests/integration/replay/patterns_4_9m/no_filter" replay_accounts_filtered_with_haf_from_4_9m: extends: .replay_step variables: PATTERNS_PATH: "$CI_PROJECT_DIR/tests/integration/replay/patterns_4_9m/accounts_filtered" replay_accounts_filtered_with_haf: extends: .replay_step variables: PATTERNS_PATH: "$CI_PROJECT_DIR/tests/integration/replay/patterns/accounts_filtered" replay_accounts_operations_filtered_with_haf: extends: .replay_step variables: PATTERNS_PATH: "$CI_PROJECT_DIR/tests/integration/replay/patterns/accounts_operations_filtered" replay_virtual_operations_filtered_with_haf: extends: .replay_step variables: PATTERNS_PATH: "$CI_PROJECT_DIR/tests/integration/replay/patterns/virtual_operations_filtered" replay_operations_filtered_with_haf: extends: .replay_step variables: PATTERNS_PATH: "$CI_PROJECT_DIR/tests/integration/replay/patterns/operations_filtered" replay_body_operations_filtered_with_haf: extends: .replay_step variables: PATTERNS_PATH: "$CI_PROJECT_DIR/tests/integration/replay/patterns/body_operations_filtered" replay_accounts_body_operations_filtered_with_haf: extends: .replay_step variables: PATTERNS_PATH: "$CI_PROJECT_DIR/tests/integration/replay/patterns/accounts_body_operations_filtered" replay_with_update: extends: .job-defaults image: name: $HAF_IMAGE_NAME entrypoint: [""] stage: build_and_test_phase_2 needs: - job: haf_image_build artifacts: true variables: PG_ACCESS: "host all all 127.0.0.1/32 trust" script: - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/scripts/maintenance-scripts/run_replay_with_update.sh artifacts: paths: - "replay_with_update.log" - "node_logs.log" - "node_logs1.log" tags: - public-runner-docker - hived-for-tests replay_with_keyauths: extends: .job-defaults image: name: $HAF_IMAGE_NAME entrypoint: [""] stage: build_and_test_phase_2 needs: - job: haf_image_build artifacts: true variables: PG_ACCESS: "host all all 127.0.0.1/32 trust" script: - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/tests/integration/state_provider/run_replay_with_keyauth.sh artifacts: paths: - "replay_with_keyauths.log" - "node_logs.log" - "node_logs1.log" tags: - public-runner-docker - hived-for-tests replay_with_json_metadata: extends: .job-defaults image: name: $HAF_IMAGE_NAME entrypoint: [""] stage: build_and_test_phase_2 needs: - job: haf_image_build artifacts: true variables: PG_ACCESS: "host all all 127.0.0.1/32 trust" script: - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/tests/integration/state_provider/run_replay_with_json_metadata.sh artifacts: paths: - "replay_with_json_metadata.log" - "node_logs.log" - "node_logs1.log" tags: - public-runner-docker - hived-for-tests replay_with_app: extends: .job-defaults image: name: $HAF_IMAGE_NAME entrypoint: [""] stage: build_and_test_phase_2 needs: - job: haf_image_build artifacts: true variables: PG_ACCESS: "host all all 127.0.0.1/32 trust" script: - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/scripts/maintenance-scripts/run_live_replay_with_app.sh artifacts: paths: - "replay_with_app.log" - "node_logs.log" - "node_logs1.log" tags: - public-runner-docker - hived-for-tests replay_restarts_with_app: extends: .job-defaults image: name: $HAF_IMAGE_NAME entrypoint: [""] stage: build_and_test_phase_2 needs: - job: haf_image_build artifacts: true variables: PG_ACCESS: "host all all 127.0.0.1/32 trust" script: - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/scripts/maintenance-scripts/run_live_replay_with_restarts_and_app.sh artifacts: paths: - "node_logs.log" tags: - public-runner-docker - hived-for-tests replay_with_restart: extends: .job-defaults image: name: $HAF_IMAGE_NAME entrypoint: [""] stage: build_and_test_phase_2 needs: - job: haf_image_build artifacts: true variables: PG_ACCESS: "host all all 127.0.0.1/32 trust" script: - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/scripts/maintenance-scripts/run_replay_with_breaks.sh artifacts: paths: - "node_logs.log" tags: - public-runner-docker - hived-for-tests update_with_wrong_table_schema: extends: .job-defaults image: name: $HAF_IMAGE_NAME entrypoint: [""] stage: build_and_test_phase_2 needs: - job: haf_image_build artifacts: true variables: PG_ACCESS: "host all all 127.0.0.1/32 trust" script: - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/scripts/maintenance-scripts/run_update_with_wrong_table_schema.sh artifacts: paths: - "update_with_wrong_table_schema.log" - "node_logs.log" tags: - public-runner-docker - hived-for-tests block_api_tests: extends: .replay_step image: $CI_REGISTRY_IMAGE/ci-base-image:ubuntu22.04-8-jmeter needs: - job: haf_image_build artifacts: true variables: FF_NETWORK_PER_BUILD: 1 PATTERNS_PATH: "$CI_PROJECT_DIR/tests/integration/replay/patterns/accounts_body_operations_filtered" BENCHMARK_DIR: "$CI_PROJECT_DIR/hive/tests/python/hive-local-tools/tests_api/benchmarks" script: # setup - | echo -e "\e[0Ksection_start:$(date +%s):blocks_api_test_setup[collapsed=true]\r\e[0KSetting up blocks api tests..." psql $DB_URL -c "CREATE ROLE bench LOGIN PASSWORD 'mark' INHERIT IN ROLE hived_group;" export BENCHMARK_DB_URL="postgresql://bench:mark@hfm-only-instance:5432/$DB_NAME" echo -e "\e[0Ksection_end:$(date +%s):blocks_api_test_setup\r\e[0K" # run pattern tests - | echo -e "\e[0Ksection_start:$(date +%s):blocks_api_test[collapsed=true]\r\e[0KRunning blocks api tests..." cd "$BENCHMARK_DIR" python3 benchmark.py --loops 200 --threads 5 -n blocks_api -p 5432 -c perf_5M_light.csv --skip-version-check -d wdir --postgres $BENCHMARK_DB_URL --call-style postgres 2>&1 | tee -i $CI_PROJECT_DIR/python_benchmark.log echo -e "\e[0Ksection_end:$(date +%s):blocks_api_test\r\e[0K" # generate JUNIT report file - | echo -e "\e[0Ksection_start:$(date +%s):report[collapsed=true]\r\e[0KGenerating Junit report..." m2u --input "$BENCHMARK_DIR/wdir/raw_jmeter_report.xml" --output $CI_PROJECT_DIR/report.junit echo -e "\e[0Ksection_end:$(date +%s):report\r\e[0K" artifacts: name: "$CI_JOB_NAME-$CI_COMMIT_REF_NAME" reports: junit: $CI_PROJECT_DIR/report.junit paths: - $BENCHMARK_DIR/wdir - $CI_PROJECT_DIR/python_benchmark.log when: always expire_in: 1 week tags: - public-runner-docker - hived-for-tests prepare_haf_data: extends: .prepare_haf_data_5m needs: - job: haf_image_build artifacts: true stage: build_and_test_phase_1 variables: HIVE_NETWORK_TYPE: mainnet BLOCK_LOG_SOURCE_DIR: "$BLOCK_LOG_SOURCE_DIR_5M" CONFIG_INI_SOURCE: "$CI_PROJECT_DIR/docker/config_5M.ini" tags: - data-cache-storage # Creates a temporary copy of replay data for the exclusive use of current pipeline replay_data_copy: extends: .job-defaults image: "${CI_REGISTRY_IMAGE}/ci-base-image${BUILDER_IMAGE_TAG}" stage: build_and_test_phase_1 needs: - prepare_haf_data - haf_image_build script: - | set -e echo "Copying replay data to ${PIPELINE_DATA_CACHE_HAF_DIRECTORY:?}" sudo cp -a "${DATA_CACHE_HAF_PREFIX:?}_${HAF_COMMIT:?}" "${PIPELINE_DATA_CACHE_HAF_DIRECTORY:?}" tags: - data-cache-storage dead_app_auto_detach: extends: .job-defaults stage: build_and_test_phase_2 image: "$CI_REGISTRY_IMAGE/ci-base-image$BUILDER_IMAGE_TAG" needs: - job: prepare_haf_data artifacts: true - job: replay_data_copy services: - name: $HAF_IMAGE_NAME alias: haf-instance variables: # Allow access from any network to eliminate CI IP addressing problems when hfm runs as service PG_ACCESS: | "host all haf_admin 0.0.0.0/0 trust" "host all hived 0.0.0.0/0 trust" "host all test_app_owner 0.0.0.0/0 trust" DATA_SOURCE: "${PIPELINE_DATA_CACHE_HAF_DIRECTORY}" LOG_FILE: $CI_JOB_NAME.log command: ["--replay-blockchain", "--stop-at-block=1000000"] variables: HIVED_UID: $HIVED_UID HAF_COMMIT: $HAF_COMMIT script: - $CI_PROJECT_DIR/tests/integration/system/applications/auto_detaching/scenario1.sh "haf-instance" artifacts: paths: - scenario*.log tags: - data-cache-storage start_haf_as_service: extends: .job-defaults stage: build_and_test_phase_2 image: "$CI_REGISTRY_IMAGE/ci-base-image$BUILDER_IMAGE_TAG" needs: - job: prepare_haf_data artifacts: true - job: replay_data_copy services: - *haf-service variables: HAF_POSTGRES_URL: postgresql://haf_admin@haf-instance:5432/haf_block_log HIVED_UID: $HIVED_UID HAF_COMMIT: $HAF_COMMIT script: - curl -I haf-instance:8091 || (echo "error connecting to service hived-instance" && false) - | curl -XPOST -d '{ "jsonrpc": "2.0", "method": "database_api.get_dynamic_global_properties", "params": { }, "id": 2 }' haf-instance:8091 after_script: - rm docker_entrypoint.log -f - cp "${DATA_CACHE_HAF_PREFIX}_${HAF_COMMIT}/datadir/$CI_JOB_NAME.log" "$CI_PROJECT_DIR/docker_entrypoint.log" artifacts: paths: - docker_entrypoint.log tags: - data-cache-storage cleanup_haf_cache_manual: extends: .cleanup_cache_manual_template stage: cleanup variables: CLEANUP_PATH_PATTERN: "/cache/replay_data_haf_*" tags: - data-cache-storage cleanup_haf_snapshot_from_cache: extends: .cleanup_cache_manual_template stage: cleanup variables: CLEANUP_PATH_PATTERN: "/cache/snapshots_pipeline_*" tags: - data-cache-storage cleanup_old_haf_cache: extends: .cleanup_old_cache_template stage: cleanup variables: CLEANUP_PATH_PATTERN: "/cache/replay_data_haf_*" tags: - data-cache-storage # Deletes replay data used by the tests and created by replay_data_copy cleanup_pipeline_cache: needs: - replay_data_copy - start_haf_as_service - dead_app_auto_detach extends: - .cleanup_cache_manual_template stage: cleanup variables: CLEANUP_PATH_PATTERN: "${DATA_CACHE_HAF_PREFIX}_pipeline_*" when: always tags: - data-cache-storage build_and_publish_image: stage: publish extends: .publish_docker_image_template before_script: - !reference [.publish_docker_image_template, before_script] script: - scripts/ci-helpers/build_and_publish_instance.sh tags: - public-runner-docker - hived-for-tests double_haf_replay_tests: extends: .docker_image_builder_job stage: build_and_test_phase_2 variables: CONFIG_INI_SOURCE: "$CI_PROJECT_DIR/docker/config_5M.ini" needs: - job: haf_image_build artifacts: true script: - docker container ps -a - docker images - docker context ls tags: - public-runner-docker - hived-for-tests op_body_filter_tests: extends: .job-defaults stage: build_and_test_phase_2 needs: - job: haf_image_build_testnet artifacts: true image: name: $HAF_IMAGE_NAME entrypoint: [""] script: - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/scripts/maintenance-scripts/run_op_body_filter_tests.sh artifacts: paths: - "op_body_filter_tests.log" - "**/generated_during_*" - "**/generated_by_package_fixtures" exclude: - "**/generated_during_*/**/block_log" - "**/generated_during_*/**/block_log.artifacts" - "**/generated_during_*/**/*.sst" reports: junit: tests/integration/tools/op_body_filter/report.xml when: always expire_in: 1 week interruptible: true tags: - public-runner-docker