Commit 48b4bb51 authored by Eric Frias, committed by Dan Notestein

Move compression dictionaries to a submodule that shouldn't be needed for testnet

Lots of cleanup of compression code.
parent cefbf3ad
file(GLOB HEADERS "include/hive/chain/*.hpp" "include/hive/chain/util/*.hpp" "include/hive/chain/smt_objects/*.hpp" "include/hive/chain/sps_objects/*.hpp")
add_subdirectory(zstd_dictionaries)
if (NOT BUILD_HIVE_TESTNET)
if( IS_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/compression_dictionaries"
AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/compression_dictionaries/CMakeLists.txt" )
add_subdirectory(compression_dictionaries)
set( COMPRESSION_DICTIONARY_LIBRARIES hive_chain_compression_dictionaries )
set( HAS_COMPRESSION_DICTIONARIES TRUE )
else()
message( FATAL_ERROR "The compression_dicitonaries submodule has not been cloned, it is required for mainnet builds")
endif()
else()
set( COMPRESSION_DICTIONARY_LIBRARIES )
set( HAS_COMPRESSION_DICTIONARIES FALSE )
endif()
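# hint: in a typical checkout the missing submodule can be fetched with
# `git submodule update --init --recursive` (the exact path of the submodule
# within the tree depends on how the repository was cloned)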
## SORT .cpp by most likely to change / break compile
add_library( hive_chain
@@ -42,11 +55,14 @@ add_library( hive_chain
${HEADERS}
)
target_link_libraries( hive_chain hive_jsonball hive_protocol fc chainbase hive_schema appbase libzstd_static hive_chain_zstd_dictionaries
target_link_libraries( hive_chain hive_jsonball hive_protocol fc chainbase hive_schema appbase libzstd_static ${COMPRESSION_DICTIONARY_LIBRARIES}
${PATCH_MERGE_LIB} ${BROTLI_LIBRARIES} ${ZLIB_LIBRARIES})
target_include_directories( hive_chain
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}" "${BROTLI_INCLUDE_DIRS}" "${ZLIB_INCLUDE_DIRS}"
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" )
if ( HAS_COMPRESSION_DICTIONARIES )
target_compile_definitions( hive_chain PUBLIC -DHAS_COMPRESSION_DICTIONARIES )
endif()
if( CLANG_TIDY_EXE )
set_target_properties(
......
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <thread>
#include <mutex>
#include <fc/log/logger.hpp>
#include <fc/exception/exception.hpp>
#include <hive/chain/block_compression_dictionaries.hpp>
#include <hive/chain/raw_compression_dictionaries.hpp>
namespace hive { namespace chain {
#ifdef HAS_COMPRESSION_DICTIONARIES
// we store our dictionaries in compressed form; this is the maximum size
// one will be when decompressed. At the time of writing, we've decided
@@ -10,20 +18,13 @@
#define MAX_DICTIONARY_LENGTH (1 << 20)
std::mutex dictionaries_mutex;
struct raw_dictionary_info
{
const void* buffer;
unsigned size;
};
struct decompressed_raw_dictionary_info
{
std::unique_ptr<char[]> buffer;
size_t size;
};
// maps dictionary_number to raw data (zstd compressed zstd dictionaries)
std::map<uint8_t, raw_dictionary_info> raw_dictionaries;
// maps dictionary_number to zstd dictionaries
typedef std::map<uint8_t, decompressed_raw_dictionary_info> decompressed_raw_dictionary_map_t;
decompressed_raw_dictionary_map_t decompressed_raw_dictionaries;
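// illustrative sketch (an assumption about the elided loader, not code from this
// file) of how one raw entry would be expanded under the MAX_DICTIONARY_LENGTH
// bound, `entry` being a raw_dictionary_info:
//   auto buffer = std::make_unique<char[]>(MAX_DICTIONARY_LENGTH);
//   size_t decompressed_size = ZSTD_decompress(buffer.get(), MAX_DICTIONARY_LENGTH,
//                                              entry.buffer, entry.size);
//   FC_ASSERT(!ZSTD_isError(decompressed_size), "corrupt embedded dictionary");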
@@ -69,10 +70,14 @@ const decompressed_raw_dictionary_info& get_decompressed_raw_dictionary(uint8_t
return decompressed_dictionary_iter->second;
}
void init_raw_dictionaries();
fc::optional<uint8_t> get_best_available_zstd_compression_dictionary_number_for_block(uint32_t block_number)
{
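// integer division maps each million-block span to one dictionary, clamped to the
// newest dictionary actually shipped; e.g. block 12,345,678 wants dictionary 12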
uint8_t last_available_dictionary = raw_dictionaries.rbegin()->first;
return std::min<uint8_t>(block_number / 1000000, last_available_dictionary);
}
ZSTD_DDict* get_zstd_decompression_dictionary(uint8_t dictionary_number)
{
init_raw_dictionaries();
std::lock_guard<std::mutex> guard(dictionaries_mutex);
// try to find the dictionary already fully loaded for decompression
@@ -91,7 +96,6 @@ ZSTD_DDict* get_zstd_decompression_dictionary(uint8_t dictionary_number)
ZSTD_CDict* get_zstd_compression_dictionary(uint8_t dictionary_number, int compression_level)
{
init_raw_dictionaries();
std::lock_guard<std::mutex> guard(dictionaries_mutex);
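// reuse a previously digested dictionary if we already have one for this
// (dictionary_number, compression_level) pair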
auto iter = compression_dictionaries.find(std::make_pair(dictionary_number, compression_level));
if (iter != compression_dictionaries.end())
@@ -104,149 +108,22 @@ ZSTD_CDict* get_zstd_compression_dictionary(uint8_t dictionary_number, int compr
compression_dictionaries[std::make_pair(dictionary_number, compression_level)] = dictionary;
return dictionary;
}
extern "C"
#else // !defined(HAS_COMPRESSION_DICTIONARIES)
fc::optional<uint8_t> get_best_available_zstd_compression_dictionary_number_for_block(uint32_t block_number)
{
#define DICT(prefixed_num) \
extern unsigned char __ ## prefixed_num ## M_dict_zst[]; \
extern unsigned __ ## prefixed_num ## M_dict_zst_len;
DICT(000);
DICT(001);
DICT(002);
DICT(003);
DICT(004);
DICT(005);
DICT(006);
DICT(007);
DICT(008);
DICT(009);
DICT(010);
DICT(011);
DICT(012);
DICT(013);
DICT(014);
DICT(015);
DICT(016);
DICT(017);
DICT(018);
DICT(019);
DICT(020);
DICT(021);
DICT(022);
DICT(023);
DICT(024);
DICT(025);
DICT(026);
DICT(027);
DICT(028);
DICT(029);
DICT(030);
DICT(031);
DICT(032);
DICT(033);
DICT(034);
DICT(035);
DICT(036);
DICT(037);
DICT(038);
DICT(039);
DICT(040);
DICT(041);
DICT(042);
DICT(043);
DICT(044);
DICT(045);
DICT(046);
DICT(047);
DICT(048);
DICT(049);
DICT(050);
DICT(051);
DICT(052);
DICT(053);
DICT(054);
DICT(055);
DICT(056);
DICT(057);
DICT(058);
DICT(059);
DICT(060);
DICT(061);
DICT(062);
#undef DICT
return fc::optional<uint8_t>();
}
void init_raw_dictionaries()
ZSTD_DDict* get_zstd_decompression_dictionary(uint8_t dictionary_number)
{
std::lock_guard<std::mutex> guard(dictionaries_mutex);
static bool initialized = false;
if (initialized)
return;
initialized = true;
#define DICT(num, prefixed_num) \
raw_dictionaries[num] = {(const void*)__ ## prefixed_num ## M_dict_zst, (unsigned)__ ## prefixed_num ## M_dict_zst_len};
DICT(0, 000);
DICT(1, 001);
DICT(2, 002);
DICT(3, 003);
DICT(4, 004);
DICT(5, 005);
DICT(6, 006);
DICT(7, 007);
DICT(8, 008);
DICT(9, 009);
DICT(10, 010);
DICT(11, 011);
DICT(12, 012);
DICT(13, 013);
DICT(14, 014);
DICT(15, 015);
DICT(16, 016);
DICT(17, 017);
DICT(18, 018);
DICT(19, 019);
DICT(20, 020);
DICT(21, 021);
DICT(22, 022);
DICT(23, 023);
DICT(24, 024);
DICT(25, 025);
DICT(26, 026);
DICT(27, 027);
DICT(28, 028);
DICT(29, 029);
DICT(30, 030);
DICT(31, 031);
DICT(32, 032);
DICT(33, 033);
DICT(34, 034);
DICT(35, 035);
DICT(36, 036);
DICT(37, 037);
DICT(38, 038);
DICT(39, 039);
DICT(40, 040);
DICT(41, 041);
DICT(42, 042);
DICT(43, 043);
DICT(44, 044);
DICT(45, 045);
DICT(46, 046);
DICT(47, 047);
DICT(48, 048);
DICT(49, 049);
DICT(50, 050);
DICT(51, 051);
DICT(52, 052);
DICT(53, 053);
DICT(54, 054);
DICT(55, 055);
DICT(56, 056);
DICT(57, 057);
DICT(58, 058);
DICT(59, 059);
DICT(60, 060);
DICT(61, 061);
DICT(62, 062);
FC_THROW_EXCEPTION(fc::key_not_found_exception, "No dictionary ${dictionary_number} available -- hived was not built with compression dictionaries", (dictionary_number));
}
ZSTD_CDict* get_zstd_compression_dictionary(uint8_t dictionary_number, int compression_level)
{
FC_THROW_EXCEPTION(fc::key_not_found_exception, "No dictionary ${dictionary_number} available -- hived was not built with compression dictionaries", (dictionary_number));
}
#endif // HAS_COMPRESSION_DICTIONARIES
} } // end namespace hive::chain
@@ -70,6 +70,11 @@ namespace hive { namespace chain {
bool compression_enabled = true;
// during testing (around block 63M) we found level 15 to be a good balance between ratio
// and compression/decompression times (~3.5ms to compress, ~65μs to decompress),
// so we're making level 15 the default; the dictionaries are also optimized for level 15
int zstd_level = 15;
signed_block read_block_from_offset_and_size(uint64_t offset, uint64_t size);
signed_block_header read_block_header_from_offset_and_size(uint64_t offset, uint64_t size);
};
@@ -402,19 +407,20 @@ namespace hive { namespace chain {
if (my->compression_enabled)
{
// here, we'll just use the first available method, assuming brotli > zstd > zlib.
// here, we'll just use the first available method, assuming zstd > brotli > zlib.
// in the compress_block_log helper app, we try all three and use the best
try
{
std::tuple<std::unique_ptr<char[]>, size_t> brotli_compressed_block = compress_block_brotli(serialized_block.data(), serialized_block.size());
block_start_pos = append_raw(std::get<0>(brotli_compressed_block).get(), std::get<1>(brotli_compressed_block), {block_flags::brotli});
fc::optional<uint8_t> dictionary_number_to_use = get_best_available_zstd_compression_dictionary_number_for_block(b.block_num());
std::tuple<std::unique_ptr<char[]>, size_t> zstd_compressed_block = compress_block_zstd(serialized_block.data(), serialized_block.size(), dictionary_number_to_use, my->zstd_level);
block_start_pos = append_raw(std::get<0>(zstd_compressed_block).get(), std::get<1>(zstd_compressed_block), {block_flags::zstd});
}
catch (const fc::exception&)
{
try
{
std::tuple<std::unique_ptr<char[]>, size_t> zstd_compressed_block = compress_block_zstd(serialized_block.data(), serialized_block.size(), b.block_num());
block_start_pos = append_raw(std::get<0>(zstd_compressed_block).get(), std::get<1>(zstd_compressed_block), {block_flags::zstd});
std::tuple<std::unique_ptr<char[]>, size_t> brotli_compressed_block = compress_block_brotli(serialized_block.data(), serialized_block.size());
block_start_pos = append_raw(std::get<0>(brotli_compressed_block).get(), std::get<1>(brotli_compressed_block), {block_flags::brotli});
}
catch (const fc::exception&)
{
@@ -800,6 +806,11 @@ namespace hive { namespace chain {
my->compression_enabled = enabled;
}
void block_log::set_compression_level(int level)
{
my->zstd_level = level;
}
std::tuple<std::unique_ptr<char[]>, size_t> compress_block_zstd_helper(const char* uncompressed_block_data,
size_t uncompressed_block_size,
fc::optional<uint8_t> dictionary_number,
......
@@ -178,6 +178,7 @@ void database::initialize_state_independent_data(const open_args& args)
{
_block_log.open(args.data_dir / "block_log");
_block_log.set_compression(args.enable_block_log_compression);
_block_log.set_compression_level(args.block_log_compression_level);
});
_shared_file_full_threshold = args.shared_file_full_threshold;
......
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <cstdint>
#define DICTIONARY_NUMBER_FROM_BLOCK_NUMBER(x) ((x) / 1000000)
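// integer division: blocks 0-999999 use dictionary 0, blocks 1000000-1999999 use
// dictionary 1, and so on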
extern "C"
{
struct ZSTD_CDict_s;
typedef struct ZSTD_CDict_s ZSTD_CDict;
struct ZSTD_DDict_s;
typedef struct ZSTD_DDict_s ZSTD_DDict;
}
ZSTD_CDict* get_zstd_compression_dictionary(uint8_t dictionary_number, int compression_level);
ZSTD_DDict* get_zstd_decompression_dictionary(uint8_t dictionary_number);
namespace hive { namespace chain {
fc::optional<uint8_t> get_best_available_zstd_compression_dictionary_number_for_block(uint32_t block_number);
ZSTD_CDict* get_zstd_compression_dictionary(uint8_t dictionary_number, int compression_level);
ZSTD_DDict* get_zstd_decompression_dictionary(uint8_t dictionary_number);
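// minimal usage sketch (illustrative only; cctx, dst, dst_capacity, src, src_size,
// and level are assumed caller-provided, not names from this header):
//   fc::optional<uint8_t> dict_num = get_best_available_zstd_compression_dictionary_number_for_block(block_num);
//   if (dict_num)
//   {
//     ZSTD_CDict* cdict = get_zstd_compression_dictionary(*dict_num, level);
//     size_t compressed_size = ZSTD_compress_usingCDict(cctx, dst, dst_capacity,
//                                                        src, src_size, cdict);
//   }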
} }
@@ -2,10 +2,13 @@
#include <fc/filesystem.hpp>
#include <hive/protocol/block.hpp>
struct ZSTD_CCtx_s;
typedef struct ZSTD_CCtx_s ZSTD_CCtx;
struct ZSTD_DCtx_s;
typedef struct ZSTD_DCtx_s ZSTD_DCtx;
extern "C"
{
struct ZSTD_CCtx_s;
typedef struct ZSTD_CCtx_s ZSTD_CCtx;
struct ZSTD_DCtx_s;
typedef struct ZSTD_DCtx_s ZSTD_DCtx;
}
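// zstd's context types are forward-declared as opaque structs so this header
// doesn't need to pull in zstd.h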
namespace hive { namespace chain {
@@ -83,6 +86,7 @@ namespace hive { namespace chain {
signed_block read_head()const;
const boost::shared_ptr<signed_block> head() const;
void set_compression(bool enabled);
void set_compression_level(int level);
static std::tuple<std::unique_ptr<char[]>, size_t> compress_block_zstd(const char* uncompressed_block_data, size_t uncompressed_block_size, fc::optional<uint8_t> dictionary_number,
fc::optional<int> compression_level = fc::optional<int>(),
......
@@ -90,6 +90,7 @@ namespace chain {
bool replay_in_memory = false;
std::vector< std::string > replay_memory_indices{};
bool enable_block_log_compression = false;
int block_log_compression_level = 15;
// The following fields are only used on reindexing
uint32_t stop_replay_at = 0;
......
#include <map>
#include <cstdint>
namespace hive { namespace chain {
struct raw_dictionary_info
{
const void* buffer;
unsigned size;
};
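// the definition lives in the compression_dictionaries submodule; it maps
// dictionary_number to the zstd-compressed bytes of each trained dictionary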
extern const std::map<uint8_t, raw_dictionary_info> raw_dictionaries;
} }
@@ -127,6 +127,7 @@ class chain_plugin_impl
bool replay_in_memory = false;
std::vector< std::string > replay_memory_indices{};
bool enable_block_log_compression = false;
int block_log_compression_level = 15;
flat_map<uint32_t,block_id_type> loaded_checkpoints;
uint32_t allow_future_time = 5;
@@ -584,6 +585,7 @@ void chain_plugin_impl::initial_settings()
db_open_args.replay_in_memory = replay_in_memory;
db_open_args.replay_memory_indices = replay_memory_indices;
db_open_args.enable_block_log_compression = enable_block_log_compression;
db_open_args.block_log_compression_level = block_log_compression_level;
}
bool chain_plugin_impl::check_data_consistency()
@@ -769,6 +771,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip
("check-locks", bpo::bool_switch()->default_value(false), "Check correctness of chainbase locking" )
("validate-database-invariants", bpo::bool_switch()->default_value(false), "Validate all supply invariants check out" )
("enable-block-log-compression", bpo::bool_switch()->default_value(false), "Compress blocks as they're added to the block log" )
("block-log-compression-level", bpo::value<int>()->default_value(15), "Block log zstd compression level 0 (fast, low compression) - 22 (slow, high compression)" )
#ifdef USE_ALTERNATE_CHAIN_ID
("chain-id", bpo::value< std::string >()->default_value( HIVE_CHAIN_ID ), "chain ID to connect to")
("skeleton-key", bpo::value< std::string >()->default_value(default_skeleton_privkey), "WIF PRIVATE key to be used as skeleton key for all accounts")
@@ -812,6 +815,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
my->validate_invariants = options.at( "validate-database-invariants" ).as<bool>();
my->dump_memory_details = options.at( "dump-memory-details" ).as<bool>();
my->enable_block_log_compression = options.at( "enable-block-log-compression" ).as<bool>();
my->block_log_compression_level = options.at( "block-log-compression-level" ).as<int>();
if( options.count( "flush-state-interval" ) )
my->flush_interval = options.at( "flush-state-interval" ).as<uint32_t>();
else
......
@@ -2,6 +2,7 @@
#include <fc/crypto/hex.hpp>
#include <fc/filesystem.hpp>
#include <hive/chain/block_log.hpp>
#include <hive/chain/block_compression_dictionaries.hpp>
#include <boost/thread/future.hpp>
#include <boost/program_options.hpp>
@@ -10,6 +11,7 @@
#include <chrono>
#include <memory>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <mutex>
#include <condition_variable>
@@ -49,6 +51,7 @@ fc::optional<int> deflate_level;
uint32_t starting_block_number = 1;
fc::optional<uint32_t> blocks_to_compress;
fc::optional<fc::path> raw_block_output_path;
bool benchmark_decompression = false;
std::mutex queue_mutex;
std::condition_variable queue_condition_variable;
@@ -90,7 +93,7 @@ void compress_blocks()
queue_condition_variable.wait(lock);
if (pending_queue.empty() && all_blocks_enqueued)
{
ilog("No more blocks to compress, exiting worker thread");
dlog("No more blocks to compress, exiting worker thread");
return;
}
@@ -116,7 +119,7 @@ void compress_blocks()
};
std::vector<compressed_data> compressed_versions;
fc::optional<uint8_t> dictionary_number_to_use = std::min<uint8_t>(uncompressed->block_number / 1000000, 62);
fc::optional<uint8_t> dictionary_number_to_use = hive::chain::get_best_available_zstd_compression_dictionary_number_for_block(uncompressed->block_number);
// zstd
if (enable_zstd)
@@ -130,7 +133,8 @@ void compress_blocks()
//idump((zstd_compressed_data.size));
fc::time_point after_compress = fc::time_point::now();
hive::chain::block_log::decompress_block_zstd(zstd_compressed_data.data.get(), zstd_compressed_data.size, dictionary_number_to_use, zstd_decompression_context);
if (benchmark_decompression)
hive::chain::block_log::decompress_block_zstd(zstd_compressed_data.data.get(), zstd_compressed_data.size, dictionary_number_to_use, zstd_decompression_context);
fc::time_point after_decompress = fc::time_point::now();
zstd_compressed_data.method = hive::chain::block_log::block_flags::zstd;
zstd_compressed_data.dictionary_number = dictionary_number_to_use;
@@ -156,7 +160,8 @@ void compress_blocks()
fc::time_point before = fc::time_point::now();
std::tie(brotli_compressed_data.data, brotli_compressed_data.size) = hive::chain::block_log::compress_block_brotli(uncompressed->uncompressed_block_data.get(), uncompressed->uncompressed_block_size, brotli_quality);
fc::time_point after_compress = fc::time_point::now();
hive::chain::block_log::decompress_block_brotli(brotli_compressed_data.data.get(), brotli_compressed_data.size);
if (benchmark_decompression)
hive::chain::block_log::decompress_block_brotli(brotli_compressed_data.data.get(), brotli_compressed_data.size);
fc::time_point after_decompress = fc::time_point::now();
brotli_compressed_data.method = hive::chain::block_log::block_flags::brotli;
@@ -181,7 +186,8 @@ void compress_blocks()
fc::time_point before = fc::time_point::now();
std::tie(deflate_compressed_data.data, deflate_compressed_data.size) = hive::chain::block_log::compress_block_deflate(uncompressed->uncompressed_block_data.get(), uncompressed->uncompressed_block_size, deflate_level);
fc::time_point after_compress = fc::time_point::now();
hive::chain::block_log::decompress_block_deflate(deflate_compressed_data.data.get(), deflate_compressed_data.size);
if (benchmark_decompression)
hive::chain::block_log::decompress_block_deflate(deflate_compressed_data.data.get(), deflate_compressed_data.size);
fc::time_point after_decompress = fc::time_point::now();
deflate_compressed_data.method = hive::chain::block_log::block_flags::deflate;
@@ -295,9 +301,14 @@ void drain_completed_queue(const fc::path& block_log)
if (compressed->block_number % 100000 == 0)
{
ilog("at block ${block_number}: total uncompressed ${input_size} compressed to ${output_size}",
float total_compression_ratio = 100.f * (1.f - (float)(total_compressed_size + size_of_start_positions) / (float)(total_uncompressed_size + size_of_start_positions));
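// e.g. 1,000,000 uncompressed bytes reduced to 400,000 compressed reports 60.00%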
std::ostringstream total_compression_ratio_string;
total_compression_ratio_string << std::fixed << std::setprecision(2) << total_compression_ratio;
ilog("at block ${block_number}: total uncompressed ${input_size} compressed to ${output_size} (${total_compression_ratio}%)",
("block_number", compressed->block_number)("input_size", total_uncompressed_size + size_of_start_positions)
("output_size", total_compressed_size + size_of_start_positions));
("output_size", total_compressed_size + size_of_start_positions)
("total_compression_ratio", total_compression_ratio_string.str()));
}
@@ -375,7 +386,7 @@ void fill_pending_queue(const fc::path& block_log)
++current_block_number;
}
ilog("All uncompressed blocks enqueued, exiting the fill_ending_queue() thread");
dlog("All uncompressed blocks enqueued, exiting the fill_ending_queue() thread");
{
std::unique_lock<std::mutex> lock(queue_mutex);
@@ -401,11 +412,12 @@ int main(int argc, char** argv)
boost::program_options::options_description options("Allowed options");
options.add_options()("enable-zstd", boost::program_options::value<std::string>()->default_value("yes"), "Whether to use zstd compression");
options.add_options()("enable-brotli", boost::program_options::value<std::string>()->default_value("yes"), "Whether to use brotli compression");
options.add_options()("enable-deflate", boost::program_options::value<std::string>()->default_value("yes"), "Whether to use deflate compression");
options.add_options()("zstd-level", boost::program_options::value<int>(), zstd_levels_description.c_str());
options.add_options()("enable-brotli", boost::program_options::value<std::string>()->default_value("no"), "Whether to use brotli compression");
options.add_options()("enable-deflate", boost::program_options::value<std::string>()->default_value("no"), "Whether to use deflate compression");
options.add_options()("zstd-level", boost::program_options::value<int>()->default_value(15), zstd_levels_description.c_str());
options.add_options()("brotli-quality", boost::program_options::value<int>(), "The brotli compression quality to use (0 - 11)");
options.add_options()("deflate-level", boost::program_options::value<int>(), "The zlib deflate compression level to use (0 - 9)");
options.add_options()("benchmark-decompression", "decompress each block and report the decompression times at the end");
options.add_options()("jobs,j", boost::program_options::value<int>()->default_value(1), "The number of threads to use for compression");
options.add_options()("input-block-log,i", boost::program_options::value<std::string>()->required(), "The directory containing the input block log");
options.add_options()("output-block-log,o", boost::program_options::value<std::string>()->required(), "The directory to contain the compressed block log");
@@ -425,13 +437,16 @@ int main(int argc, char** argv)
enable_brotli = options_map["enable-brotli"].as<std::string>() == "yes";
enable_deflate = options_map["enable-deflate"].as<std::string>() == "yes";
if (options_map.count("zstd-level"))
zstd_level = options_map["zstd-level"].as<int>();
zstd_level = options_map["zstd-level"].as<int>();
ilog("Compressing using zstd level ${zstd_level}", (zstd_level));
if (options_map.count("brotli-quality"))
brotli_quality = options_map["brotli-quality"].as<int>();
if (options_map.count("deflate-level"))
deflate_level = options_map["deflate-level"].as<int>();
benchmark_decompression = options_map.count("benchmark-decompression") > 0;
unsigned jobs = options_map["jobs"].as<int>();
starting_block_number = options_map["starting-block-number"].as<uint32_t>();
@@ -481,10 +496,11 @@ int main(int argc, char** argv)
("total_zstd_size", total_zstd_size + size_of_start_positions)
(total_zstd_compression_time)
("average_zstd_time", total_zstd_compression_time.count() / total_blocks_processed));
ilog(" decompression total time: ${total_zstd_decompression_time}μs, average time per block: ${average_zstd_decompression_time}μs",
("total_zstd_size", total_zstd_size + size_of_start_positions)
(total_zstd_decompression_time)
("average_zstd_decompression_time", total_zstd_decompression_time.count() / total_blocks_processed));
if (benchmark_decompression)
ilog(" decompression total time: ${total_zstd_decompression_time}μs, average time per block: ${average_zstd_decompression_time}μs",
("total_zstd_size", total_zstd_size + size_of_start_positions)
(total_zstd_decompression_time)
("average_zstd_decompression_time", total_zstd_decompression_time.count() / total_blocks_processed));
}
if (enable_brotli)
{
@@ -492,10 +508,11 @@ int main(int argc, char** argv)
("total_brotli_size", total_brotli_size + size_of_start_positions)
(total_brotli_compression_time)
("average_brotli_time", total_brotli_compression_time.count() / total_blocks_processed));
ilog(" decompression total time: ${total_brotli_decompression_time}μs, average time per block: ${average_brotli_decompression_time}μs",
("total_brotli_size", total_brotli_size + size_of_start_positions)
(total_brotli_decompression_time)
("average_brotli_decompression_time", total_brotli_decompression_time.count() / total_blocks_processed));
if (benchmark_decompression)
ilog(" decompression total time: ${total_brotli_decompression_time}μs, average time per block: ${average_brotli_decompression_time}μs",
("total_brotli_size", total_brotli_size + size_of_start_positions)
(total_brotli_decompression_time)
("average_brotli_decompression_time", total_brotli_decompression_time.count() / total_blocks_processed));
}
if (enable_deflate)
{
@@ -503,10 +520,11 @@ int main(int argc, char** argv)
("total_deflate_size", total_deflate_size + size_of_start_positions)
(total_deflate_compression_time)
("average_deflate_time", total_deflate_compression_time.count() / total_blocks_processed));
ilog(" decompression total time: ${total_deflate_decompression_time}μs, average time per block: ${average_deflate_decompression_time}μs",
("total_deflate_size", total_deflate_size + size_of_start_positions)
(total_deflate_decompression_time)
("average_deflate_decompression_time", total_deflate_decompression_time.count() / total_blocks_processed));
if (benchmark_decompression)
ilog(" decompression total time: ${total_deflate_decompression_time}μs, average time per block: ${average_deflate_decompression_time}μs",
("total_deflate_size", total_deflate_size + size_of_start_positions)
(total_deflate_decompression_time)
("average_deflate_decompression_time", total_deflate_decompression_time.count() / total_blocks_processed));