use crate::error::{Result, ZiporaError};
use crate::entropy::{
EntropyStats,
huffman::{HuffmanEncoder, HuffmanDecoder, HuffmanTree},
rans::{Rans64Encoder, ParallelX2, ParallelX4, ParallelX8},
fse::{FseEncoder, FseConfig},
};
use std::collections::HashMap;
use std::marker::PhantomData;
use std::time::Instant;
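/// Marker trait tying an encoder to a fixed number of interleaved streams,
/// mirroring the `ParallelX2`/`ParallelX4`/`ParallelX8` rANS variants.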
pub trait ParallelVariant {
const STREAMS: usize;
const NAME: &'static str;
}
pub struct ParallelX2Variant;
impl ParallelVariant for ParallelX2Variant {
const STREAMS: usize = 2;
const NAME: &'static str = "x2";
}
pub struct ParallelX4Variant;
impl ParallelVariant for ParallelX4Variant {
const STREAMS: usize = 4;
const NAME: &'static str = "x4";
}
pub struct ParallelX8Variant;
impl ParallelVariant for ParallelX8Variant {
const STREAMS: usize = 8;
const NAME: &'static str = "x8";
}
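/// Tuning knobs for parallel entropy coding. `block_size` and
/// `min_parallel_size` are in bytes; inputs smaller than `min_parallel_size`
/// are encoded on a single stream.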
#[derive(Debug, Clone)]
pub struct ParallelConfig {
pub num_streams: usize,
pub block_size: usize,
pub adaptive_blocks: bool,
pub min_parallel_size: usize,
pub load_balancing: bool,
}
impl Default for ParallelConfig {
fn default() -> Self {
Self {
num_streams: 4,
            block_size: 64 * 1024,
            adaptive_blocks: true,
            min_parallel_size: 128 * 1024,
            load_balancing: true,
}
}
}
impl ParallelConfig {
pub fn high_throughput() -> Self {
Self {
num_streams: 8,
            block_size: 256 * 1024,
            adaptive_blocks: true,
            min_parallel_size: 512 * 1024,
            load_balancing: true,
}
}
pub fn low_latency() -> Self {
Self {
num_streams: 2,
            block_size: 16 * 1024,
            adaptive_blocks: false,
            min_parallel_size: 32 * 1024,
            load_balancing: false,
}
}
pub fn balanced() -> Self {
Self::default()
}
}
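/// Multi-stream Huffman encoder parameterized over a [`ParallelVariant`].
/// A minimal usage sketch (the surrounding crate/module path is assumed):
///
/// ```ignore
/// let mut encoder =
///     ParallelHuffmanEncoder::<ParallelX4Variant>::new(ParallelConfig::default())?;
/// let compressed = encoder.encode(b"input data")?;
/// ```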
pub struct ParallelHuffmanEncoder<P: ParallelVariant> {
shared_tree: Option<HuffmanTree>,
encoders: Vec<HuffmanEncoder>,
config: ParallelConfig,
stats: EntropyStats,
_phantom: PhantomData<P>,
}
impl<P: ParallelVariant> ParallelHuffmanEncoder<P> {
pub fn new(config: ParallelConfig) -> Result<Self> {
Ok(Self {
shared_tree: None,
            encoders: Vec::new(),
            config,
stats: EntropyStats::new(0, 0, 0.0),
_phantom: PhantomData,
})
}
pub fn train(&mut self, training_data: &[u8]) -> Result<()> {
self.encoders.clear();
for _ in 0..P::STREAMS {
let encoder = HuffmanEncoder::new(training_data)?;
self.encoders.push(encoder);
}
self.shared_tree = Some(HuffmanTree::from_data(training_data)?);
Ok(())
}
    pub fn encode(&mut self, data: &[u8]) -> Result<Vec<u8>> {
        if self.encoders.is_empty() {
            self.train(data)?;
        }
        // NOTE: both the small-input and the large-input paths currently encode
        // on a single stream; `split_data_into_blocks` and `merge_blocks`
        // implement the planned block-parallel path but are not wired in yet.
        let single_stream = data.len() < self.config.min_parallel_size;
        let start_time = Instant::now();
        let compressed = self.encoders[0].encode(data)?;
        let elapsed = start_time.elapsed();
        let entropy = EntropyStats::calculate_entropy(data);
        self.stats = EntropyStats::new(data.len(), compressed.len(), entropy);
        println!(
            "Parallel Huffman ({}) - Encoded {} bytes to {} bytes in {:.2}ms{}",
            P::NAME,
            data.len(),
            compressed.len(),
            elapsed.as_secs_f64() * 1000.0,
            if single_stream { " (single stream)" } else { "" }
        );
        Ok(compressed)
    }
    fn split_data_into_blocks<'a>(&self, data: &'a [u8]) -> Vec<&'a [u8]> {
        if self.config.load_balancing {
            // Distribute bytes as evenly as possible across the streams: the
            // first `remainder` blocks receive one extra byte each. Empty
            // blocks (when data is shorter than the stream count) are skipped.
            let chunk_size = data.len() / P::STREAMS;
            let remainder = data.len() % P::STREAMS;
            let mut blocks = Vec::with_capacity(P::STREAMS);
            let mut offset = 0;
            for i in 0..P::STREAMS {
                let size = chunk_size + usize::from(i < remainder);
                if size > 0 {
                    blocks.push(&data[offset..offset + size]);
                    offset += size;
                }
            }
            blocks
        } else {
            // Fixed-size chunking; with adaptive blocks the chunk size tracks
            // the per-stream share, clamped to [block_size / 4, block_size * 4].
            let block_size = if self.config.adaptive_blocks {
                (data.len() / P::STREAMS)
                    .clamp(self.config.block_size / 4, self.config.block_size * 4)
            } else {
                self.config.block_size
            };
            data.chunks(block_size.max(1)).collect()
        }
    }
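    /// Serializes encoded blocks into a single container. Layout (all integers
    /// little-endian u32): block count, stream count, one uncompressed length
    /// per block (recomputed by re-splitting `original_data` with the same
    /// strategy), one compressed length per block, then the concatenated
    /// compressed payloads. Part of the planned block-parallel path; `encode`
    /// does not call it yet.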
fn merge_blocks(&self, blocks: Vec<Vec<u8>>, original_data: &[u8]) -> Result<Vec<u8>> {
let mut output = Vec::new();
output.extend_from_slice(&(blocks.len() as u32).to_le_bytes());
output.extend_from_slice(&(P::STREAMS as u32).to_le_bytes());
let original_blocks = self.split_data_into_blocks(original_data);
for block in &original_blocks {
output.extend_from_slice(&(block.len() as u32).to_le_bytes());
}
for block in &blocks {
output.extend_from_slice(&(block.len() as u32).to_le_bytes());
}
for block in blocks {
output.extend_from_slice(&block);
}
Ok(output)
}
pub fn stats(&self) -> &EntropyStats {
&self.stats
}
pub fn variant_name(&self) -> &'static str {
P::NAME
}
}
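/// Decoder counterpart to [`ParallelHuffmanEncoder`]; it must be initialized
/// with the encoder's Huffman tree via [`Self::set_tree`] before decoding.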
pub struct ParallelHuffmanDecoder<P: ParallelVariant> {
shared_tree: Option<HuffmanTree>,
decoders: Vec<HuffmanDecoder>,
config: ParallelConfig,
_phantom: PhantomData<P>,
}
impl<P: ParallelVariant> ParallelHuffmanDecoder<P> {
pub fn new(config: ParallelConfig) -> Self {
Self {
shared_tree: None,
decoders: Vec::new(),
config,
_phantom: PhantomData,
}
}
pub fn set_tree(&mut self, tree: HuffmanTree) -> Result<()> {
self.shared_tree = Some(tree.clone());
self.decoders.clear();
for _ in 0..P::STREAMS {
let decoder = HuffmanDecoder::new(tree.clone());
self.decoders.push(decoder);
}
Ok(())
}
    pub fn decode(&mut self, data: &[u8], output_length: usize) -> Result<Vec<u8>> {
        if self.decoders.is_empty() {
            return Err(ZiporaError::invalid_data("Decoder not initialized with tree"));
        }
        // Mirrors the encoder: decoding currently runs on a single stream.
        self.decoders[0].decode(data, output_length)
    }
}
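/// Wraps the Huffman, rANS, and FSE back-ends and selects one per input via
/// [`Self::select_optimal_encoding`]. A minimal sketch of the intended call
/// sequence (error handling elided):
///
/// ```ignore
/// let mut encoder = AdaptiveParallelEncoder::new()?;
/// let compressed = encoder.encode_adaptive(&data)?;
/// ```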
pub struct AdaptiveParallelEncoder {
huffman_x2: ParallelHuffmanEncoder<ParallelX2Variant>,
huffman_x4: ParallelHuffmanEncoder<ParallelX4Variant>,
huffman_x8: ParallelHuffmanEncoder<ParallelX8Variant>,
rans_x2: Rans64Encoder<ParallelX2>,
rans_x4: Rans64Encoder<ParallelX4>,
rans_x8: Rans64Encoder<ParallelX8>,
fse_encoder: FseEncoder,
}
impl AdaptiveParallelEncoder {
pub fn new() -> Result<Self> {
        let config = ParallelConfig::default();
        // Uniform frequency model used to initialize the rANS encoders.
        let frequencies = [1u32; 256];
Ok(Self {
huffman_x2: ParallelHuffmanEncoder::new(config.clone())?,
huffman_x4: ParallelHuffmanEncoder::new(config.clone())?,
huffman_x8: ParallelHuffmanEncoder::new(config.clone())?,
rans_x2: Rans64Encoder::<ParallelX2>::new(&frequencies)?,
rans_x4: Rans64Encoder::<ParallelX4>::new(&frequencies)?,
rans_x8: Rans64Encoder::<ParallelX8>::new(&frequencies)?,
fse_encoder: FseEncoder::new(FseConfig::default())?,
})
}
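    /// Picks an `(algorithm, variant)` pair from simple heuristics: low entropy
    /// (< 2.0 bits/byte) favors Huffman; high entropy (> 6.0) over a wide
    /// alphabet (> 128 distinct bytes) favors rANS; inputs over 1 MiB otherwise
    /// go to FSE. The stream count (x2/x4/x8) scales with input size.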
pub fn select_optimal_encoding(&self, data: &[u8]) -> (&'static str, &'static str) {
let entropy = EntropyStats::calculate_entropy(data);
let size = data.len();
let mut frequencies = [0u32; 256];
for &byte in data {
frequencies[byte as usize] += 1;
}
        let unique_symbols = frequencies.iter().filter(|&&f| f > 0).count();
        // Skewness (share of the most frequent byte) is computed but not yet
        // used by the selection logic below.
        let max_freq = frequencies.iter().max().unwrap_or(&0);
        let _skewness = (*max_freq as f64) / (data.len() as f64);
        let algorithm = if entropy < 2.0 {
            "huffman"
        } else if entropy > 6.0 && unique_symbols > 128 {
            "rans"
        } else if size > 1024 * 1024 {
            "fse"
        } else {
            "huffman"
        };
        let variant = if size < 64 * 1024 {
            "x2"
        } else if size < 1024 * 1024 {
            "x4"
        } else {
            "x8"
        };
        (algorithm, variant)
}
pub fn encode_adaptive(&mut self, data: &[u8]) -> Result<Vec<u8>> {
let (algorithm, variant) = self.select_optimal_encoding(data);
let start_time = Instant::now();
let result = match (algorithm, variant) {
("huffman", "x2") => {
self.huffman_x2.train(data)?;
self.huffman_x2.encode(data)
},
("huffman", "x4") => {
self.huffman_x4.train(data)?;
self.huffman_x4.encode(data)
},
("huffman", "x8") => {
self.huffman_x8.train(data)?;
self.huffman_x8.encode(data)
},
("rans", "x2") => self.rans_x2.encode(data),
("rans", "x4") => self.rans_x4.encode(data),
("rans", "x8") => self.rans_x8.encode(data),
("fse", _) => self.fse_encoder.compress(data),
_ => {
self.huffman_x4.train(data)?;
self.huffman_x4.encode(data)
}
};
        let elapsed = start_time.elapsed();
        let compressed = result?;
        let ratio = data.len() as f64 / compressed.len() as f64;
        println!(
            "Adaptive encoding: {} {} - {:.2}x compression in {:.2}ms",
            algorithm, variant, ratio, elapsed.as_secs_f64() * 1000.0
        );
        Ok(compressed)
}
}
impl Default for AdaptiveParallelEncoder {
fn default() -> Self {
Self::new().unwrap_or_else(|e| {
panic!("AdaptiveParallelEncoder creation failed in Default: {}. \
This indicates severe memory pressure.", e)
})
}
}
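/// Benchmarks the Huffman, rANS, and FSE encoders over one test buffer and
/// records per-variant results keyed by `"algorithm_variant"`.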
pub struct ParallelBenchmark {
test_data: Vec<u8>,
results: HashMap<String, BenchmarkResult>,
}
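/// One benchmark measurement; `throughput_mbps` is input MiB processed per
/// second and `compression_ratio` is input size divided by output size.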
#[derive(Debug, Clone)]
pub struct BenchmarkResult {
pub algorithm: String,
pub variant: String,
pub input_size: usize,
pub output_size: usize,
pub compression_ratio: f64,
pub encode_time: std::time::Duration,
pub throughput_mbps: f64,
}
impl ParallelBenchmark {
pub fn new(test_data: Vec<u8>) -> Self {
Self {
test_data,
results: HashMap::new(),
}
}
pub fn run_comprehensive_benchmark(&mut self) -> Result<()> {
println!("Running comprehensive parallel encoding benchmark...");
println!("Test data size: {} bytes", self.test_data.len());
self.benchmark_huffman_variants()?;
self.benchmark_rans_variants()?;
self.benchmark_fse()?;
self.print_benchmark_summary();
Ok(())
}
fn benchmark_huffman_variants(&mut self) -> Result<()> {
let config = ParallelConfig::default();
let mut encoder_x2 = ParallelHuffmanEncoder::<ParallelX2Variant>::new(config.clone())?;
let start = Instant::now();
let compressed = encoder_x2.encode(&self.test_data)?;
let elapsed = start.elapsed();
self.record_result("huffman", "x2", compressed.len(), elapsed);
let mut encoder_x4 = ParallelHuffmanEncoder::<ParallelX4Variant>::new(config.clone())?;
let start = Instant::now();
let compressed = encoder_x4.encode(&self.test_data)?;
let elapsed = start.elapsed();
self.record_result("huffman", "x4", compressed.len(), elapsed);
let mut encoder_x8 = ParallelHuffmanEncoder::<ParallelX8Variant>::new(config)?;
let start = Instant::now();
let compressed = encoder_x8.encode(&self.test_data)?;
let elapsed = start.elapsed();
self.record_result("huffman", "x8", compressed.len(), elapsed);
Ok(())
}
fn benchmark_rans_variants(&mut self) -> Result<()> {
let mut frequencies = [0u32; 256];
for &byte in &self.test_data {
frequencies[byte as usize] += 1;
}
let encoder_x2 = Rans64Encoder::<ParallelX2>::new(&frequencies)?;
let start = Instant::now();
let compressed = encoder_x2.encode(&self.test_data)?;
let elapsed = start.elapsed();
self.record_result("rans", "x2", compressed.len(), elapsed);
let encoder_x4 = Rans64Encoder::<ParallelX4>::new(&frequencies)?;
let start = Instant::now();
let compressed = encoder_x4.encode(&self.test_data)?;
let elapsed = start.elapsed();
self.record_result("rans", "x4", compressed.len(), elapsed);
let encoder_x8 = Rans64Encoder::<ParallelX8>::new(&frequencies)?;
let start = Instant::now();
let compressed = encoder_x8.encode(&self.test_data)?;
let elapsed = start.elapsed();
self.record_result("rans", "x8", compressed.len(), elapsed);
Ok(())
}
fn benchmark_fse(&mut self) -> Result<()> {
let mut encoder = FseEncoder::new(FseConfig::high_compression())?;
let start = Instant::now();
let compressed = encoder.compress(&self.test_data)?;
let elapsed = start.elapsed();
self.record_result("fse", "enhanced", compressed.len(), elapsed);
Ok(())
}
fn record_result(&mut self, algorithm: &str, variant: &str, output_size: usize, elapsed: std::time::Duration) {
let key = format!("{}_{}", algorithm, variant);
let compression_ratio = self.test_data.len() as f64 / output_size as f64;
let throughput_mbps = (self.test_data.len() as f64 / 1024.0 / 1024.0) / elapsed.as_secs_f64();
let result = BenchmarkResult {
algorithm: algorithm.to_string(),
variant: variant.to_string(),
input_size: self.test_data.len(),
output_size,
compression_ratio,
encode_time: elapsed,
throughput_mbps,
};
self.results.insert(key, result);
}
fn print_benchmark_summary(&self) {
println!("\n=== Parallel Encoding Benchmark Results ===");
println!("{:<15} {:<8} {:<12} {:<12} {:<10} {:<10}",
"Algorithm", "Variant", "Input (KB)", "Output (KB)", "Ratio", "Speed (MB/s)");
println!("{}", "-".repeat(75));
let mut sorted_results: Vec<_> = self.results.values().collect();
        sorted_results.sort_by(|a, b| b.throughput_mbps.total_cmp(&a.throughput_mbps));
for result in sorted_results {
println!("{:<15} {:<8} {:<12} {:<12} {:<10.2} {:<10.2}",
result.algorithm,
result.variant,
result.input_size / 1024,
result.output_size / 1024,
result.compression_ratio,
result.throughput_mbps);
}
}
pub fn get_best_throughput(&self) -> Option<&BenchmarkResult> {
        self.results.values().max_by(|a, b| a.throughput_mbps.total_cmp(&b.throughput_mbps))
}
pub fn get_best_compression(&self) -> Option<&BenchmarkResult> {
        self.results.values().max_by(|a, b| a.compression_ratio.total_cmp(&b.compression_ratio))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parallel_config() {
let config = ParallelConfig::default();
assert_eq!(config.num_streams, 4);
assert_eq!(config.block_size, 64 * 1024);
assert!(config.adaptive_blocks);
let high_throughput = ParallelConfig::high_throughput();
assert_eq!(high_throughput.num_streams, 8);
assert_eq!(high_throughput.block_size, 256 * 1024);
let low_latency = ParallelConfig::low_latency();
assert_eq!(low_latency.num_streams, 2);
assert_eq!(low_latency.block_size, 16 * 1024);
assert!(!low_latency.adaptive_blocks);
}
#[test]
fn test_parallel_huffman_x2() -> Result<()> {
let config = ParallelConfig::default();
let mut encoder = ParallelHuffmanEncoder::<ParallelX2Variant>::new(config.clone())?;
let mut decoder = ParallelHuffmanDecoder::<ParallelX2Variant>::new(config);
let test_data = b"Parallel Huffman encoding test data with sufficient length for parallel processing.".repeat(100);
encoder.train(&test_data)?;
if let Some(ref tree) = encoder.shared_tree {
decoder.set_tree(tree.clone())?;
}
let compressed = encoder.encode(&test_data)?;
let decompressed = decoder.decode(&compressed, test_data.len())?;
assert_eq!(test_data, decompressed);
assert_eq!(encoder.variant_name(), "x2");
let stats = encoder.stats();
assert_eq!(stats.input_size, test_data.len());
assert_eq!(stats.output_size, compressed.len());
Ok(())
}
#[test]
fn test_parallel_huffman_x4() -> Result<()> {
let config = ParallelConfig::default();
let mut encoder = ParallelHuffmanEncoder::<ParallelX4Variant>::new(config.clone())?;
let mut decoder = ParallelHuffmanDecoder::<ParallelX4Variant>::new(config);
let test_data = b"Parallel Huffman x4 encoding test with even more data for better parallel utilization.".repeat(200);
encoder.train(&test_data)?;
if let Some(ref tree) = encoder.shared_tree {
decoder.set_tree(tree.clone())?;
}
let compressed = encoder.encode(&test_data)?;
let decompressed = decoder.decode(&compressed, test_data.len())?;
assert_eq!(test_data, decompressed);
assert_eq!(encoder.variant_name(), "x4");
Ok(())
}
#[test]
fn test_parallel_huffman_x8() -> Result<()> {
let config = ParallelConfig::default();
let mut encoder = ParallelHuffmanEncoder::<ParallelX8Variant>::new(config.clone())?;
let mut decoder = ParallelHuffmanDecoder::<ParallelX8Variant>::new(config);
let test_data = b"Parallel Huffman x8 encoding test with maximum parallel streams for optimal throughput.".repeat(500);
encoder.train(&test_data)?;
if let Some(ref tree) = encoder.shared_tree {
decoder.set_tree(tree.clone())?;
}
let compressed = encoder.encode(&test_data)?;
let decompressed = decoder.decode(&compressed, test_data.len())?;
assert_eq!(test_data, decompressed);
assert_eq!(encoder.variant_name(), "x8");
Ok(())
}
#[test]
fn test_adaptive_parallel_encoder() -> Result<()> {
let mut encoder = AdaptiveParallelEncoder::new()?;
        let low_entropy_data = vec![42u8; 1000];
        let high_entropy_data = (0..=255u8).cycle().take(10000).collect::<Vec<u8>>();
        let medium_data = b"This is medium entropy test data with mixed patterns.".repeat(100);
let compressed1 = encoder.encode_adaptive(&low_entropy_data)?;
let compressed2 = encoder.encode_adaptive(&high_entropy_data)?;
let compressed3 = encoder.encode_adaptive(&medium_data)?;
assert!(!compressed1.is_empty());
assert!(!compressed2.is_empty());
assert!(!compressed3.is_empty());
let (alg1, var1) = encoder.select_optimal_encoding(&low_entropy_data);
let (alg2, var2) = encoder.select_optimal_encoding(&high_entropy_data);
println!("Low entropy: {} {}", alg1, var1);
println!("High entropy: {} {}", alg2, var2);
Ok(())
}
#[test]
fn test_parallel_benchmark() -> Result<()> {
let mut test_data = Vec::new();
test_data.extend_from_slice("Benchmark test data ".repeat(1000).as_bytes());
test_data.extend_from_slice(&(0..=255u8).cycle().take(5000).collect::<Vec<u8>>());
test_data.extend_from_slice("More varied content for comprehensive testing. ".repeat(500).as_bytes());
let mut benchmark = ParallelBenchmark::new(test_data);
benchmark.run_comprehensive_benchmark()?;
assert!(!benchmark.results.is_empty());
let best_throughput = benchmark.get_best_throughput();
let best_compression = benchmark.get_best_compression();
assert!(best_throughput.is_some());
assert!(best_compression.is_some());
if let Some(result) = best_throughput {
println!("Best throughput: {} {} at {:.2} MB/s",
result.algorithm, result.variant, result.throughput_mbps);
}
if let Some(result) = best_compression {
println!("Best compression: {} {} at {:.2}x ratio",
result.algorithm, result.variant, result.compression_ratio);
}
Ok(())
}
#[test]
fn test_data_splitting_strategies() {
let config = ParallelConfig::default();
let encoder = ParallelHuffmanEncoder::<ParallelX4Variant>::new(config).unwrap();
let test_data = vec![1u8; 10000];
let blocks = encoder.split_data_into_blocks(&test_data);
let total_size: usize = blocks.iter().map(|b| b.len()).sum();
assert_eq!(total_size, test_data.len());
assert!(blocks.len() >= 1);
assert!(blocks.len() <= 20);
println!("Split {} bytes into {} blocks", test_data.len(), blocks.len());
for (i, block) in blocks.iter().enumerate() {
println!("Block {}: {} bytes", i, block.len());
}
}
#[test]
fn test_load_balancing() {
let mut config = ParallelConfig::default();
config.load_balancing = true;
let encoder = ParallelHuffmanEncoder::<ParallelX4Variant>::new(config).unwrap();
let test_data = vec![1u8; 1000];
let blocks = encoder.split_data_into_blocks(&test_data);
if blocks.len() > 1 {
let sizes: Vec<usize> = blocks.iter().map(|b| b.len()).collect();
let min_size = *sizes.iter().min().unwrap();
let max_size = *sizes.iter().max().unwrap();
assert!(max_size - min_size <= 1);
}
}
}
#[cfg(test)]
mod bench_tests {
use super::*;
use std::time::Instant;
#[test]
fn bench_parallel_variants_performance() -> Result<()> {
let mut test_data = Vec::new();
test_data.extend_from_slice("Structured data pattern ".repeat(2000).as_bytes());
for i in 0..10000 {
test_data.push(((i * 17 + 23) % 256) as u8);
}
test_data.extend_from_slice(&vec![42u8; 5000]);
println!("Performance comparison of parallel variants:");
println!("Test data size: {} KB", test_data.len() / 1024);
let config = ParallelConfig::default();
let mut encoder_x2 = ParallelHuffmanEncoder::<ParallelX2Variant>::new(config.clone())?;
let start = Instant::now();
let compressed_x2 = encoder_x2.encode(&test_data)?;
let time_x2 = start.elapsed();
let mut encoder_x4 = ParallelHuffmanEncoder::<ParallelX4Variant>::new(config.clone())?;
let start = Instant::now();
let compressed_x4 = encoder_x4.encode(&test_data)?;
let time_x4 = start.elapsed();
let mut encoder_x8 = ParallelHuffmanEncoder::<ParallelX8Variant>::new(config)?;
let start = Instant::now();
let compressed_x8 = encoder_x8.encode(&test_data)?;
let time_x8 = start.elapsed();
let size_mb = test_data.len() as f64 / (1024.0 * 1024.0);
let throughput_x2 = size_mb / time_x2.as_secs_f64();
let throughput_x4 = size_mb / time_x4.as_secs_f64();
let throughput_x8 = size_mb / time_x8.as_secs_f64();
println!("x2 variant: {:.2} MB/s, {} bytes compressed", throughput_x2, compressed_x2.len());
println!("x4 variant: {:.2} MB/s, {} bytes compressed", throughput_x4, compressed_x4.len());
println!("x8 variant: {:.2} MB/s, {} bytes compressed", throughput_x8, compressed_x8.len());
assert!(!compressed_x2.is_empty());
assert!(!compressed_x4.is_empty());
assert!(!compressed_x8.is_empty());
        assert!(throughput_x2 > 0.5);
        assert!(throughput_x4 > 0.5);
assert!(throughput_x8 > 0.5);
Ok(())
}
#[test]
fn bench_adaptive_selection_accuracy() -> Result<()> {
let mut encoder = AdaptiveParallelEncoder::new()?;
let test_cases = vec![
(vec![1u8; 1000], "low_entropy_small"),
(vec![1u8; 100000], "low_entropy_large"),
((0..=255u8).cycle().take(1000).collect(), "high_entropy_small"),
((0..=255u8).cycle().take(100000).collect(), "high_entropy_large"),
("Mixed content with patterns and randomness".repeat(1000).as_bytes().to_vec(), "mixed_medium"),
];
println!("Adaptive selection accuracy test:");
for (data, description) in test_cases {
let (algorithm, variant) = encoder.select_optimal_encoding(&data);
let entropy = EntropyStats::calculate_entropy(&data);
println!("{}: {} {} (entropy: {:.2}, size: {} KB)",
description, algorithm, variant, entropy, data.len() / 1024);
let start = Instant::now();
let compressed = encoder.encode_adaptive(&data)?;
let elapsed = start.elapsed();
let ratio = data.len() as f64 / compressed.len() as f64;
let throughput = (data.len() as f64 / 1024.0 / 1024.0) / elapsed.as_secs_f64();
println!(" Result: {:.2}x compression, {:.2} MB/s", ratio, throughput);
assert!(!compressed.is_empty());
            assert!(ratio > 0.5);
        }
Ok(())
}
}