use alloc::string::ToString;
use alloc::vec;
use alloc::vec::Vec;
use enough::Stop;
use zenflate::{CompressionLevel, Compressor};
use crate::error::PngError;
#[allow(unused_imports)]
use whereat::at;
use super::filter::{
FAST_STRATEGIES, HEURISTIC_STRATEGIES, HeuristicScratch, MINIMAL_STRATEGIES, Strategy,
filter_image, filter_image_from_precomputed, precompute_all_filters,
};
use super::{PhaseStat, PhaseStats};
/// Per-effort tuning parameters for `compress_filtered`.
///
/// Each field gates one phase of the pipeline; an empty slice, `false`,
/// or `None` disables the corresponding phase.
struct EffortParams {
    /// zenflate level used when compressing brute-force filter candidates.
    zenflate_effort: u32,
    /// Filter strategies tried during the screening pass.
    strategies: &'static [Strategy],
    /// zenflate level used for the cheap screening compressions.
    screen_effort: u32,
    /// When true, the best screening result is returned as-is.
    screen_is_final: bool,
    /// Number of top screening candidates kept for refinement.
    top_k: usize,
    /// zenflate levels at which the top-k candidates are recompressed.
    refine_efforts: &'static [u32],
    /// `(context_rows, eval_level)` configs for `Strategy::BruteForce`.
    brute_configs: &'static [(usize, u32)],
    /// `(context_rows, eval_level)` configs for `Strategy::BruteForceBlock`.
    block_brute_configs: &'static [(usize, u32)],
    /// Eval levels for `Strategy::BruteForceFork`.
    fork_brute_efforts: &'static [u32],
    /// `(eval_level, narrow_to)` configs for `Strategy::AdaptiveFork`.
    adaptive_fork_configs: &'static [(u32, usize)],
    /// `(eval_level, beam_width)` configs for `Strategy::BruteForceBeam`.
    beam_brute_configs: &'static [(u32, usize)],
    /// Feed the best filtered candidates to the final recompression phase.
    use_recompress: bool,
    /// Effort for the full-optimal (or zopfli) recompression, if enabled.
    full_optimal_effort: Option<u32>,
    /// Run only the full-optimal/zopfli recompression, skipping near-optimal.
    full_optimal_only: bool,
}
impl EffortParams {
fn from_effort_and_bpp(effort: u32, bpp: usize) -> Self {
let mut params = Self::from_effort(effort);
if params.brute_configs.is_empty()
&& ((bpp == 1 && (16..24).contains(&effort))
|| (bpp == 2 && (20..24).contains(&effort)))
{
params.brute_configs = &[(5, 1)];
}
params
}
fn from_effort(effort: u32) -> Self {
if effort > 60 {
return Self {
zenflate_effort: 30,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[30],
brute_configs: &[
(1, 1),
(1, 4),
(3, 1),
(3, 4),
(5, 1),
(5, 4),
(8, 1),
(8, 4),
],
block_brute_configs: &[(5, 1), (5, 4)],
fork_brute_efforts: &[10, 15],
adaptive_fork_configs: &[(15, 2), (22, 2)],
beam_brute_configs: &[(10, 3), (15, 3)],
use_recompress: true,
full_optimal_effort: Some(effort),
full_optimal_only: false,
};
}
if effort > 45 {
return Self {
zenflate_effort: 30,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[28, 30],
brute_configs: &[(1, 1), (1, 4), (3, 1), (3, 4)],
block_brute_configs: &[],
fork_brute_efforts: &[10, 15],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: true,
full_optimal_effort: Some(effort),
full_optimal_only: false,
};
}
if effort > 30 {
return Self {
zenflate_effort: 30,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[28, 30],
brute_configs: &[
(1, 1),
(1, 4),
(3, 1),
(3, 4),
(5, 1),
(5, 4),
(8, 1),
(8, 4),
],
block_brute_configs: &[],
fork_brute_efforts: &[10, 15],
adaptive_fork_configs: &[(15, 2), (22, 2)],
beam_brute_configs: &[(10, 3), (15, 3)],
use_recompress: true,
full_optimal_effort: Some(effort),
full_optimal_only: false,
};
}
match effort {
0 => Self {
zenflate_effort: 0,
strategies: &[Strategy::Single(0)],
screen_effort: 0,
screen_is_final: true,
top_k: 1,
refine_efforts: &[],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
1 => Self {
zenflate_effort: 1,
strategies: &[Strategy::Single(4)],
screen_effort: 1,
screen_is_final: true,
top_k: 1,
refine_efforts: &[],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
2 => Self {
zenflate_effort: 2,
strategies: MINIMAL_STRATEGIES,
screen_effort: 2,
screen_is_final: true,
top_k: 1,
refine_efforts: &[],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
3 => Self {
zenflate_effort: 3,
strategies: FAST_STRATEGIES,
screen_effort: 3,
screen_is_final: true,
top_k: 1,
refine_efforts: &[],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
4 => Self {
zenflate_effort: 4,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 4,
screen_is_final: true,
top_k: 1,
refine_efforts: &[],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
5 => Self {
zenflate_effort: 5,
strategies: FAST_STRATEGIES,
screen_effort: 5,
screen_is_final: true,
top_k: 1,
refine_efforts: &[],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
6 => Self {
zenflate_effort: 6,
strategies: FAST_STRATEGIES,
screen_effort: 6,
screen_is_final: true,
top_k: 1,
refine_efforts: &[],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
7 => Self {
zenflate_effort: 7,
strategies: FAST_STRATEGIES,
screen_effort: 7,
screen_is_final: true,
top_k: 1,
refine_efforts: &[],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
8 => Self {
zenflate_effort: 8,
strategies: FAST_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[8],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
9 => Self {
zenflate_effort: 10,
strategies: FAST_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[10],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
10 => Self {
zenflate_effort: 12,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[12],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
11 => Self {
zenflate_effort: 14,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[14],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
12 => Self {
zenflate_effort: 15,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[15],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
13 => Self {
zenflate_effort: 17,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[17],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
14 => Self {
zenflate_effort: 18,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[18],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
15 => Self {
zenflate_effort: 20,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[20],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
16 => Self {
zenflate_effort: 22,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[20, 22],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
17 => Self {
zenflate_effort: 22,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[20, 22],
brute_configs: &[(3, 1)],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
18 => Self {
zenflate_effort: 24,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[22, 24],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
19 => Self {
zenflate_effort: 24,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[22, 24],
brute_configs: &[(3, 1)],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
20 => Self {
zenflate_effort: 26,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[24, 26],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
21 => Self {
zenflate_effort: 28,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[26, 28],
brute_configs: &[],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
22 => Self {
zenflate_effort: 28,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[26, 28],
brute_configs: &[(5, 1)],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
23 => Self {
zenflate_effort: 30,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[28, 30],
brute_configs: &[(5, 1)],
block_brute_configs: &[],
fork_brute_efforts: &[],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
24 => Self {
zenflate_effort: 30,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[28, 30],
brute_configs: &[(5, 1)],
block_brute_configs: &[],
fork_brute_efforts: &[10],
adaptive_fork_configs: &[],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
25 => Self {
zenflate_effort: 30,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[28, 30],
brute_configs: &[(5, 1)],
block_brute_configs: &[],
fork_brute_efforts: &[10],
adaptive_fork_configs: &[(15, 2)],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
26 => Self {
zenflate_effort: 30,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[28, 30],
brute_configs: &[(5, 1), (5, 4)],
block_brute_configs: &[],
fork_brute_efforts: &[10],
adaptive_fork_configs: &[(15, 2)],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
27 => Self {
zenflate_effort: 30,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[28, 30],
brute_configs: &[(5, 1), (5, 4)],
block_brute_configs: &[],
fork_brute_efforts: &[10, 15],
adaptive_fork_configs: &[(15, 2), (22, 2)],
beam_brute_configs: &[],
use_recompress: false,
full_optimal_effort: None,
full_optimal_only: false,
},
28 => Self {
zenflate_effort: 30,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[28, 30],
brute_configs: &[
(1, 1),
(1, 4),
(3, 1),
(3, 4),
(5, 1),
(5, 4),
(8, 1),
(8, 4),
],
block_brute_configs: &[(5, 1)],
fork_brute_efforts: &[10, 15],
adaptive_fork_configs: &[(15, 2), (22, 2)],
beam_brute_configs: &[],
use_recompress: true,
full_optimal_effort: None,
full_optimal_only: false,
},
29 => Self {
zenflate_effort: 30,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[28, 30],
brute_configs: &[
(1, 1),
(1, 4),
(3, 1),
(3, 4),
(5, 1),
(5, 4),
(8, 1),
(8, 4),
],
block_brute_configs: &[(5, 1)],
fork_brute_efforts: &[10, 15],
adaptive_fork_configs: &[(15, 2), (22, 2)],
beam_brute_configs: &[(10, 3)],
use_recompress: true,
full_optimal_effort: None,
full_optimal_only: false,
},
_ => Self {
zenflate_effort: 30,
strategies: HEURISTIC_STRATEGIES,
screen_effort: 7,
screen_is_final: false,
top_k: 3,
refine_efforts: &[28, 30],
brute_configs: &[
(1, 1),
(1, 4),
(3, 1),
(3, 4),
(5, 1),
(5, 4),
(8, 1),
(8, 4),
],
block_brute_configs: &[(5, 1), (5, 4)],
fork_brute_efforts: &[10, 15],
adaptive_fork_configs: &[(15, 2), (22, 2)],
beam_brute_configs: &[(10, 3), (15, 3)],
use_recompress: true,
full_optimal_effort: None,
full_optimal_only: false,
},
}
}
}
/// Compress `filtered` with each compressor in turn, keeping only streams
/// that survive a decompression round-trip.
///
/// Returns the smallest verified stream length produced in this call
/// (`usize::MAX` if none verified), and updates `best_compressed` when a
/// strictly smaller stream than the current global best is found.
pub(crate) fn try_compress(
    filtered: &[u8],
    compressors: &mut [Compressor],
    compress_buf: &mut [u8],
    verify_buf: &mut [u8],
    best_compressed: &mut Option<Vec<u8>>,
    cancel: &dyn Stop,
) -> crate::error::Result<usize> {
    let mut smallest = usize::MAX;
    for compressor in compressors.iter_mut() {
        let len = match compressor.zlib_compress(filtered, compress_buf, cancel) {
            Ok(n) => n,
            Err(zenflate::CompressionError::Stopped(reason)) => {
                return Err(at!(PngError::from(reason)));
            }
            Err(e) => {
                return Err(at!(PngError::InvalidInput(alloc::format!(
                    "compression failed: {e}"
                ))));
            }
        };
        // Round-trip check: silently drop streams that fail to decompress.
        // NOTE(review): only decompressibility is verified, not that the
        // output matches `filtered` — presumably intentional; confirm.
        let round_trips = zenflate::Decompressor::new()
            .zlib_decompress(&compress_buf[..len], verify_buf, cancel)
            .is_ok();
        if !round_trips {
            continue;
        }
        smallest = smallest.min(len);
        if best_compressed.as_ref().is_none_or(|b| len < b.len()) {
            *best_compressed = Some(compress_buf[..len].to_vec());
        }
    }
    Ok(smallest)
}
/// Compress `filtered` at `effort` and then at each level along the
/// monotonicity-fallback chain, tracking the smallest verified stream.
///
/// Returns the smallest size seen across the whole chain (`usize::MAX`
/// if nothing verified); `best_compressed` is updated as in
/// [`try_compress`].
fn try_compress_with_fallbacks(
    filtered: &[u8],
    effort: u32,
    compress_buf: &mut [u8],
    verify_buf: &mut [u8],
    best_compressed: &mut Option<Vec<u8>>,
    cancel: &dyn Stop,
) -> crate::error::Result<usize> {
    let mut smallest = usize::MAX;
    // Walk the level chain: the requested level first, then each fallback
    // until `monotonicity_fallback` reports the chain is exhausted.
    let mut next = Some(CompressionLevel::new(effort));
    while let Some(level) = next {
        let mut compressor = Compressor::new(level);
        let size = try_compress(
            filtered,
            core::slice::from_mut(&mut compressor),
            compress_buf,
            verify_buf,
            best_compressed,
            cancel,
        )?;
        smallest = smallest.min(size);
        next = level.monotonicity_fallback();
    }
    Ok(smallest)
}
/// Filter and zlib-compress an image's packed rows, searching over filter
/// strategies and compression levels according to `effort`.
///
/// Pipeline (phases are skipped per `EffortParams` and the deadline):
/// 1. effort 0: emit a stored (uncompressed) zlib stream immediately;
/// 2. screen: compress every candidate filter strategy at a cheap level;
/// 3. refine: recompress the top-k screen results at higher levels;
/// 4. brute force: run the configured brute-force filter searches;
/// 5. recompress: hand the best filtered candidates to near-optimal /
///    full-optimal (or zopfli, when that feature is enabled) encoders.
///
/// Every compressed stream is verified by decompression before it can
/// become the best result. `stats`, when present, records per-phase
/// timings and sizes.
pub(crate) fn compress_filtered(
    packed_rows: &[u8],
    row_bytes: usize,
    height: usize,
    bpp: usize,
    effort: u32,
    opts: super::CompressOptions<'_>,
    mut stats: Option<&mut PhaseStats>,
) -> crate::error::Result<Vec<u8>> {
    use std::time::Instant;
    let params = EffortParams::from_effort_and_bpp(effort, bpp);
    // Filtered size = one filter-type byte per row plus the row payloads.
    let filtered_size = (row_bytes + 1) * height;
    if effort == 0 {
        // Effort 0 bypasses the whole search: store the rows uncompressed.
        if let Some(s) = &mut stats {
            s.raw_size = filtered_size;
        }
        let phase_start = if stats.is_some() {
            Some(Instant::now())
        } else {
            None
        };
        let result = zlib_store_unfiltered(packed_rows, row_bytes, height);
        if let (Some(s), Some(t)) = (&mut stats, phase_start) {
            s.phases.push(PhaseStat {
                name: "Store (effort 0)".to_string(),
                duration_ns: t.elapsed().as_nanos() as u64,
                best_size: result.len(),
                evaluations: 1,
            });
        }
        return Ok(result);
    }
    // For fully transparent pixels the RGB channels are invisible; zeroing
    // them makes the data more compressible without changing the rendered
    // image. (bpp == 4 is treated as RGBA8 — 4-byte pixels, alpha last.)
    let owned_rows;
    let packed_rows = if bpp == 4 && has_any_transparent_pixel(packed_rows) {
        owned_rows = zero_transparent_rgba8(packed_rows);
        &owned_rows
    } else {
        packed_rows
    };
    let mut best_compressed: Option<Vec<u8>> = None;
    if let Some(s) = &mut stats {
        s.raw_size = filtered_size;
    }
    let mut filtered = Vec::with_capacity(filtered_size);
    let compress_bound = Compressor::zlib_compress_bound(filtered_size);
    let mut compress_buf = vec![0u8; compress_bound];
    let mut verify_buf = vec![0u8; filtered_size];
    let strategies = params.strategies;
    // --- Phase 1: screen every strategy at a cheap compression level. ---
    let phase_start = if stats.is_some() {
        Some(Instant::now())
    } else {
        None
    };
    let mut screen_results: Vec<(usize, Vec<u8>)> = Vec::with_capacity(strategies.len());
    let screen_effort = params.screen_effort;
    // Precompute all 5 PNG filter variants once and share them across
    // strategies, unless that buffer would exceed 64 MiB.
    let precompute_size = 5 * height * row_bytes;
    let precomputed = if strategies.len() > 1 && precompute_size <= 64 * 1024 * 1024 {
        Some(precompute_all_filters(packed_rows, row_bytes, height, bpp))
    } else {
        None
    };
    if opts.parallel {
        // One scoped thread per strategy; each thread owns its buffers and
        // returns (size, filtered bytes, compressed bytes) on success.
        let precomputed_ref = precomputed.as_deref();
        #[allow(clippy::type_complexity)]
        let par_results: Vec<Option<(usize, Vec<u8>, Vec<u8>)>> = std::thread::scope(|s| {
            let handles: Vec<_> = strategies
                .iter()
                .map(|strategy| {
                    s.spawn(move || {
                        let mut t_filtered = Vec::with_capacity(filtered_size);
                        let mut t_compressor =
                            Compressor::new(CompressionLevel::new(screen_effort));
                        let mut t_compress_buf = vec![0u8; compress_bound];
                        let mut t_verify_buf = vec![0u8; filtered_size];
                        if let Some(pc) = precomputed_ref {
                            let mut t_scratch = HeuristicScratch::new_universal();
                            filter_image_from_precomputed(
                                pc,
                                row_bytes,
                                height,
                                *strategy,
                                &mut t_scratch,
                                &mut t_filtered,
                            );
                        } else {
                            filter_image(
                                packed_rows,
                                row_bytes,
                                height,
                                bpp,
                                *strategy,
                                opts.cancel,
                                &mut t_filtered,
                            );
                        }
                        let compressed_len = t_compressor
                            .zlib_compress(&t_filtered, &mut t_compress_buf, opts.cancel)
                            .ok()?;
                        // Verify the stream decompresses before accepting it.
                        let mut decompressor = zenflate::Decompressor::new();
                        decompressor
                            .zlib_decompress(
                                &t_compress_buf[..compressed_len],
                                &mut t_verify_buf,
                                opts.cancel,
                            )
                            .ok()?;
                        Some((
                            compressed_len,
                            t_filtered,
                            t_compress_buf[..compressed_len].to_vec(),
                        ))
                    })
                })
                .collect();
            handles.into_iter().map(|h| h.join().unwrap()).collect()
        });
        for (compressed_len, filtered_data, compressed_data) in par_results.into_iter().flatten() {
            let dominated = best_compressed
                .as_ref()
                .is_some_and(|b| compressed_len >= b.len());
            if !dominated {
                best_compressed = Some(compressed_data);
            }
            screen_results.push((compressed_len, filtered_data));
        }
    } else {
        // Serial screening: reuse one compressor, scratch, and buffers.
        let mut screen_compressor = Compressor::new(CompressionLevel::new(screen_effort));
        let mut scratch = HeuristicScratch::new_universal();
        for (i, strategy) in strategies.iter().enumerate() {
            // Always try at least the first strategy, even past deadline.
            if i > 0 && opts.deadline.should_stop() {
                break;
            }
            filtered.clear();
            if let Some(ref pc) = precomputed {
                filter_image_from_precomputed(
                    pc,
                    row_bytes,
                    height,
                    *strategy,
                    &mut scratch,
                    &mut filtered,
                );
            } else {
                filter_image(
                    packed_rows,
                    row_bytes,
                    height,
                    bpp,
                    *strategy,
                    opts.cancel,
                    &mut filtered,
                );
            }
            let compressed_len =
                match screen_compressor.zlib_compress(&filtered, &mut compress_buf, opts.cancel) {
                    Ok(len) => len,
                    Err(zenflate::CompressionError::Stopped(reason)) => {
                        return Err(at!(PngError::from(reason)));
                    }
                    Err(e) => {
                        return Err(at!(PngError::InvalidInput(alloc::format!(
                            "compression failed: {e}"
                        ))));
                    }
                };
            let valid = {
                let mut decompressor = zenflate::Decompressor::new();
                decompressor
                    .zlib_decompress(
                        &compress_buf[..compressed_len],
                        &mut verify_buf,
                        opts.cancel,
                    )
                    .is_ok()
            };
            if valid {
                let dominated = best_compressed
                    .as_ref()
                    .is_some_and(|b| compressed_len >= b.len());
                if !dominated {
                    best_compressed = Some(compress_buf[..compressed_len].to_vec());
                }
                screen_results.push((compressed_len, filtered.clone()));
            }
        }
    }
    // Sort candidates best-first for top-k selection below.
    screen_results.sort_by_key(|(size, _)| *size);
    // If all strategies land within 1% of each other, the filter choice
    // barely matters — used later to skip the brute-force phase.
    let filter_variance_low = if screen_results.len() >= 3 {
        let best = screen_results[0].0;
        let worst = screen_results[screen_results.len() - 1].0;
        best > 0 && (worst as f64 / best as f64) < 1.01
    } else {
        false
    };
    if let (Some(s), Some(t)) = (&mut stats, phase_start) {
        let tried = screen_results.len();
        s.phases.push(PhaseStat {
            name: alloc::format!("Screen ({tried}×E{screen_effort})"),
            duration_ns: t.elapsed().as_nanos() as u64,
            best_size: best_compressed.as_ref().map_or(0, |b| b.len()),
            evaluations: tried as u32,
        });
    }
    if params.screen_is_final || opts.deadline.should_stop() {
        return best_compressed.ok_or_else(|| {
            at!(PngError::InvalidInput(
                "no filter strategies tried".to_string()
            ))
        });
    }
    // --- Phase 2: refine the top-k screen candidates at higher levels. ---
    let refine_tiers = params.refine_efforts;
    let phase2_start = if stats.is_some() {
        Some(Instant::now())
    } else {
        None
    };
    let top_n = screen_results.len().min(params.top_k);
    // Filtered candidates (with their best known size) to feed into the
    // final recompression phase.
    let mut recompress_candidates: Vec<(usize, Vec<u8>)> = Vec::new();
    // With no refine tiers, the screen results themselves are the
    // recompression candidates.
    if refine_tiers.is_empty() && params.use_recompress {
        for (size, filtered_data) in &screen_results[..top_n] {
            recompress_candidates.push((*size, filtered_data.clone()));
        }
    }
    if opts.parallel && top_n > 1 {
        // One scoped thread per candidate; each walks all refine tiers
        // (plus monotonicity fallbacks) and returns its best stream.
        let refine_results: Vec<Option<(usize, Vec<u8>)>> = std::thread::scope(|s| {
            let handles: Vec<_> = screen_results[..top_n]
                .iter()
                .map(|(_, filtered_data)| {
                    s.spawn(move || {
                        let mut t_compress_buf = vec![0u8; compress_bound];
                        let mut t_verify_buf = vec![0u8; filtered_size];
                        let mut t_best: Option<Vec<u8>> = None;
                        for &tier_level in refine_tiers {
                            let mut level = CompressionLevel::new(tier_level);
                            loop {
                                let mut compressor = Compressor::new(level);
                                if let Ok(len) = compressor.zlib_compress(
                                    filtered_data,
                                    &mut t_compress_buf,
                                    opts.cancel,
                                ) {
                                    let mut decompressor = zenflate::Decompressor::new();
                                    if decompressor
                                        .zlib_decompress(
                                            &t_compress_buf[..len],
                                            &mut t_verify_buf,
                                            opts.cancel,
                                        )
                                        .is_ok()
                                    {
                                        let dominated =
                                            t_best.as_ref().is_some_and(|b| len >= b.len());
                                        if !dominated {
                                            t_best = Some(t_compress_buf[..len].to_vec());
                                        }
                                    }
                                }
                                match level.monotonicity_fallback() {
                                    Some(fb) => level = fb,
                                    None => break,
                                }
                            }
                        }
                        t_best.map(|b| (b.len(), b))
                    })
                })
                .collect();
            handles.into_iter().map(|h| h.join().unwrap()).collect()
        });
        #[allow(unused_variables)]
        for (idx, result) in refine_results.into_iter().enumerate() {
            if let Some((size, compressed_data)) = result {
                let dominated = best_compressed.as_ref().is_some_and(|b| size >= b.len());
                if !dominated {
                    best_compressed = Some(compressed_data);
                }
                // `idx` matches the candidate order in screen_results[..top_n].
                if params.use_recompress {
                    recompress_candidates.push((size, screen_results[idx].1.clone()));
                }
            }
        }
    } else {
        for &tier_level in refine_tiers {
            if opts.deadline.should_stop() {
                break;
            }
            for (_, filtered_data) in &screen_results[..top_n] {
                let refine_size = try_compress_with_fallbacks(
                    filtered_data,
                    tier_level,
                    &mut compress_buf,
                    &mut verify_buf,
                    &mut best_compressed,
                    opts.cancel,
                )?;
                // usize::MAX means no stream verified for this candidate.
                if params.use_recompress && refine_size < usize::MAX {
                    recompress_candidates.push((refine_size, filtered_data.clone()));
                }
            }
        }
    }
    if let (Some(s), Some(t)) = (&mut stats, phase2_start) {
        let tiers_str = refine_tiers
            .iter()
            .map(|l| alloc::format!("E{l}"))
            .collect::<Vec<_>>()
            .join(",");
        s.phases.push(PhaseStat {
            name: alloc::format!("Refine ({top_n}×{tiers_str})"),
            duration_ns: t.elapsed().as_nanos() as u64,
            best_size: best_compressed.as_ref().map_or(0, |b| b.len()),
            evaluations: (top_n * refine_tiers.len()) as u32,
        });
    }
    // --- Phase 3: brute-force filter searches, per the effort config. ---
    let brute_configs = params.brute_configs;
    let block_brute_configs = params.block_brute_configs;
    let fork_brute_levels = params.fork_brute_efforts;
    let beam_brute_configs = params.beam_brute_configs;
    let adaptive_fork_configs = params.adaptive_fork_configs;
    let can_brute_force = !brute_configs.is_empty()
        || !block_brute_configs.is_empty()
        || !fork_brute_levels.is_empty()
        || !beam_brute_configs.is_empty()
        || !adaptive_fork_configs.is_empty();
    let phase3_start = if stats.is_some() && can_brute_force {
        Some(Instant::now())
    } else {
        None
    };
    let mut brute_evals = 0u32;
    // Skip brute force when the screen showed filters barely matter.
    if can_brute_force && !opts.deadline.should_stop() && !filter_variance_low {
        for &(context_rows, eval_level) in brute_configs {
            if opts.deadline.should_stop() {
                break;
            }
            filtered.clear();
            filter_image(
                packed_rows,
                row_bytes,
                height,
                bpp,
                Strategy::BruteForce {
                    context_rows,
                    eval_level,
                },
                opts.cancel,
                &mut filtered,
            );
            let brute_size = try_compress_with_fallbacks(
                &filtered,
                params.zenflate_effort,
                &mut compress_buf,
                &mut verify_buf,
                &mut best_compressed,
                opts.cancel,
            )?;
            brute_evals += 1;
            if params.use_recompress && brute_size < usize::MAX {
                recompress_candidates.push((brute_size, filtered.clone()));
            }
        }
        for &(context_rows, eval_level) in block_brute_configs {
            if opts.deadline.should_stop() {
                break;
            }
            filtered.clear();
            filter_image(
                packed_rows,
                row_bytes,
                height,
                bpp,
                Strategy::BruteForceBlock {
                    context_rows,
                    eval_level,
                },
                opts.cancel,
                &mut filtered,
            );
            let block_brute_size = try_compress_with_fallbacks(
                &filtered,
                params.zenflate_effort,
                &mut compress_buf,
                &mut verify_buf,
                &mut best_compressed,
                opts.cancel,
            )?;
            brute_evals += 1;
            if params.use_recompress && block_brute_size < usize::MAX {
                recompress_candidates.push((block_brute_size, filtered.clone()));
            }
        }
        for &eval_level in fork_brute_levels {
            if opts.deadline.should_stop() {
                break;
            }
            filtered.clear();
            filter_image(
                packed_rows,
                row_bytes,
                height,
                bpp,
                Strategy::BruteForceFork { eval_level },
                opts.cancel,
                &mut filtered,
            );
            let fork_brute_size = try_compress_with_fallbacks(
                &filtered,
                params.zenflate_effort,
                &mut compress_buf,
                &mut verify_buf,
                &mut best_compressed,
                opts.cancel,
            )?;
            brute_evals += 1;
            if params.use_recompress && fork_brute_size < usize::MAX {
                recompress_candidates.push((fork_brute_size, filtered.clone()));
            }
        }
        for &(eval_level, beam_width) in beam_brute_configs {
            if opts.deadline.should_stop() {
                break;
            }
            filtered.clear();
            filter_image(
                packed_rows,
                row_bytes,
                height,
                bpp,
                Strategy::BruteForceBeam {
                    eval_level,
                    beam_width,
                },
                opts.cancel,
                &mut filtered,
            );
            let beam_brute_size = try_compress_with_fallbacks(
                &filtered,
                params.zenflate_effort,
                &mut compress_buf,
                &mut verify_buf,
                &mut best_compressed,
                opts.cancel,
            )?;
            brute_evals += 1;
            if params.use_recompress && beam_brute_size < usize::MAX {
                recompress_candidates.push((beam_brute_size, filtered.clone()));
            }
        }
        for &(eval_level, narrow_to) in adaptive_fork_configs {
            if opts.deadline.should_stop() {
                break;
            }
            filtered.clear();
            filter_image(
                packed_rows,
                row_bytes,
                height,
                bpp,
                Strategy::AdaptiveFork {
                    eval_level,
                    narrow_to,
                },
                opts.cancel,
                &mut filtered,
            );
            let adaptive_size = try_compress_with_fallbacks(
                &filtered,
                params.zenflate_effort,
                &mut compress_buf,
                &mut verify_buf,
                &mut best_compressed,
                opts.cancel,
            )?;
            brute_evals += 1;
            if params.use_recompress && adaptive_size < usize::MAX {
                recompress_candidates.push((adaptive_size, filtered.clone()));
            }
        }
    }
    if let (Some(s), Some(t)) = (&mut stats, phase3_start)
        && brute_evals > 0
    {
        let configs_desc = brute_configs
            .iter()
            .map(|(ctx, ev)| alloc::format!("ctx{ctx}/E{ev}"))
            .chain(
                block_brute_configs
                    .iter()
                    .map(|(ctx, ev)| alloc::format!("blk-ctx{ctx}/E{ev}")),
            )
            .chain(
                fork_brute_levels
                    .iter()
                    .map(|l| alloc::format!("fork-E{l}")),
            )
            .chain(
                beam_brute_configs
                    .iter()
                    .map(|(ev, k)| alloc::format!("beam-E{ev}/K{k}")),
            )
            .chain(
                adaptive_fork_configs
                    .iter()
                    .map(|(ev, n)| alloc::format!("afork-E{ev}/N{n}")),
            )
            .collect::<Vec<_>>()
            .join(",");
        s.phases.push(PhaseStat {
            name: alloc::format!("BruteForce ({configs_desc})"),
            duration_ns: t.elapsed().as_nanos() as u64,
            best_size: best_compressed.as_ref().map_or(0, |b| b.len()),
            evaluations: brute_evals,
        });
    }
    // --- Phase 4: recompress the best filtered candidates. ---
    {
        let phase4_start = if stats.is_some() {
            Some(Instant::now())
        } else {
            None
        };
        if params.use_recompress
            && !recompress_candidates.is_empty()
            && !opts.deadline.should_stop()
        {
            // Keep only the most promising candidate(s).
            recompress_candidates.sort_by_key(|(size, _)| *size);
            if params.full_optimal_only {
                recompress_candidates.truncate(1);
            } else {
                recompress_candidates.truncate(3);
            }
            let n_candidates = recompress_candidates.len();
            if params.full_optimal_only {
                // Full-optimal-only: either zopfli (feature-gated) or the
                // zenflate full-optimal path, nothing else.
                #[cfg(feature = "zopfli")]
                {
                    // Map the full-optimal effort to zopfli iterations.
                    let iterations =
                        params.full_optimal_effort.unwrap_or(31).saturating_sub(16) as u64;
                    for (_screen_size, filtered_data) in &recompress_candidates {
                        if opts.cancel.should_stop() {
                            break;
                        }
                        let zopfli_result =
                            compress_with_zopfli_n(filtered_data, iterations.max(1), opts.cancel)?;
                        let dominated = best_compressed
                            .as_ref()
                            .is_some_and(|b| zopfli_result.len() >= b.len());
                        if !dominated {
                            best_compressed = Some(zopfli_result);
                        }
                    }
                }
                #[cfg(not(feature = "zopfli"))]
                {
                    if let Some(fo_effort) = params.full_optimal_effort {
                        let fo_best = zenflate_full_optimal_recompress(
                            &recompress_candidates,
                            fo_effort,
                            opts.cancel,
                            &mut best_compressed,
                            opts.max_threads,
                        )?;
                        if let Some(b) = fo_best {
                            best_compressed = Some(b);
                        }
                    }
                }
            } else {
                // Standard path: near-optimal first, then (deadline
                // permitting) full-optimal, then zopfli if enabled.
                let zenflate_best = zenflate_recompress(
                    &recompress_candidates,
                    opts.cancel,
                    &mut best_compressed,
                    opts.max_threads,
                )?;
                if let Some(b) = zenflate_best {
                    best_compressed = Some(b);
                }
                if let Some(fo_effort) = params.full_optimal_effort
                    && !opts.deadline.should_stop()
                {
                    let fo_best = zenflate_full_optimal_recompress(
                        &recompress_candidates,
                        fo_effort,
                        opts.cancel,
                        &mut best_compressed,
                        opts.max_threads,
                    )?;
                    if let Some(b) = fo_best {
                        best_compressed = Some(b);
                    }
                }
                #[cfg(feature = "zopfli")]
                {
                    if !opts.deadline.should_stop() {
                        let zopfli_best = zopfli_adaptive(
                            &recompress_candidates,
                            opts.cancel,
                            opts.deadline,
                            opts.remaining_ns,
                            &mut best_compressed,
                            opts.max_threads,
                        )?;
                        if let Some(b) = zopfli_best {
                            best_compressed = Some(b);
                        }
                    }
                }
            }
            if let (Some(s), Some(t)) = (&mut stats, phase4_start) {
                let mut label_parts = Vec::new();
                if params.full_optimal_only {
                    #[cfg(feature = "zopfli")]
                    label_parts.push("Zenzop".to_string());
                    #[cfg(not(feature = "zopfli"))]
                    label_parts.push("FullOpt".to_string());
                } else {
                    label_parts.push("NearOpt".to_string());
                    if params.full_optimal_effort.is_some() {
                        label_parts.push("FullOpt".to_string());
                    }
                    if cfg!(feature = "zopfli") {
                        label_parts.push("Zopfli".to_string());
                    }
                }
                let label = alloc::format!(
                    "Recompress[{}] ({n_candidates} candidates)",
                    label_parts.join("+")
                );
                s.phases.push(PhaseStat {
                    name: label,
                    duration_ns: t.elapsed().as_nanos() as u64,
                    best_size: best_compressed.as_ref().map_or(0, |b| b.len()),
                    evaluations: n_candidates as u32,
                });
            }
        }
    }
    best_compressed.ok_or_else(|| {
        at!(PngError::InvalidInput(
            "no filter strategies tried".to_string()
        ))
    })
}
/// Whether any RGBA8 pixel in `data` is fully transparent (alpha == 0).
/// Trailing bytes that do not form a whole 4-byte pixel are ignored.
fn has_any_transparent_pixel(data: &[u8]) -> bool {
    for pixel in data.chunks_exact(4) {
        if pixel[3] == 0 {
            return true;
        }
    }
    false
}
/// Copy of `data` (RGBA8) with the RGB channels of every fully
/// transparent pixel (alpha == 0) zeroed; alpha bytes are untouched.
fn zero_transparent_rgba8(data: &[u8]) -> Vec<u8> {
    let mut out = data.to_vec();
    out.chunks_exact_mut(4)
        .filter(|pixel| pixel[3] == 0)
        .for_each(|pixel| pixel[..3].fill(0));
    out
}
fn zlib_store_unfiltered(packed_rows: &[u8], row_bytes: usize, height: usize) -> Vec<u8> {
let filtered_row = row_bytes + 1; let total_filtered = filtered_row * height;
let num_blocks = if total_filtered == 0 {
1
} else {
total_filtered.div_ceil(65535)
};
let out_size = 2 + 5 * num_blocks + total_filtered + 4;
let mut out = Vec::with_capacity(out_size);
out.push(0x78);
out.push(0x01);
if height == 0 {
write_stored_block_header(&mut out, 0, true);
out.extend_from_slice(&zenflate::adler32(1, &[]).to_be_bytes());
return out;
}
let mut adler = 1u32;
let mut block_remaining: usize = 0;
let mut filtered_remaining = total_filtered;
for y in 0..height {
let row = &packed_rows[y * row_bytes..(y + 1) * row_bytes];
if block_remaining == 0 {
let block_len = filtered_remaining.min(65535);
let is_final = block_len >= filtered_remaining;
write_stored_block_header(&mut out, block_len, is_final);
block_remaining = block_len;
}
out.push(0u8);
block_remaining -= 1;
filtered_remaining -= 1;
let mut data = row;
while !data.is_empty() {
if block_remaining == 0 {
let block_len = filtered_remaining.min(65535);
let is_final = block_len >= filtered_remaining;
write_stored_block_header(&mut out, block_len, is_final);
block_remaining = block_len;
}
let n = data.len().min(block_remaining);
out.extend_from_slice(&data[..n]);
data = &data[n..];
block_remaining -= n;
filtered_remaining -= n;
}
let s1 = adler & 0xFFFF;
let s2 = ((adler >> 16) + s1) % 65521;
adler = (s2 << 16) | s1;
adler = zenflate::adler32(adler, row);
}
out.extend_from_slice(&adler.to_be_bytes());
out
}
/// Append a complete zlib stream to `out` that stores `height` rows of
/// `row_bytes` packed pixel bytes uncompressed (DEFLATE stored blocks,
/// BTYPE=00), prefixing every row with a 0x00 PNG filter byte.
/// Rows may straddle the 65535-byte stored-block limit; a new block header
/// is opened whenever the current block fills up.
pub(crate) fn write_zlib_stored_inline(
out: &mut Vec<u8>,
packed_rows: &[u8],
row_bytes: usize,
height: usize,
) {
let filtered_row = row_bytes + 1;
let total_filtered = filtered_row * height;
// zlib header: CMF=0x78 (deflate, 32K window), FLG=0x01 (valid check bits, no dict).
out.push(0x78);
out.push(0x01);
if height == 0 {
// Degenerate stream: one final empty stored block plus the Adler-32 of "".
write_stored_block_header(out, 0, true);
out.extend_from_slice(&zenflate::adler32(1, &[]).to_be_bytes());
return;
}
// Adler-32 starts at 1 per RFC 1950.
let mut adler = 1u32;
// Bytes still writable into the currently open stored block.
let mut block_remaining: usize = 0;
let mut filtered_remaining = total_filtered;
for y in 0..height {
let row = &packed_rows[y * row_bytes..(y + 1) * row_bytes];
if block_remaining == 0 {
let block_len = filtered_remaining.min(65535);
// Final block iff everything that's left fits in this one.
let is_final = block_len >= filtered_remaining;
write_stored_block_header(out, block_len, is_final);
block_remaining = block_len;
}
// PNG filter type 0 (None) for this row.
out.push(0u8);
block_remaining -= 1;
filtered_remaining -= 1;
let mut data = row;
while !data.is_empty() {
if block_remaining == 0 {
// Row spilled past the block boundary; open the next stored block.
let block_len = filtered_remaining.min(65535);
let is_final = block_len >= filtered_remaining;
write_stored_block_header(out, block_len, is_final);
block_remaining = block_len;
}
let n = data.len().min(block_remaining);
out.extend_from_slice(&data[..n]);
data = &data[n..];
block_remaining -= n;
filtered_remaining -= n;
}
// Fold the single 0x00 filter byte into the running Adler-32 by hand:
// a zero byte leaves s1 unchanged and adds s1 into s2 (mod 65521).
// NOTE(review): assumes zenflate::adler32 returns s1/s2 already reduced
// below 65521 — TODO confirm; otherwise s1 here could exceed the modulus.
let s1 = adler & 0xFFFF;
let s2 = ((adler >> 16) + s1) % 65521;
adler = (s2 << 16) | s1;
adler = zenflate::adler32(adler, row);
}
// zlib trailer: big-endian Adler-32 over the filtered byte stream.
out.extend_from_slice(&adler.to_be_bytes());
}
/// Emit a 5-byte DEFLATE stored-block (BTYPE=00) header: the BFINAL byte,
/// then LEN and NLEN (= one's complement of LEN) as little-endian u16.
/// Only the low 16 bits of `len` are encoded; callers cap blocks at 65535.
fn write_stored_block_header(out: &mut Vec<u8>, len: usize, is_final: bool) {
    out.push(u8::from(is_final));
    let len16 = (len & 0xFFFF) as u16;
    out.extend_from_slice(&len16.to_le_bytes());
    out.extend_from_slice(&(!len16).to_le_bytes());
}
/// Recompress one already-filtered candidate stream with zenflate at effort
/// level 30. A stop request maps to `PngError::Stopped`; any other failure
/// maps to `PngError::InvalidInput`.
fn recompress_one(data: &[u8], cancel: &dyn Stop) -> crate::error::Result<Vec<u8>> {
    let capacity = Compressor::zlib_compress_bound(data.len());
    let mut buf = vec![0u8; capacity];
    let mut compressor = Compressor::new(CompressionLevel::new(30));
    let written = compressor
        .zlib_compress(data, &mut buf, cancel)
        .map_err(|e| match e {
            zenflate::CompressionError::Stopped(reason) => PngError::Stopped(reason),
            other => PngError::InvalidInput(alloc::format!("zenflate recompress failed: {other}")),
        })?;
    buf.truncate(written);
    Ok(buf)
}
fn zenflate_recompress(
candidates: &[(usize, Vec<u8>)],
cancel: &dyn Stop,
current_best: &mut Option<Vec<u8>>,
max_threads: usize,
) -> crate::error::Result<Option<Vec<u8>>> {
let mut best: Option<Vec<u8>> = None;
let results: Vec<crate::error::Result<Vec<u8>>> = if max_threads == 1 || candidates.len() <= 1 {
candidates
.iter()
.map(|(_size, data)| recompress_one(data, cancel))
.collect()
} else {
std::thread::scope(|s| {
let handles: Vec<_> = candidates
.iter()
.map(|(_size, data)| s.spawn(|| recompress_one(data, cancel)))
.collect();
handles.into_iter().map(|h| h.join().unwrap()).collect()
})
};
for result in results {
let compressed = result?;
let dominated = best.as_ref().is_some_and(|b| compressed.len() >= b.len())
|| current_best
.as_ref()
.is_some_and(|b| compressed.len() >= b.len());
if !dominated {
best = Some(compressed);
}
}
Ok(best)
}
/// Recompress one candidate stream with zenflate at the caller-chosen
/// FullOptimal `effort`. A stop request maps to `PngError::Stopped`; any
/// other failure maps to `PngError::InvalidInput`.
fn full_optimal_recompress_one(
    data: &[u8],
    effort: u32,
    cancel: &dyn Stop,
) -> crate::error::Result<Vec<u8>> {
    let capacity = Compressor::zlib_compress_bound(data.len());
    let mut buf = vec![0u8; capacity];
    let mut compressor = Compressor::new(CompressionLevel::new(effort));
    let written = compressor
        .zlib_compress(data, &mut buf, cancel)
        .map_err(|e| match e {
            zenflate::CompressionError::Stopped(reason) => PngError::Stopped(reason),
            other => PngError::InvalidInput(alloc::format!(
                "zenflate FullOptimal recompress failed: {other}"
            )),
        })?;
    buf.truncate(written);
    Ok(buf)
}
fn zenflate_full_optimal_recompress(
candidates: &[(usize, Vec<u8>)],
effort: u32,
cancel: &dyn Stop,
current_best: &mut Option<Vec<u8>>,
max_threads: usize,
) -> crate::error::Result<Option<Vec<u8>>> {
let mut best: Option<Vec<u8>> = None;
let results: Vec<crate::error::Result<Vec<u8>>> = if max_threads == 1 || candidates.len() <= 1 {
candidates
.iter()
.map(|(_size, data)| full_optimal_recompress_one(data, effort, cancel))
.collect()
} else {
std::thread::scope(|s| {
let handles: Vec<_> = candidates
.iter()
.map(|(_size, data)| s.spawn(|| full_optimal_recompress_one(data, effort, cancel)))
.collect();
handles.into_iter().map(|h| h.join().unwrap()).collect()
})
};
for result in results {
let compressed = result?;
let dominated = best.as_ref().is_some_and(|b| compressed.len() >= b.len())
|| current_best
.as_ref()
.is_some_and(|b| compressed.len() >= b.len());
if !dominated {
best = Some(compressed);
}
}
Ok(best)
}
/// Run zopfli on the candidate streams with an iteration budget derived from
/// the remaining time, returning a result only when it beats `current_best`.
/// A short calibration run on the first candidate estimates ms/iteration.
#[cfg(feature = "zopfli")]
fn zopfli_adaptive(
candidates: &[(usize, Vec<u8>)],
cancel: &dyn Stop,
deadline: &dyn Stop,
remaining_ns: Option<&dyn Fn() -> Option<u64>>,
current_best: &mut Option<Vec<u8>>,
max_threads: usize,
) -> crate::error::Result<Option<Vec<u8>>> {
use std::time::Instant;
// Either cancellation or the deadline stops the zopfli runs.
let combined = almost_enough::OrStop::new(cancel, deadline);
let mut best: Option<Vec<u8>> = None;
// Keep a candidate only if it is strictly smaller than both the local best
// and the caller's current best (ties keep the incumbent).
let mut update_best = |compressed: Vec<u8>| {
let dominated = best.as_ref().is_some_and(|b| compressed.len() >= b.len())
|| current_best
.as_ref()
.is_some_and(|b| compressed.len() >= b.len());
if !dominated {
best = Some(compressed);
}
};
// Calibration: time 5 iterations on the first candidate to estimate cost;
// its output is also a usable result.
let calibration_iters = 5u64;
let cal_start = Instant::now();
let cal_result = compress_with_zopfli_n(&candidates[0].1, calibration_iters, &combined)?;
let cal_elapsed = cal_start.elapsed();
update_best(cal_result);
let ms_per_iter = cal_elapsed.as_secs_f64() * 1000.0 / calibration_iters as f64;
let effective_parallelism = if max_threads == 1 {
1.0
} else {
num_cpus() as f64
};
// Translate the remaining time budget into an iteration count per candidate.
let max_iters = match remaining_ns.and_then(|f| f()) {
Some(ns) => {
let remaining_ms = ns as f64 / 1_000_000.0;
// Not enough time left for a meaningful run beyond calibration.
if remaining_ms < ms_per_iter * 2.0 {
return Ok(best);
}
let n_candidates = candidates.len().min(3) as f64;
let parallel_factor = n_candidates.min(effective_parallelism);
let ms_per_candidate = remaining_ms * parallel_factor / n_candidates;
let iters = (ms_per_candidate / ms_per_iter).floor() as u64;
iters.clamp(5, 100)
}
// No deadline: fall back to a fixed iteration count.
None => 50u64,
};
// The calibration run already covered this many iterations; nothing to add.
if max_iters <= calibration_iters {
return Ok(best);
}
let zopfli_results: Vec<crate::error::Result<Vec<u8>>> = if max_threads == 1
|| candidates.len() <= 1
{
candidates
.iter()
.map(|(_size, data)| compress_with_zopfli_n(data, max_iters, &combined))
.collect()
} else {
// Scoped threads: one zopfli run per candidate, borrowing the slice.
std::thread::scope(|s| {
let handles: Vec<_> = candidates
.iter()
.map(|(_size, data)| s.spawn(|| compress_with_zopfli_n(data, max_iters, &combined)))
.collect();
handles.into_iter().map(|h| h.join().unwrap()).collect()
})
};
for result in zopfli_results {
update_best(result?);
}
Ok(best)
}
/// Compress `data` as a zlib stream with zopfli, running a bounded number of
/// iterations (at least one). Errors are mapped via [`zenzop_err`], which
/// distinguishes stop requests from real failures.
#[cfg(feature = "zopfli")]
fn compress_with_zopfli_n(
    data: &[u8],
    iterations: u64,
    stop: &dyn Stop,
) -> crate::error::Result<Vec<u8>> {
    use std::io::Write;
    let iteration_count = core::num::NonZeroU64::new(iterations.max(1)).unwrap();
    let mut options = zenzop::Options::default();
    options.iteration_count = iteration_count;
    options.enhanced = true;
    let mut encoder = zenzop::ZlibEncoder::with_stop(options, Vec::new(), stop)
        .map_err(|e| zenzop_err(e, stop))?;
    encoder.write_all(data).map_err(|e| zenzop_err(e, stop))?;
    let finished = encoder.finish().map_err(|e| zenzop_err(e, stop))?;
    Ok(finished.into_inner())
}
/// Map a zenzop I/O error into a `PngError`. If the stop source has fired,
/// the failure is attributed to cancellation/timeout rather than to zopfli.
#[cfg(feature = "zopfli")]
fn zenzop_err(e: std::io::Error, stop: &dyn Stop) -> PngError {
    match stop.check() {
        Err(reason) => PngError::Stopped(reason),
        Ok(()) => PngError::InvalidInput(alloc::format!("zopfli compression failed: {e}")),
    }
}
/// Best-effort logical CPU count; falls back to 1 when the platform cannot
/// report parallelism.
#[cfg(feature = "zopfli")]
fn num_cpus() -> usize {
    match std::thread::available_parallelism() {
        Ok(n) => n.get(),
        Err(_) => 1,
    }
}
#[cfg(test)]
mod tests {
use super::*;
use enough::Unstoppable;
#[test]
fn effort_0_is_store() {
let p = EffortParams::from_effort(0);
assert_eq!(p.zenflate_effort, 0);
assert!(p.screen_is_final);
assert_eq!(p.strategies.len(), 1);
assert!(!p.use_recompress);
assert!(p.refine_efforts.is_empty());
}
#[test]
fn effort_1_single_paeth() {
let p = EffortParams::from_effort(1);
assert_eq!(p.zenflate_effort, 1);
assert!(p.screen_is_final);
assert_eq!(p.strategies.len(), 1);
}
#[test]
fn effort_2_minimal_strategies() {
let p = EffortParams::from_effort(2);
assert_eq!(p.strategies.len(), MINIMAL_STRATEGIES.len());
assert!(p.screen_is_final);
}
#[test]
fn effort_3_7_screen_only() {
for effort in 3..=7 {
let p = EffortParams::from_effort(effort);
assert!(p.screen_is_final, "effort {effort} should be screen-only");
assert!(p.refine_efforts.is_empty());
assert!(!p.use_recompress);
}
}
#[test]
fn effort_8_15_have_refinement() {
for effort in 8..=15 {
let p = EffortParams::from_effort(effort);
assert!(
!p.screen_is_final,
"effort {effort} should not be screen-only"
);
assert!(
!p.refine_efforts.is_empty(),
"effort {effort} should have refine"
);
assert!(p.brute_configs.is_empty() || effort >= 10);
}
}
#[test]
fn effort_17_has_brute_force() {
let p = EffortParams::from_effort(17);
assert!(!p.brute_configs.is_empty());
}
#[test]
fn effort_24_has_fork() {
let p = EffortParams::from_effort(24);
assert!(!p.fork_brute_efforts.is_empty());
}
#[test]
fn effort_29_has_beam() {
let p = EffortParams::from_effort(29);
assert!(!p.beam_brute_configs.is_empty());
}
#[test]
fn effort_30_maniac() {
let p = EffortParams::from_effort(30);
assert!(p.use_recompress);
assert!(!p.brute_configs.is_empty());
assert!(!p.fork_brute_efforts.is_empty());
assert!(!p.beam_brute_configs.is_empty());
assert!(p.full_optimal_effort.is_none());
}
#[test]
fn effort_31_full_optimal() {
let p = EffortParams::from_effort(31);
assert!(p.full_optimal_effort.is_some());
assert!(p.use_recompress);
}
#[test]
fn effort_50_medium_tier() {
let p = EffortParams::from_effort(50);
assert!(p.full_optimal_effort.is_some());
assert!(p.use_recompress);
}
#[test]
fn effort_100_full_tier() {
let p = EffortParams::from_effort(100);
assert!(p.full_optimal_effort.is_some());
assert!(p.use_recompress);
assert!(!p.brute_configs.is_empty());
}
#[test]
fn effort_monotonicity_zenflate_effort() {
let mut prev = 0;
for effort in 0..=30 {
let p = EffortParams::from_effort(effort);
assert!(
p.zenflate_effort >= prev,
"zenflate effort should be monotonic: e{effort} = {} < {prev}",
p.zenflate_effort
);
prev = p.zenflate_effort;
}
}
#[test]
fn bpp1_gets_brute_at_lower_effort() {
let p = EffortParams::from_effort_and_bpp(20, 1);
assert!(!p.brute_configs.is_empty());
}
#[test]
fn bpp4_no_extra_brute() {
let p = EffortParams::from_effort_and_bpp(20, 4);
let p_raw = EffortParams::from_effort(20);
assert_eq!(p.brute_configs.len(), p_raw.brute_configs.len());
}
#[test]
fn try_compress_basic() {
let data: Vec<u8> = (0..=255).collect::<Vec<u8>>().repeat(4);
let mut compressors = [Compressor::new(CompressionLevel::new(1))];
let bound = Compressor::zlib_compress_bound(data.len());
let mut compress_buf = vec![0u8; bound];
let mut verify_buf = vec![0u8; data.len()];
let mut best = None;
let size = try_compress(
&data,
&mut compressors,
&mut compress_buf,
&mut verify_buf,
&mut best,
&Unstoppable,
)
.unwrap();
assert!(size < data.len());
assert!(best.is_some());
let compressed = best.unwrap();
let decompressed = miniz_oxide::inflate::decompress_to_vec_zlib(&compressed).unwrap();
assert_eq!(decompressed, data);
}
#[test]
fn zlib_store_empty() {
let result = zlib_store_unfiltered(&[], 4, 0);
let decompressed = miniz_oxide::inflate::decompress_to_vec_zlib(&result).unwrap();
assert!(decompressed.is_empty());
}
#[test]
fn zlib_store_small() {
let row = [10u8, 20, 30];
let result = zlib_store_unfiltered(&row, 3, 1);
let decompressed = miniz_oxide::inflate::decompress_to_vec_zlib(&result).unwrap();
assert_eq!(decompressed.len(), 4); assert_eq!(decompressed[0], 0); assert_eq!(&decompressed[1..], &row);
}
#[test]
fn zlib_store_multi_row() {
let data = [1u8, 2, 3, 4, 5, 6];
let result = zlib_store_unfiltered(&data, 3, 2);
let decompressed = miniz_oxide::inflate::decompress_to_vec_zlib(&result).unwrap();
assert_eq!(decompressed.len(), 8);
assert_eq!(decompressed[0], 0); assert_eq!(&decompressed[1..4], &[1, 2, 3]);
assert_eq!(decompressed[4], 0); assert_eq!(&decompressed[5..8], &[4, 5, 6]);
}
#[test]
fn write_stored_inline_matches_standalone() {
let data = [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let standalone = zlib_store_unfiltered(&data, 4, 3);
let mut inline = Vec::new();
write_zlib_stored_inline(&mut inline, &data, 4, 3);
assert_eq!(standalone, inline);
}
#[test]
fn write_stored_inline_zero_height() {
let mut out = Vec::new();
write_zlib_stored_inline(&mut out, &[], 4, 0);
let decompressed = miniz_oxide::inflate::decompress_to_vec_zlib(&out).unwrap();
assert!(decompressed.is_empty());
}
#[test]
fn stored_block_header_format() {
let mut out = Vec::new();
write_stored_block_header(&mut out, 100, false);
assert_eq!(out.len(), 5);
assert_eq!(out[0], 0); assert_eq!(out[1], 100); assert_eq!(out[2], 0);
let mut out2 = Vec::new();
write_stored_block_header(&mut out2, 100, true);
assert_eq!(out2[0], 1); }
#[test]
fn compress_filtered_effort_0() {
let data = vec![128u8; 12 * 4]; let opts = super::super::CompressOptions {
cancel: &Unstoppable,
deadline: &Unstoppable,
parallel: false,
remaining_ns: None,
max_threads: 0,
};
let result = compress_filtered(&data, 12, 4, 3, 0, opts, None).unwrap();
let decompressed = miniz_oxide::inflate::decompress_to_vec_zlib(&result).unwrap();
assert_eq!(decompressed.len(), 52);
}
#[test]
fn compress_filtered_effort_1() {
let data = vec![128u8; 12 * 4];
let opts = super::super::CompressOptions {
cancel: &Unstoppable,
deadline: &Unstoppable,
parallel: false,
remaining_ns: None,
max_threads: 0,
};
let result = compress_filtered(&data, 12, 4, 3, 1, opts, None).unwrap();
let decompressed = miniz_oxide::inflate::decompress_to_vec_zlib(&result).unwrap();
assert_eq!(decompressed.len(), 52);
}
#[test]
fn compress_filtered_effort_10_with_stats() {
let data = vec![128u8; 12 * 4];
let opts = super::super::CompressOptions {
cancel: &Unstoppable,
deadline: &Unstoppable,
parallel: false,
remaining_ns: None,
max_threads: 0,
};
let mut stats = PhaseStats::default();
let result = compress_filtered(&data, 12, 4, 3, 10, opts, Some(&mut stats)).unwrap();
let decompressed = miniz_oxide::inflate::decompress_to_vec_zlib(&result).unwrap();
assert_eq!(decompressed.len(), 52);
assert!(
stats.phases.len() >= 2,
"should have ≥2 phases, got {}",
stats.phases.len()
);
assert!(stats.raw_size > 0);
}
#[test]
#[cfg(not(target_arch = "wasm32"))] fn compress_filtered_parallel() {
let data = vec![128u8; 12 * 4];
let opts = super::super::CompressOptions {
cancel: &Unstoppable,
deadline: &Unstoppable,
parallel: true,
remaining_ns: None,
max_threads: 0,
};
let result = compress_filtered(&data, 12, 4, 3, 7, opts, None).unwrap();
let decompressed = miniz_oxide::inflate::decompress_to_vec_zlib(&result).unwrap();
assert_eq!(decompressed.len(), 52);
}
#[test]
fn compress_filtered_rgba_transparent() {
let mut data = vec![0u8; 16 * 4]; data[0] = 255; data[1] = 128; data[2] = 64; data[3] = 0;
let opts = super::super::CompressOptions {
cancel: &Unstoppable,
deadline: &Unstoppable,
parallel: false,
remaining_ns: None,
max_threads: 0,
};
let result = compress_filtered(&data, 16, 4, 4, 2, opts, None).unwrap();
let decompressed = miniz_oxide::inflate::decompress_to_vec_zlib(&result).unwrap();
assert_eq!(decompressed.len(), 4 * (1 + 16));
}
}
// Tests for the zopfli path: stop/deadline plumbing, adaptive budgeting,
// and the never-regress-vs-zenflate guarantee.
#[cfg(all(test, feature = "zopfli"))]
mod zopfli_tests {
use super::*;
use core::sync::atomic::{AtomicI64, Ordering};
// Deadline stub that reports TimedOut after a fixed number of check() calls.
struct CallCountDeadline(AtomicI64);
impl CallCountDeadline {
fn new(calls: i64) -> Self {
Self(AtomicI64::new(calls))
}
}
impl Stop for CallCountDeadline {
fn check(&self) -> Result<(), enough::StopReason> {
let prev = self.0.fetch_sub(1, Ordering::Relaxed);
if prev <= 0 {
Err(enough::StopReason::TimedOut)
} else {
Ok(())
}
}
}
// remaining_ns stub that reports a zero budget after N calls.
struct CallCountRemainingNs(AtomicI64);
impl CallCountRemainingNs {
fn new(calls: i64) -> Self {
Self(AtomicI64::new(calls))
}
fn as_fn(&self) -> impl Fn() -> Option<u64> + '_ {
move || {
let prev = self.0.fetch_sub(1, Ordering::Relaxed);
if prev <= 0 {
Some(0)
} else {
Some(1_000_000_000)
}
}
}
}
// Cancel stub that reports Cancelled after a fixed number of check() calls.
struct CallCountCancel(AtomicI64);
impl CallCountCancel {
fn new(calls: i64) -> Self {
Self(AtomicI64::new(calls))
}
}
impl Stop for CallCountCancel {
fn check(&self) -> Result<(), enough::StopReason> {
let prev = self.0.fetch_sub(1, Ordering::Relaxed);
if prev <= 0 {
Err(enough::StopReason::Cancelled)
} else {
Ok(())
}
}
}
// 2 KiB of compressible-but-nontrivial input.
fn test_data() -> Vec<u8> {
let pattern: Vec<u8> = (0..=255).collect();
pattern.repeat(8) }
fn verify_zlib(compressed: &[u8], expected: &[u8]) {
let decompressed = miniz_oxide::inflate::decompress_to_vec_zlib(compressed)
.expect("decompression failed — zlib stream is invalid");
assert_eq!(decompressed, expected);
}
#[test]
fn zopfli_unlimited_returns_valid_output() {
let data = test_data();
let stop = enough::Unstoppable;
let result = compress_with_zopfli_n(&data, 5, &stop).unwrap();
verify_zlib(&result, &data);
}
// A deadline firing mid-run must still yield a valid (partial-effort) stream.
#[test]
fn zopfli_deadline_expiry_returns_valid_output() {
let data = test_data();
let cancel = enough::Unstoppable;
let deadline = CallCountDeadline::new(2);
let stop = almost_enough::OrStop::new(&cancel, &deadline);
let result = compress_with_zopfli_n(&data, 50, &stop).unwrap();
verify_zlib(&result, &data);
}
// Cancellation, by contrast, surfaces as a Stopped error.
#[test]
fn zopfli_cancel_returns_stopped() {
let data = test_data();
let cancel = CallCountCancel::new(2);
let result = compress_with_zopfli_n(&data, 50, &cancel).map_err(|e| e.decompose().0);
assert!(
matches!(
result,
Err(PngError::Stopped(enough::StopReason::Cancelled))
),
"expected Stopped(Cancelled), got {result:?}",
);
}
#[test]
fn or_stop_cancel_takes_priority() {
let cancel = CallCountCancel::new(0); let deadline = CallCountDeadline::new(0); let stop = almost_enough::OrStop::new(&cancel, &deadline);
let result = stop.check();
assert!(matches!(result, Err(enough::StopReason::Cancelled)));
}
#[test]
fn or_stop_deadline_fires_timed_out() {
let cancel = enough::Unstoppable;
let deadline = CallCountDeadline::new(0); let stop = almost_enough::OrStop::new(&cancel, &deadline);
let result = stop.check();
assert!(matches!(result, Err(enough::StopReason::TimedOut)));
}
#[test]
fn or_stop_neither_fires() {
let cancel = enough::Unstoppable;
let deadline = CallCountDeadline::new(100);
let stop = almost_enough::OrStop::new(&cancel, &deadline);
assert!(stop.check().is_ok());
}
#[test]
fn zopfli_adaptive_unlimited_returns_valid() {
let data = test_data();
let compressed_size = {
let c = compress_with_zopfli_n(&data, 5, &enough::Unstoppable).unwrap();
c.len()
};
let candidates = vec![(compressed_size, data.clone())];
let cancel = enough::Unstoppable;
let deadline = enough::Unstoppable;
let mut current_best = None;
let result =
zopfli_adaptive(&candidates, &cancel, &deadline, None, &mut current_best, 1).unwrap();
assert!(result.is_some(), "should find a result");
verify_zlib(result.as_ref().unwrap(), &data);
}
// Budget running out after calibration still returns the calibration result.
#[test]
fn zopfli_adaptive_deadline_expiry_returns_valid() {
let data = test_data();
let compressed_size = {
let c = compress_with_zopfli_n(&data, 5, &enough::Unstoppable).unwrap();
c.len()
};
let candidates = vec![(compressed_size, data.clone())];
let cancel = enough::Unstoppable;
let deadline = CallCountDeadline::new(10);
let remaining = CallCountRemainingNs::new(10);
let remaining_fn = remaining.as_fn();
let mut current_best = None;
let result = zopfli_adaptive(
&candidates,
&cancel,
&deadline,
Some(&remaining_fn),
&mut current_best,
1,
)
.unwrap();
assert!(result.is_some(), "should have calibration result");
verify_zlib(result.as_ref().unwrap(), &data);
}
#[test]
fn zopfli_adaptive_cancel_returns_stopped() {
let data = test_data();
let compressed_size = {
let c = compress_with_zopfli_n(&data, 5, &enough::Unstoppable).unwrap();
c.len()
};
let candidates = vec![(compressed_size, data.clone())];
let cancel = CallCountCancel::new(2);
let deadline = enough::Unstoppable;
let mut current_best = None;
let result = zopfli_adaptive(&candidates, &cancel, &deadline, None, &mut current_best, 1)
.map_err(|e| e.decompose().0);
assert!(
matches!(
result,
Err(PngError::Stopped(enough::StopReason::Cancelled))
),
"expected Stopped(Cancelled), got {result:?}",
);
}
fn zenflate_baseline(data: &[u8], level: u32) -> Vec<u8> {
let mut compressor = Compressor::new(CompressionLevel::new(level));
let bound = Compressor::zlib_compress_bound(data.len());
let mut buf = vec![0u8; bound];
let len = compressor
.zlib_compress(data, &mut buf, enough::Unstoppable)
.unwrap();
buf[..len].to_vec()
}
// zopfli_adaptive either beats the seeded zenflate best or returns None.
#[test]
fn zopfli_adaptive_never_regresses_vs_zenflate() {
let data = test_data();
let zenflate_result = zenflate_baseline(&data, 12);
let zenflate_size = zenflate_result.len();
let candidates = vec![(zenflate_size, data.clone())];
let cancel = enough::Unstoppable;
let deadline = enough::Unstoppable;
let mut current_best = Some(zenflate_result.clone());
let result =
zopfli_adaptive(&candidates, &cancel, &deadline, None, &mut current_best, 1).unwrap();
if let Some(ref better) = result {
assert!(
better.len() < zenflate_size,
"zopfli ({}) must be strictly smaller than zenflate ({zenflate_size})",
better.len(),
);
verify_zlib(better, &data);
}
}
// Same guarantee with several candidate inputs in play at once.
#[test]
fn zopfli_adaptive_multi_candidate_never_regresses() {
let patterns: Vec<Vec<u8>> = vec![
(0..=255u8).collect::<Vec<_>>().repeat(8),
(0..=255u8).rev().collect::<Vec<_>>().repeat(8),
(0..=255u8)
.flat_map(|b| [b, b])
.collect::<Vec<_>>()
.repeat(4),
];
let candidates: Vec<(usize, Vec<u8>)> = patterns
.iter()
.map(|p| {
let baseline = zenflate_baseline(p, 12);
(baseline.len(), p.clone())
})
.collect();
let best_pattern_idx = candidates
.iter()
.enumerate()
.min_by_key(|(_, (size, _))| *size)
.unwrap()
.0;
let zenflate_best = zenflate_baseline(&patterns[best_pattern_idx], 12);
let zenflate_best_size = zenflate_best.len();
let cancel = enough::Unstoppable;
let deadline = enough::Unstoppable;
let mut current_best = Some(zenflate_best.clone());
let result =
zopfli_adaptive(&candidates, &cancel, &deadline, None, &mut current_best, 1).unwrap();
if let Some(ref better) = result {
assert!(
better.len() < zenflate_best_size,
"zopfli ({}) must be strictly smaller than zenflate ({zenflate_best_size})",
better.len(),
);
let decompressed =
miniz_oxide::inflate::decompress_to_vec_zlib(better).expect("invalid zlib");
assert!(
patterns.contains(&decompressed),
"decompressed data doesn't match any candidate",
);
}
}
}