use crate::bit_writer::BitWriter;
#[allow(unused_imports)]
use crate::debug_rect;
use crate::entropy_coding::encode::write_tokens_ans;
use crate::error::Result;
use crate::modular::channel::{Channel, ModularImage};
use crate::modular::rct::{RctType, forward_rct};
pub(crate) use super::encode_primitives::*;
pub(crate) use super::encode_transforms::*;
pub(crate) use super::encode_tree::*;
/// Writes the transform count using the 2-bit-selector U32 encoding:
/// selector 0/1 encode the value directly, selector 2 adds 4 extra bits
/// (value - 2), selector 3 adds 8 extra bits (value - 18).
pub(crate) fn write_num_transforms(writer: &mut BitWriter, num_transforms: u32) -> Result<()> {
    let (selector, extra) = match num_transforms {
        0 => (0u64, None),
        1 => (1u64, None),
        2..=17 => (2u64, Some((4usize, (num_transforms - 2) as u64))),
        _ => (3u64, Some((8usize, (num_transforms - 18) as u64))),
    };
    writer.write(2, selector)?;
    if let Some((nbits, value)) = extra {
        writer.write(nbits, value)?;
    }
    Ok(())
}
/// Collects gradient-prediction residuals for every channel of `image`,
/// folding runs of identical packed residuals into LZ77 run tokens when they
/// exceed `K_LZ77_MIN_LENGTH`.
///
/// Runs never cross a channel boundary: any pending run is flushed before the
/// next channel starts, and once more after the last channel.
fn collect_residuals_with_prediction(image: &ModularImage) -> Vec<Token> {
    let mut tokens = Vec::new();
    // Length of the current run of residuals equal to `last_value`.
    let mut current_run = 0usize;
    // Count of values emitted so far (raw tokens plus run-expanded values).
    let mut num_decoded = 0usize; let mut last_value = 0u32; let mut debug_count = 0;
    for channel in &image.channels {
        // Flush the previous channel's pending run: a run token only if long
        // enough to pay for itself, otherwise raw copies of the value.
        if current_run > K_LZ77_MIN_LENGTH {
            tokens.push(Token::Lz77Run(current_run));
            num_decoded += current_run;
        } else {
            for _ in 0..current_run {
                tokens.push(Token::Raw(last_value));
                num_decoded += 1;
            }
        }
        current_run = 0;
        // Sentinel so the first pixel of this channel can never extend a run.
        last_value = u32::MAX;
        let width = channel.width();
        let height = channel.height();
        for y in 0..height {
            for x in 0..width {
                let pixel = channel.get(x, y);
                // Neighbors for the gradient predictor; borders fall back to
                // 0 (no left) or to `left` (no top / topleft).
                let left = if x > 0 { channel.get(x - 1, y) } else { 0 };
                let top = if y > 0 { channel.get(x, y - 1) } else { left };
                let topleft = if x > 0 && y > 0 {
                    channel.get(x - 1, y - 1)
                } else {
                    left
                };
                let prediction = predict_gradient(left, top, topleft);
                let residual = pixel - prediction;
                let packed = pack_signed(residual);
                // Trace only the first few residuals for debugging.
                if debug_count < 20 {
                    let _channel_idx = image
                        .channels
                        .iter()
                        .position(|c| std::ptr::eq(c, channel))
                        .unwrap();
                    crate::trace::debug_eprintln!(
                        "RESIDUAL[{}]: ch={} y={} x={} pixel={}, pred={}, residual={}, packed={}",
                        debug_count,
                        _channel_idx,
                        y,
                        x,
                        pixel,
                        prediction,
                        residual,
                        packed
                    );
                    debug_count += 1;
                }
                // A value extends the run only if it repeats the previous one
                // (and at least one value has been emitted already).
                let can_use_lz77 = num_decoded > 0 && packed == last_value;
                if can_use_lz77 {
                    current_run += 1;
                } else {
                    // Run broken: flush it (run token or raw copies)...
                    if current_run > K_LZ77_MIN_LENGTH {
                        tokens.push(Token::Lz77Run(current_run));
                        num_decoded += current_run;
                    } else {
                        for _ in 0..current_run {
                            tokens.push(Token::Raw(last_value));
                            num_decoded += 1;
                        }
                    }
                    current_run = 0;
                    // ...then emit the new value and start tracking it.
                    tokens.push(Token::Raw(packed));
                    num_decoded += 1;
                    last_value = packed;
                }
            }
        }
    }
    // Flush whatever run remains after the final channel.
    if current_run > K_LZ77_MIN_LENGTH {
        tokens.push(Token::Lz77Run(current_run));
    } else {
        for _ in 0..current_run {
            tokens.push(Token::Raw(last_value));
        }
    }
    tokens
}
/// Writes `image` as a modular sub-bitstream using gradient prediction plus
/// opportunistic LZ77 run tokens (see `collect_residuals_with_prediction`).
pub fn write_improved_modular_stream(
    image: &ModularImage,
    writer: &mut BitWriter,
    use_ans: bool,
) -> Result<()> {
    write_improved_modular_stream_inner(image, writer, false, use_ans)
}
/// Body of `write_improved_modular_stream`: encodes gradient residuals with
/// an LZ77-capable prefix code, falling back to the simple stream when no
/// run tokens were produced.
///
/// NOTE(review): `use_ans` only affects the fallback path; the LZ77 path
/// below always writes prefix-coded symbols.
fn write_improved_modular_stream_inner(
    image: &ModularImage,
    writer: &mut BitWriter,
    _skip_group_header: bool,
    use_ans: bool,
) -> Result<()> {
    let tokens = collect_residuals_with_prediction(image);
    let sparse_counts = build_sparse_histogram(&tokens);
    // Usage statistics (used by the debug trace below only).
    let _num_raw_used = sparse_counts[..K_NUM_RAW_SYMBOLS]
        .iter()
        .filter(|&&c| c > 0)
        .count();
    let _num_lz77_used = sparse_counts[K_LZ77_MIN_SYMBOL..]
        .iter()
        .filter(|&&c| c > 0)
        .count();
    let num_lz77_runs = tokens
        .iter()
        .filter(|t| matches!(t, Token::Lz77Run(_)))
        .count();
    crate::trace::debug_eprintln!(
        "IMPROVED: {} tokens, {} raw symbols used, {} lz77 tokens used, {} lz77 runs",
        tokens.len(),
        _num_raw_used,
        _num_lz77_used,
        num_lz77_runs
    );
    // LZ77 signalling costs bits; with no runs the plain stream is cheaper.
    if num_lz77_runs == 0 {
        return write_simple_modular_stream(image, writer, use_ans);
    }
    // Stream header flag bits — NOTE(review): mirrors the other writers in
    // this file; confirm exact field meaning against the modular header spec.
    writer.write(1, 1)?; writer.write(1, 1)?;
    let (tree_depths, tree_codes) = write_tree_histogram_for_gradient(writer)?;
    write_gradient_tree_tokens(writer, &tree_depths, &tree_codes)?;
    let (depths, codes) = write_sparse_lz77_histogram(writer, &sparse_counts)?;
    // Two more flag bits, then a 2-bit transform-count selector of 0
    // (same encoding as write_num_transforms → no transforms).
    writer.write(1, 1)?; writer.write(1, 1)?; writer.write(2, 0)?;
    for token in &tokens {
        match token {
            Token::Raw(value) => {
                // Hybrid-uint split: prefix-coded token plus raw extra bits.
                let (tok, nbits, extra) = encode_hybrid_uint_000(*value);
                let symbol = tok as usize;
                let depth = depths[symbol];
                let code = codes[symbol];
                if depth > 0 {
                    writer.write(depth as usize, code as u64)?;
                }
                if nbits > 0 {
                    writer.write(nbits as usize, extra as u64)?;
                }
            }
            Token::Lz77Run(count) => {
                // Run lengths are stored relative to the minimum LZ77 length.
                let adjusted = count - K_LZ77_MIN_LENGTH;
                let (tok, nbits, extra) = encode_hybrid_uint_lz77_length(adjusted as u32);
                let symbol = K_LZ77_MIN_SYMBOL + tok as usize;
                let depth = depths[symbol];
                let code = codes[symbol];
                if depth > 0 {
                    writer.write(depth as usize, code as u64)?;
                }
                if nbits > 0 {
                    writer.write(nbits as usize, extra as u64)?;
                }
                // Distance is always 1: runs repeat the previous value.
                let dist_symbol = 1u32;
                let (dist_tok, dist_nbits, dist_extra) = encode_hybrid_uint_000(dist_symbol);
                let dist_depth = depths[dist_tok as usize];
                let dist_code = codes[dist_tok as usize];
                if dist_depth > 0 {
                    writer.write(dist_depth as usize, dist_code as u64)?;
                }
                if dist_nbits > 0 {
                    writer.write(dist_nbits as usize, dist_extra as u64)?;
                }
            }
        }
    }
    crate::trace::debug_eprintln!(
        "LZ77 [bit {}]: Encoded {} tokens",
        writer.bits_written(),
        tokens.len()
    );
    writer.zero_pad_to_byte();
    Ok(())
}
/// Clamped-gradient predictor: `left + top - topleft`, pinned to the closed
/// interval spanned by `left` and `top`. (When `topleft` lies inside that
/// interval the raw gradient already does too, so clamping is equivalent to
/// the classic "if topleft < min → max; if topleft > max → min" formulation.)
#[inline]
fn predict_gradient(left: i32, top: i32, topleft: i32) -> i32 {
    let (lo, hi) = if left <= top { (left, top) } else { (top, left) };
    (left + top - topleft).clamp(lo, hi)
}
// When true, skip prediction entirely and code raw (sign-packed) pixel values.
const USE_ZERO_PREDICTOR: bool = false;
/// Writes `image` as a modular sub-bitstream with per-pixel gradient
/// prediction (or the zero predictor when `USE_ZERO_PREDICTOR` is set), no
/// transforms, and either ANS or hybrid prefix coding of the residuals.
pub fn write_simple_modular_stream(
    image: &ModularImage,
    writer: &mut BitWriter,
    use_ans: bool,
) -> Result<()> {
    // Collect sign-packed residuals across all channels, row-major.
    let mut residuals = Vec::new();
    for channel in &image.channels {
        let width = channel.width();
        let height = channel.height();
        for y in 0..height {
            for x in 0..width {
                let pixel = channel.get(x, y);
                let prediction = if USE_ZERO_PREDICTOR {
                    0
                } else {
                    // Borders fall back to 0 (no left) or `left` (no top).
                    let left = if x > 0 { channel.get(x - 1, y) } else { 0 };
                    let top = if y > 0 { channel.get(x, y - 1) } else { left };
                    let topleft = if x > 0 && y > 0 {
                        channel.get(x - 1, y - 1)
                    } else {
                        left
                    };
                    predict_gradient(left, top, topleft)
                };
                let residual = pixel - prediction;
                let packed = pack_signed(residual);
                residuals.push(packed);
            }
        }
    }
    // Header flag bits, then the (fixed) tree matching the chosen predictor.
    writer.write(1, 1)?; writer.write(1, 1)?;
    if USE_ZERO_PREDICTOR {
        write_zero_tree_complete(writer)?;
    } else {
        let (tree_depths, tree_codes) = write_tree_histogram_for_gradient(writer)?;
        write_gradient_tree_tokens(writer, &tree_depths, &tree_codes)?;
    }
    if use_ans {
        let (tokens, code) = build_ans_modular_code(&residuals);
        write_ans_modular_header(writer, &code)?;
        // Flag bits, then transform-count selector 0 (no transforms,
        // cf. write_num_transforms).
        writer.write(1, 1)?; writer.write(1, 1)?; writer.write(2, 0)?;
        write_ans_modular_tokens(writer, &tokens, &code)?;
    } else {
        let (encoded, max_token) = encode_residuals_hybrid(&residuals);
        let histogram = build_token_histogram(&encoded, max_token);
        let (depths, codes) = write_hybrid_data_histogram(writer, &histogram, max_token)?;
        writer.write(1, 1)?; writer.write(1, 1)?; writer.write(2, 0)?;
        write_hybrid_residuals(writer, &encoded, &depths, &codes)?;
    }
    writer.zero_pad_to_byte();
    Ok(())
}
/// Writes `image` as a modular stream with a palette transform over channels
/// `[begin_c, begin_c + num_c)`, falling back to the RCT-only or simple
/// encoders when palette analysis rejects the image.
pub fn write_modular_stream_with_palette(
    image: &ModularImage,
    writer: &mut BitWriter,
    use_ans: bool,
    begin_c: usize,
    num_c: usize,
) -> Result<()> {
    use super::palette::{analyze_palette, apply_palette};
    let max_colors = super::palette::MAX_PALETTE_COLORS;
    let analysis = analyze_palette(image, begin_c, num_c, max_colors);
    if !analysis.use_palette {
        // Palette not profitable: use the RCT path when there are enough
        // channels, otherwise the simple path.
        if image.channels.len() >= 3 {
            return write_modular_stream_with_rct_only(image, writer, use_ans);
        } else {
            return write_simple_modular_stream(image, writer, use_ans);
        }
    }
    let mut transformed = image.clone();
    let nb_colors = apply_palette(&mut transformed, begin_c, num_c, &analysis)?;
    crate::trace::debug_eprintln!(
        "PALETTE: {} unique colors, {} channels → palette({}) + index",
        analysis.num_colors,
        num_c,
        nb_colors
    );
    // Gradient-predict the palettized channels and sign-pack the residuals.
    let mut residuals = Vec::new();
    for channel in &transformed.channels {
        let width = channel.width();
        let height = channel.height();
        for y in 0..height {
            for x in 0..width {
                let pixel = channel.get(x, y);
                let left = if x > 0 { channel.get(x - 1, y) } else { 0 };
                let top = if y > 0 { channel.get(x, y - 1) } else { left };
                let topleft = if x > 0 && y > 0 {
                    channel.get(x - 1, y - 1)
                } else {
                    left
                };
                let prediction = predict_gradient(left, top, topleft);
                let residual = pixel - prediction;
                let packed = pack_signed(residual);
                residuals.push(packed);
            }
        }
    }
    // Header flag bits, then the gradient tree.
    writer.write(1, 1)?; writer.write(1, 1)?;
    let (tree_depths, tree_codes) = write_tree_histogram_for_gradient(writer)?;
    write_gradient_tree_tokens(writer, &tree_depths, &tree_codes)?;
    if use_ans {
        let (tokens, code) = build_ans_modular_code(&residuals);
        write_ans_modular_header(writer, &code)?;
        // Transform-count selector 1 (one transform), then the palette.
        writer.write(1, 1)?; writer.write(1, 1)?; writer.write(2, 1)?; write_palette_transform(writer, begin_c, num_c, nb_colors, 0, 0)?;
        write_ans_modular_tokens(writer, &tokens, &code)?;
    } else {
        let (encoded, max_token) = encode_residuals_hybrid(&residuals);
        let histogram = build_token_histogram(&encoded, max_token);
        let (depths, codes) = write_hybrid_data_histogram(writer, &histogram, max_token)?;
        writer.write(1, 1)?; writer.write(1, 1)?; writer.write(2, 1)?; write_palette_transform(writer, begin_c, num_c, nb_colors, 0, 0)?;
        write_hybrid_residuals(writer, &encoded, &depths, &codes)?;
    }
    writer.zero_pad_to_byte();
    Ok(())
}
/// Writes `image` as a modular stream after applying a lossy palette
/// transform (palette entries plus `nb_deltas` delta entries and an in-palette
/// predictor) over channels `[begin_c, begin_c + num_c)`. Falls back to the
/// RCT or simple paths when the lossy palette cannot be built.
pub fn write_modular_stream_with_lossy_palette(
    image: &ModularImage,
    writer: &mut BitWriter,
    use_ans: bool,
    begin_c: usize,
    num_c: usize,
    max_palette_colors: usize,
) -> Result<()> {
    use super::palette::apply_lossy_palette;
    let mut transformed = image.clone();
    let result = apply_lossy_palette(&mut transformed, begin_c, num_c, max_palette_colors);
    let result = match result {
        Some(r) => r,
        None => {
            // Lossy palette rejected: use lossless fallbacks instead.
            if image.channels.len() >= 3 {
                return write_modular_stream_with_rct(image, writer, use_ans);
            } else {
                return write_simple_modular_stream(image, writer, use_ans);
            }
        }
    };
    let nb_colors = result.nb_colors;
    let nb_deltas = result.nb_deltas;
    let predictor = result.predictor;
    crate::trace::debug_eprintln!(
        "LOSSY PALETTE: {} colors + {} deltas, predictor={}, {} channels → palette + index",
        nb_colors,
        nb_deltas,
        predictor,
        num_c,
    );
    // Gradient-predict the transformed channels and sign-pack the residuals.
    let mut residuals = Vec::new();
    for channel in &transformed.channels {
        let width = channel.width();
        let height = channel.height();
        for y in 0..height {
            for x in 0..width {
                let pixel = channel.get(x, y);
                let left = if x > 0 { channel.get(x - 1, y) } else { 0 };
                let top = if y > 0 { channel.get(x, y - 1) } else { left };
                let topleft = if x > 0 && y > 0 {
                    channel.get(x - 1, y - 1)
                } else {
                    left
                };
                let prediction = predict_gradient(left, top, topleft);
                let residual = pixel - prediction;
                let packed = pack_signed(residual);
                residuals.push(packed);
            }
        }
    }
    // Header flag bits, then the gradient tree.
    writer.write(1, 1)?; writer.write(1, 1)?;
    let (tree_depths, tree_codes) = write_tree_histogram_for_gradient(writer)?;
    write_gradient_tree_tokens(writer, &tree_depths, &tree_codes)?;
    if use_ans {
        let (tokens, code) = build_ans_modular_code(&residuals);
        write_ans_modular_header(writer, &code)?;
        // One transform (selector 1): the palette with delta entries.
        writer.write(1, 1)?; writer.write(1, 1)?; writer.write(2, 1)?; write_palette_transform(writer, begin_c, num_c, nb_colors, nb_deltas, predictor)?;
        write_ans_modular_tokens(writer, &tokens, &code)?;
    } else {
        let (encoded, max_token) = encode_residuals_hybrid(&residuals);
        let histogram = build_token_histogram(&encoded, max_token);
        let (depths, codes) = write_hybrid_data_histogram(writer, &histogram, max_token)?;
        writer.write(1, 1)?; writer.write(1, 1)?; writer.write(2, 1)?; write_palette_transform(writer, begin_c, num_c, nb_colors, nb_deltas, predictor)?;
        write_hybrid_residuals(writer, &encoded, &depths, &codes)?;
    }
    writer.zero_pad_to_byte();
    Ok(())
}
/// Writes `image` as a modular stream with an optional YCoCg RCT followed by
/// a squeeze transform; squeezed samples are coded directly (zero predictor,
/// sign-packed) with either ANS or hybrid prefix coding.
///
/// Falls back to the RCT-only or simple paths when `default_squeeze_params`
/// yields no squeeze steps for this image.
///
/// Fix: removed the dead `max_residual` accumulator — it was computed per
/// sample but never read.
pub fn write_modular_stream_with_squeeze(
    image: &ModularImage,
    writer: &mut BitWriter,
    use_ans: bool,
) -> Result<()> {
    use super::squeeze::{apply_squeeze, default_squeeze_params};
    let params = default_squeeze_params(image);
    if params.is_empty() {
        // Nothing to squeeze: reuse the non-squeeze encoders.
        if image.channels.len() >= 3 {
            return write_modular_stream_with_rct(image, writer, use_ans);
        } else {
            return write_simple_modular_stream(image, writer, use_ans);
        }
    }
    let mut transformed = image.clone();
    let has_rct = transformed.channels.len() >= 3;
    if has_rct {
        let rct_type = RctType::YCOCG;
        forward_rct(&mut transformed.channels, 0, rct_type)?;
    }
    apply_squeeze(&mut transformed, &params)?;
    crate::trace::debug_eprintln!(
        "SQUEEZE: {} steps, {} → {} channels, rct={}",
        params.len(),
        image.channels.len(),
        transformed.channels.len(),
        has_rct,
    );
    // Squeeze outputs are coded as-is (zero predictor), just sign-packed.
    let mut residuals = Vec::new();
    for channel in &transformed.channels {
        let width = channel.width();
        let height = channel.height();
        for y in 0..height {
            for x in 0..width {
                let pixel = channel.get(x, y);
                residuals.push(pack_signed(pixel));
            }
        }
    }
    // Header flag bits, then the zero-predictor tree.
    writer.write(1, 1)?; writer.write(1, 1)?;
    write_zero_tree_complete(writer)?;
    if use_ans {
        let (tokens, code) = build_ans_modular_code(&residuals);
        write_ans_modular_header(writer, &code)?;
        // Transform count uses the write_num_transforms selector encoding:
        // RCT + squeeze → selector 2 plus 4 extra bits (count - 2 = 0);
        // squeeze only → selector 1.
        writer.write(1, 1)?; writer.write(1, 1)?; if has_rct {
            writer.write(2, 2)?;
            writer.write(4, 0)?;
            write_rct_transform(writer, 0, RctType::YCOCG)?;
            write_squeeze_transform(writer, &params)?;
        } else {
            writer.write(2, 1)?; write_squeeze_transform(writer, &params)?;
        }
        write_ans_modular_tokens(writer, &tokens, &code)?;
    } else {
        let (encoded, max_token) = encode_residuals_hybrid(&residuals);
        let histogram = build_token_histogram(&encoded, max_token);
        let (depths, codes) = write_hybrid_data_histogram(writer, &histogram, max_token)?;
        writer.write(1, 1)?; writer.write(1, 1)?; if has_rct {
            writer.write(2, 2)?;
            writer.write(4, 0)?;
            write_rct_transform(writer, 0, RctType::YCOCG)?;
            write_squeeze_transform(writer, &params)?;
        } else {
            writer.write(2, 1)?; write_squeeze_transform(writer, &params)?;
        }
        write_hybrid_residuals(writer, &encoded, &depths, &codes)?;
    }
    writer.zero_pad_to_byte();
    Ok(())
}
/// Writes `image` as a modular stream, preferring a palette transform when
/// the palette heuristic accepts the image and otherwise using the
/// RCT (YCoCg) path.
pub fn write_modular_stream_with_rct(
    image: &ModularImage,
    writer: &mut BitWriter,
    use_ans: bool,
) -> Result<()> {
    match super::palette::should_use_palette(image) {
        Some((begin_c, num_c)) => {
            write_modular_stream_with_palette(image, writer, use_ans, begin_c, num_c)
        }
        None => write_modular_stream_with_rct_only(image, writer, use_ans),
    }
}
/// Writes `image` as a modular stream after a fixed YCoCg RCT on the first
/// three channels, with gradient prediction of the transformed samples.
/// Images with fewer than three channels use the simple path instead.
///
/// Fix: removed the dead `max_residual` accumulator — it was computed per
/// sample but never read.
fn write_modular_stream_with_rct_only(
    image: &ModularImage,
    writer: &mut BitWriter,
    use_ans: bool,
) -> Result<()> {
    if image.channels.len() < 3 {
        return write_simple_modular_stream(image, writer, use_ans);
    }
    let mut transformed = image.clone();
    let rct_type = RctType::YCOCG;
    forward_rct(&mut transformed.channels, 0, rct_type)?;
    crate::trace::debug_eprintln!(
        "RCT: Applied YCoCg transform to {} channels",
        transformed.channels.len()
    );
    // Gradient-predict the transformed channels and sign-pack the residuals.
    let mut residuals = Vec::new();
    for channel in &transformed.channels {
        let width = channel.width();
        let height = channel.height();
        for y in 0..height {
            for x in 0..width {
                let pixel = channel.get(x, y);
                // Borders fall back to 0 (no left) or `left` (no top).
                let left = if x > 0 { channel.get(x - 1, y) } else { 0 };
                let top = if y > 0 { channel.get(x, y - 1) } else { left };
                let topleft = if x > 0 && y > 0 {
                    channel.get(x - 1, y - 1)
                } else {
                    left
                };
                let prediction = predict_gradient(left, top, topleft);
                let residual = pixel - prediction;
                residuals.push(pack_signed(residual));
            }
        }
    }
    // Header flag bits, then the gradient tree.
    writer.write(1, 1)?; writer.write(1, 1)?;
    let (tree_depths, tree_codes) = write_tree_histogram_for_gradient(writer)?;
    write_gradient_tree_tokens(writer, &tree_depths, &tree_codes)?;
    if use_ans {
        let (tokens, code) = build_ans_modular_code(&residuals);
        write_ans_modular_header(writer, &code)?;
        // One transform (selector 1): the RCT.
        writer.write(1, 1)?; writer.write(1, 1)?; writer.write(2, 1)?; write_rct_transform(writer, 0, rct_type)?;
        write_ans_modular_tokens(writer, &tokens, &code)?;
    } else {
        let (encoded, max_token) = encode_residuals_hybrid(&residuals);
        let histogram = build_token_histogram(&encoded, max_token);
        let (depths, codes) = write_hybrid_data_histogram(writer, &histogram, max_token)?;
        writer.write(1, 1)?; writer.write(1, 1)?; writer.write(2, 1)?; write_rct_transform(writer, 0, rct_type)?;
        write_hybrid_residuals(writer, &encoded, &depths, &codes)?;
    }
    writer.zero_pad_to_byte();
    Ok(())
}
/// Writes `image` as a modular stream using the self-correcting weighted
/// predictor with default parameters and no transforms.
pub fn write_modular_stream_with_weighted(
    image: &ModularImage,
    writer: &mut BitWriter,
    use_ans: bool,
) -> Result<()> {
    use super::predictor::{Neighbors, WeightedPredictorParams, WeightedPredictorState};
    let params = WeightedPredictorParams::default();
    let mut residuals = Vec::new();
    for channel in &image.channels {
        let width = channel.width();
        let height = channel.height();
        // Predictor state is reset at the start of each channel.
        let mut wp_state = WeightedPredictorState::new(&params, width);
        for y in 0..height {
            for x in 0..width {
                let pixel = channel.get(x, y);
                let neighbors = Neighbors::gather(channel, x, y);
                let prediction = wp_state.predict(x, y, width, &neighbors);
                let residual = pixel - prediction;
                let packed = pack_signed(residual);
                residuals.push(packed);
                // Feed the true value back so the predictor adapts.
                wp_state.update_errors(pixel, x, y, width);
            }
        }
    }
    // Header flag bits, then the fixed weighted-predictor tree.
    writer.write(1, 1)?; writer.write(1, 1)?;
    write_tree_histogram_for_weighted(writer)?;
    write_weighted_tree_tokens(writer)?;
    if use_ans {
        let (tokens, code) = build_ans_modular_code(&residuals);
        write_ans_modular_header(writer, &code)?;
        // Flag bit + WP parameter header, then transform-count selector 0.
        writer.write(1, 1)?; write_wp_header(writer, &params)?;
        writer.write(2, 0)?;
        write_ans_modular_tokens(writer, &tokens, &code)?;
    } else {
        let (encoded, max_token) = encode_residuals_hybrid(&residuals);
        let histogram = build_token_histogram(&encoded, max_token);
        let (depths, codes) = write_hybrid_data_histogram(writer, &histogram, max_token)?;
        writer.write(1, 1)?; write_wp_header(writer, &params)?;
        writer.write(2, 0)?;
        write_hybrid_residuals(writer, &encoded, &depths, &codes)?;
    }
    writer.zero_pad_to_byte();
    Ok(())
}
/// Writes `image` as a modular stream preferring, in order: a palette
/// transform, YCoCg RCT plus the weighted predictor, or weighted-only for
/// images with fewer than three channels.
pub fn write_modular_stream_with_rct_weighted(
    image: &ModularImage,
    writer: &mut BitWriter,
    use_ans: bool,
) -> Result<()> {
    use super::predictor::{Neighbors, WeightedPredictorParams, WeightedPredictorState};
    if let Some((begin_c, num_c)) = super::palette::should_use_palette(image) {
        return write_modular_stream_with_palette(image, writer, use_ans, begin_c, num_c);
    }
    if image.channels.len() < 3 {
        return write_modular_stream_with_weighted(image, writer, use_ans);
    }
    let mut transformed = image.clone();
    let rct_type = RctType::YCOCG;
    forward_rct(&mut transformed.channels, 0, rct_type)?;
    let params = WeightedPredictorParams::default();
    let mut residuals = Vec::new();
    for channel in &transformed.channels {
        let width = channel.width();
        let height = channel.height();
        // Predictor state is reset at the start of each channel.
        let mut wp_state = WeightedPredictorState::new(&params, width);
        for y in 0..height {
            for x in 0..width {
                let pixel = channel.get(x, y);
                let neighbors = Neighbors::gather(channel, x, y);
                let prediction = wp_state.predict(x, y, width, &neighbors);
                let residual = pixel - prediction;
                residuals.push(pack_signed(residual));
                // Feed the true value back so the predictor adapts.
                wp_state.update_errors(pixel, x, y, width);
            }
        }
    }
    // Header flag bits, then the fixed weighted-predictor tree.
    writer.write(1, 1)?; writer.write(1, 1)?;
    write_tree_histogram_for_weighted(writer)?;
    write_weighted_tree_tokens(writer)?;
    if use_ans {
        let (tokens, code) = build_ans_modular_code(&residuals);
        write_ans_modular_header(writer, &code)?;
        // Flag bit + WP params, then one transform (selector 1): the RCT.
        writer.write(1, 1)?; write_wp_header(writer, &params)?;
        writer.write(2, 1)?; write_rct_transform(writer, 0, rct_type)?;
        write_ans_modular_tokens(writer, &tokens, &code)?;
    } else {
        let (encoded, max_token) = encode_residuals_hybrid(&residuals);
        let histogram = build_token_histogram(&encoded, max_token);
        let (depths, codes) = write_hybrid_data_histogram(writer, &histogram, max_token)?;
        writer.write(1, 1)?; write_wp_header(writer, &params)?;
        writer.write(2, 1)?; write_rct_transform(writer, 0, rct_type)?;
        write_hybrid_residuals(writer, &encoded, &depths, &codes)?;
    }
    writer.zero_pad_to_byte();
    Ok(())
}
/// Rough bit-cost estimate for encoding `image` with the clamped-gradient
/// predictor; used to rank RCT candidates in `select_best_rct`.
///
/// Pixels are bucketed into contexts by the spread of their three neighbors
/// (against the `cutoffs` table); per-context hybrid-uint token histograms
/// give a Shannon-entropy bit count, to which the raw extra bits are added.
fn estimate_cost(image: &ModularImage) -> f64 {
    use super::predictor::pack_signed;
    use crate::entropy_coding::hybrid_uint::HybridUintConfig;
    let config = HybridUintConfig::new(4, 2, 0);
    let cutoffs: &[u32] = &[
        0, 1, 3, 5, 7, 11, 15, 23, 31, 47, 63, 95, 127, 191, 255, 392, 500,
    ];
    let nc = cutoffs.len() + 1;
    let mut total_bits: f64 = 0.0;
    let mut extra_bits: u64 = 0;
    let mut histograms: Vec<Vec<u32>> = vec![vec![]; nc];
    for ch in &image.channels {
        let w = ch.width();
        let h = ch.height();
        if w == 0 || h == 0 {
            continue;
        }
        for y in 0..h {
            for x in 0..w {
                let val = ch.data()[y * w + x];
                // Left neighbor; first column falls back to the pixel above,
                // and the very first pixel to 0.
                let left = if x > 0 {
                    ch.data()[y * w + x - 1]
                } else if y > 0 {
                    ch.data()[(y - 1) * w + x]
                } else {
                    0
                };
                let top = if y > 0 {
                    ch.data()[(y - 1) * w + x]
                } else {
                    left
                };
                let topleft = if x > 0 && y > 0 {
                    ch.data()[(y - 1) * w + x - 1]
                } else {
                    left
                };
                // Context index = number of cutoffs strictly greater than the
                // neighbor spread (smoother areas get higher indices).
                let max_diff = left.max(top).max(topleft) - left.min(top).min(topleft);
                let max_diff = max_diff as u32;
                let mut ctx = 0usize;
                for &c in cutoffs {
                    if max_diff < c {
                        ctx += 1;
                    }
                }
                // Clamped-gradient prediction (same math as predict_gradient).
                let grad = left + top - topleft;
                let pred = grad.max(left.min(top)).min(left.max(top)); let res = val - pred;
                let packed = pack_signed(res);
                // NOTE(review): tuple assumed to be (token, bits, nbits);
                // only the token and the extra-bit count are used here.
                let (token, _bits, nbits) = config.encode(packed);
                if histograms[ctx].len() <= token as usize {
                    histograms[ctx].resize(token as usize + 1, 0);
                }
                histograms[ctx][token as usize] += 1;
                extra_bits += nbits as u64;
            }
        }
        // Entropy is accumulated and the histograms cleared once per channel,
        // so contexts do not share statistics across channels.
        for hist in &mut histograms {
            let total: u32 = hist.iter().sum();
            if total > 0 {
                let total_f = total as f64;
                for &count in hist.iter() {
                    if count > 0 {
                        let p = count as f64 / total_f;
                        total_bits -= count as f64 * jxl_simd::fast_log2f(p as f32) as f64;
                    }
                }
            }
            hist.clear();
        }
    }
    total_bits + extra_bits as f64
}
// Candidate RCT codes (permutation * 7 + transform id), tried in order.
#[allow(clippy::identity_op, clippy::erasing_op)]
const RCT_CANDIDATES: &[u8] = &[
    0 * 7 + 0, 0 * 7 + 6, 0 * 7 + 5, 1 * 7 + 3, 3 * 7 + 5, 5 * 7 + 5, 1 * 7 + 5,
];
/// Evaluates up to `nb_rcts_to_try` candidate reversible color transforms on
/// `image` (channels starting at 0) and returns the cheapest one (by
/// `estimate_cost`) together with the transformed image.
pub(crate) fn select_best_rct(image: &ModularImage, nb_rcts_to_try: u8) -> (RctType, ModularImage) {
    use super::rct::{RctType, forward_rct};
    let budget = nb_rcts_to_try as usize;
    // No search budget, or too few channels for an RCT: just apply YCoCg.
    if budget == 0 || image.channels.len() < 3 {
        let mut transformed = image.clone();
        forward_rct(&mut transformed.channels, 0, RctType::YCOCG).ok();
        return (RctType::YCOCG, transformed);
    }
    let mut best: Option<(f64, RctType, ModularImage)> = None;
    for &raw in RCT_CANDIDATES.iter().take(budget) {
        let candidate = RctType(raw);
        // For a no-op RCT the input itself is scored (cloned only if it
        // wins); otherwise the transform is applied first, and candidates
        // that fail to apply are skipped.
        let produced = if candidate.is_noop() {
            None
        } else {
            let mut t = image.clone();
            if forward_rct(&mut t.channels, 0, candidate).is_err() {
                continue;
            }
            Some(t)
        };
        let cost = match &produced {
            Some(t) => estimate_cost(t),
            None => estimate_cost(image),
        };
        crate::trace::debug_eprintln!(" RCT {:2}: cost={:.0}", raw, cost);
        if cost < best.as_ref().map_or(f64::MAX, |(c, _, _)| *c) {
            let img = produced.unwrap_or_else(|| image.clone());
            best = Some((cost, candidate, img));
        }
    }
    // If nothing could be evaluated, fall back to a plain YCoCg transform.
    let (best_cost, best_rct, work_image) = best.unwrap_or_else(|| {
        let mut t = image.clone();
        forward_rct(&mut t.channels, 0, RctType::YCOCG).ok();
        (f64::MAX, RctType::YCOCG, t)
    });
    crate::trace::debug_eprintln!(
        "RCT_SELECT: best={} (cost={:.0}), tried {} variants",
        best_rct.0,
        best_cost,
        budget.min(RCT_CANDIDATES.len()),
    );
    (best_rct, work_image)
}
/// Like `select_best_rct`, but applies candidate RCTs to the three channels
/// starting at `begin_c` (used when meta/palette channels precede the color
/// channels).
pub(crate) fn select_best_rct_at(
    image: &ModularImage,
    begin_c: usize,
    nb_rcts_to_try: u8,
) -> (RctType, ModularImage) {
    use super::rct::{RctType, forward_rct};
    let nb_rcts_to_try = nb_rcts_to_try as usize;
    // No search budget, or not enough channels after begin_c: apply YCoCg.
    if nb_rcts_to_try == 0 || image.channels.len() < begin_c + 3 {
        let mut transformed = image.clone();
        forward_rct(&mut transformed.channels, begin_c, RctType::YCOCG).ok();
        return (RctType::YCOCG, transformed);
    }
    let mut best_cost = f64::MAX;
    let mut best_rct = RctType::YCOCG;
    let mut best_image = None;
    for (i, &rct_val) in RCT_CANDIDATES.iter().enumerate() {
        if i >= nb_rcts_to_try {
            break;
        }
        let rct_type = RctType(rct_val);
        if rct_type.is_noop() {
            // Identity transform: score the untouched input.
            let cost = estimate_cost(image);
            crate::trace::debug_eprintln!(
                " RCT {:2} (begin_c={}): cost={:.0}",
                rct_val,
                begin_c,
                cost
            );
            if cost < best_cost {
                best_cost = cost;
                best_rct = rct_type;
                best_image = Some(image.clone());
            }
        } else {
            // Apply the candidate; skip it if the transform fails.
            let mut transformed = image.clone();
            if forward_rct(&mut transformed.channels, begin_c, rct_type).is_ok() {
                let cost = estimate_cost(&transformed);
                crate::trace::debug_eprintln!(
                    " RCT {:2} (begin_c={}): cost={:.0}",
                    rct_val,
                    begin_c,
                    cost
                );
                if cost < best_cost {
                    best_cost = cost;
                    best_rct = rct_type;
                    best_image = Some(transformed);
                }
            }
        }
    }
    // If nothing could be evaluated, fall back to a plain YCoCg transform.
    let work_image = best_image.unwrap_or_else(|| {
        let mut t = image.clone();
        forward_rct(&mut t.channels, begin_c, RctType::YCOCG).ok();
        t
    });
    crate::trace::debug_eprintln!(
        "RCT_SELECT: best={} (cost={:.0}), tried {} variants, begin_c={}",
        best_rct.0,
        best_cost,
        nb_rcts_to_try.min(RCT_CANDIDATES.len()),
        begin_c,
    );
    (best_rct, work_image)
}
/// Convenience wrapper around `write_modular_stream_with_tree_dc_quant` with
/// default LF quantization, no lossy options, and palette search enabled.
pub fn write_modular_stream_with_tree(
    image: &ModularImage,
    writer: &mut BitWriter,
    profile: &crate::effort::EffortProfile,
    rct: bool,
    use_lz77: bool,
    lz77_method: crate::entropy_coding::lz77::Lz77Method,
) -> Result<()> {
    write_modular_stream_with_tree_dc_quant(
        image,
        writer,
        profile,
        rct,
        use_lz77,
        lz77_method,
        None, // dc_quant_custom
        None, true, // lossy_options = None, palette = true
    )
}
/// Options controlling lossy modular encoding
/// (see `write_modular_stream_with_tree_dc_quant`).
#[derive(Debug, Clone, Copy)]
pub struct LossyModularOptions {
    // Distance (quality knob) passed to the per-channel XYB quantizer —
    // NOTE(review): presumably larger values mean coarser quantization;
    // confirm the scale against compute_channel_quantizer_xyb.
    pub distance: f32,
}
/// Core modular encoder: chooses transforms (palette / per-channel
/// compaction / RCT / squeeze / lossy quantization), learns an MA tree from
/// sampled pixels, and writes the complete modular sub-bitstream with ANS
/// (optionally LZ77) coding.
///
/// * `rct` — search for the best reversible color transform (lossless only).
/// * `use_lz77` / `lz77_method` — enable and select LZ77 token matching.
/// * `dc_quant_custom` — custom LF quantization values for the LF header.
/// * `lossy_options` — when set, squeeze + quantize channels for lossy mode.
/// * `palette` — allow palette / channel-compaction transforms.
#[allow(clippy::too_many_arguments)]
pub(crate) fn write_modular_stream_with_tree_dc_quant(
    image: &ModularImage,
    writer: &mut BitWriter,
    profile: &crate::effort::EffortProfile,
    rct: bool,
    use_lz77: bool,
    lz77_method: crate::entropy_coding::lz77::Lz77Method,
    dc_quant_custom: Option<[f32; 3]>,
    lossy_options: Option<LossyModularOptions>,
    palette: bool,
) -> Result<()> {
    use super::tree::count_contexts;
    use super::tree_learn::{
        TreeLearningParams, TreeSamples, collect_residuals_with_tree, compute_best_tree,
        compute_best_tree_with_multipliers, compute_gather_stride_from_profile,
        gather_samples_strided, max_ref_channels,
    };
    use crate::entropy_coding::encode::build_entropy_code_ans_with_options;
    use crate::entropy_coding::encode::write_entropy_code_ans;
    use crate::entropy_coding::lz77::{apply_lz77, write_lz77_header};
    let is_lossy = lossy_options.is_some();
    // Stage 1a: try a multi-channel palette (lossless only).
    let palette_info = if palette && !is_lossy && image.channels.len() >= 2 {
        if let Some((begin_c, num_c)) = super::palette::should_use_palette(image) {
            let max_colors = super::palette::MAX_PALETTE_COLORS;
            let analysis = super::palette::analyze_palette(image, begin_c, num_c, max_colors);
            if analysis.use_palette {
                Some((begin_c, num_c, analysis))
            } else {
                None
            }
        } else {
            None
        }
    } else {
        None
    };
    // Stage 1b: otherwise, try per-channel "compaction" palettes on the
    // color channels (the trailing alpha channel, if any, is excluded).
    let compact_analyses: Vec<(usize, super::palette::PaletteAnalysis)> =
        if palette_info.is_none() && !is_lossy && palette && image.channels.len() >= 2 {
            let num_color_channels = if image.has_alpha {
                image.channels.len() - 1
            } else {
                image.channels.len()
            };
            (0..num_color_channels)
                .filter_map(|i| {
                    super::palette::analyze_channel_compact(
                        &image.channels[i],
                        super::palette::CHANNEL_COLORS_PERCENT,
                    )
                    .map(|a| (i, a))
                })
                .collect()
        } else {
            Vec::new()
        };
    // Stage 2: build the working image plus a record of which transforms
    // were applied (for the transform list written in stage 6).
    let (work_image, rct_type, palette_result, compact_info) = if let Some((
        begin_c,
        num_c,
        ref analysis,
    )) = palette_info
    {
        let mut palettized = image.clone();
        let nb_colors = super::palette::apply_palette(&mut palettized, begin_c, num_c, analysis)?;
        crate::trace::debug_eprintln!(
            "PALETTE+TREE: {} unique colors, {} channels palettized, begin_c={}",
            nb_colors,
            num_c,
            begin_c,
        );
        (
            palettized,
            None,
            Some((begin_c, num_c, nb_colors)),
            Vec::new(),
        )
    } else if !compact_analyses.is_empty() {
        let num_compacted = compact_analyses.len();
        let mut palettes: Vec<Channel> = Vec::new();
        let mut non_meta: Vec<Channel> = Vec::new();
        let mut info: Vec<(usize, usize)> = Vec::new();
        let mut nb_meta = 0usize;
        for (orig_idx, ch) in image.channels.iter().enumerate() {
            if let Some((_, analysis)) = compact_analyses.iter().find(|(idx, _)| *idx == orig_idx) {
                // 1×N meta-channel holding this channel's palette entries.
                let mut pal_ch = Channel::new(analysis.num_colors, 1)?;
                for (i, color) in analysis.palette.iter().enumerate() {
                    pal_ch.set(i, 0, color[0]);
                }
                palettes.push(pal_ch);
                // Replace the channel's samples with palette indices.
                let mut idx_ch = Channel::new(ch.width(), ch.height())?;
                for y in 0..ch.height() {
                    for x in 0..ch.width() {
                        let val = ch.get(x, y);
                        let index = analysis.color_to_index[&vec![val]];
                        idx_ch.set(x, y, index);
                    }
                }
                non_meta.push(idx_ch);
                // NOTE(review): begin_c is offset by the number of meta
                // channels inserted so far — confirm this matches the
                // decoder's channel-layout expectations.
                let begin_c = orig_idx + nb_meta;
                info.push((begin_c, analysis.num_colors));
                nb_meta += 1;
            } else {
                non_meta.push(ch.clone());
            }
        }
        // Meta-channels go first, in reverse collection order.
        palettes.reverse();
        let mut work = image.clone();
        work.channels = palettes;
        work.channels.extend(non_meta);
        crate::trace::debug_eprintln!(
            "CHANNEL_COMPACT+TREE: {} channels compacted, {} meta + {} non-meta channels, info={:?}",
            num_compacted,
            nb_meta,
            work.channels.len() - nb_meta,
            info,
        );
        // An RCT can still be applied to the channels after the meta block.
        let rct_begin_c = num_compacted;
        if rct && work.channels.len() >= rct_begin_c + 3 {
            let (selected_rct, transformed) =
                select_best_rct_at(&work, rct_begin_c, profile.nb_rcts_to_try);
            (transformed, Some(selected_rct), None, info)
        } else {
            (work, None, None, info)
        }
    } else if !is_lossy && rct && image.channels.len() >= 3 {
        let (selected_rct, transformed) = select_best_rct(image, profile.nb_rcts_to_try);
        (transformed, Some(selected_rct), None, Vec::new())
    } else {
        (image.clone(), None, None, Vec::new())
    };
    // Stage 3 (lossy only): squeeze, then quantize channels by distance.
    let squeeze_params = if is_lossy {
        use super::squeeze::default_squeeze_params;
        let params = default_squeeze_params(&work_image);
        if !params.is_empty() {
            Some(params)
        } else {
            None
        }
    } else {
        None
    };
    let mut work_image = work_image;
    if let Some(ref params) = squeeze_params {
        super::squeeze::apply_squeeze(&mut work_image, params)?;
    }
    let multiplier_info = if let Some(lossy) = lossy_options {
        use super::quantize::{
            build_multiplier_info, compute_channel_quantizer_xyb, quantize_channel,
        };
        let mut quants = Vec::new();
        for ch in work_image.channels.iter_mut() {
            let component = ch.component;
            // Only the three color components (0..3) are quantized.
            if !(0..3).contains(&component) {
                quants.push(1);
                continue;
            }
            let q = compute_channel_quantizer_xyb(
                component as usize,
                ch.hshift,
                ch.vshift,
                lossy.distance,
            );
            quantize_channel(ch, q);
            quants.push(q);
        }
        let info = build_multiplier_info(&quants, 0);
        crate::trace::debug_eprintln!(
            "LOSSY_MODULAR: distance={:.2}, {} channels, quants={:?}, {} mul_info entries",
            lossy.distance,
            work_image.channels.len(),
            quants,
            info.len(),
        );
        Some(info)
    } else {
        None
    };
    // Stage 4: learn the MA tree from a strided sample of the pixels.
    let wp_params = if !is_lossy && profile.wp_num_param_sets > 0 {
        super::predictor::find_best_wp_params(&work_image.channels, profile.wp_num_param_sets)
    } else {
        super::predictor::WeightedPredictorParams::default()
    };
    let total_pixels: usize = work_image
        .channels
        .iter()
        .map(|ch| ch.width() * ch.height())
        .sum();
    let stride = compute_gather_stride_from_profile(total_pixels, profile);
    let num_refs = if is_lossy {
        0
    } else {
        max_ref_channels(&work_image)
    };
    let mut samples = TreeSamples::new_with_ref_channels(num_refs);
    gather_samples_strided(&mut samples, &work_image, 0, 0, stride, &wp_params);
    // Fraction of pixels actually sampled (scales the split threshold).
    let pixel_fraction = if total_pixels > 0 {
        samples.num_samples as f64 / total_pixels as f64
    } else {
        1.0
    };
    let params = TreeLearningParams::from_profile(profile)
        .with_ref_properties(num_refs, profile.effort)
        .with_pixel_fraction(pixel_fraction)
        .with_total_pixels(total_pixels);
    let tree = if let Some(ref mul_info) = multiplier_info {
        let num_channels = work_image.channels.len() as u32;
        let initial_range = [[0, num_channels], [0, 1]];
        compute_best_tree_with_multipliers(&mut samples, &params, mul_info, initial_range)
    } else {
        compute_best_tree(&mut samples, &params)
    };
    let num_contexts = count_contexts(&tree) as usize;
    crate::trace::debug_eprintln!(
        "TREE_LEARN: effort={}, {} props, {} max_buckets, threshold={:.0}*{:.3}={:.1}, \
         {} nodes, {} leaves/contexts, {} samples, lossy={}",
        profile.effort,
        params.properties.len(),
        params.max_property_values,
        params.split_threshold,
        params.pixel_fraction * 0.9 + 0.1,
        params.split_threshold * (params.pixel_fraction * 0.9 + 0.1),
        tree.len(),
        num_contexts,
        samples.num_samples,
        is_lossy,
    );
    // Stage 5: tokenize residuals under the learned tree, optionally LZ77.
    let tokens = collect_residuals_with_tree(&work_image, &tree, 0, &wp_params);
    let dist_multiplier = work_image
        .channels
        .iter()
        .map(|c| c.width())
        .max()
        .unwrap_or(0) as i32;
    let (tokens, lz77_params) = if use_lz77 {
        // apply_lz77 returns None when LZ77 would not help; keep the
        // original tokens in that case.
        match apply_lz77(&tokens, num_contexts, false, lz77_method, dist_multiplier) {
            Some((lz77_tokens, params)) => (lz77_tokens, Some(params)),
            None => (tokens, None),
        }
    } else {
        (tokens, None)
    };
    // LZ77 adds one extra context.
    let ans_num_contexts = if lz77_params.is_some() {
        num_contexts + 1
    } else {
        num_contexts
    };
    let code = build_entropy_code_ans_with_options(
        &tokens,
        ans_num_contexts,
        true, true, lz77_params.as_ref(),
        Some(total_pixels),
    );
    // Stage 6: emit headers (LF quant, tree, entropy code, WP, transforms)…
    crate::f16::write_lf_quant(writer, dc_quant_custom)?;
    writer.write(1, 1)?;
    write_tree(writer, &tree)?;
    if ans_num_contexts > 1 {
        write_lz77_header(lz77_params.as_ref(), writer)?;
        write_entropy_code_ans(&code, writer)?;
    } else {
        use super::section::write_ans_modular_header;
        write_ans_modular_header(writer, &code)?;
    }
    writer.write(1, 1)?; write_wp_header(writer, &wp_params)?;
    {
        // Transform list: compaction palettes, then the main palette, then
        // the RCT, then squeeze — matching the order applied above.
        let has_palette = palette_result.is_some();
        let has_rct = rct_type.is_some();
        let has_squeeze = squeeze_params.is_some();
        let num_transforms =
            compact_info.len() as u32 + has_palette as u32 + has_rct as u32 + has_squeeze as u32;
        write_num_transforms(writer, num_transforms)?;
        for &(begin_c, nb_colors) in &compact_info {
            write_palette_transform(writer, begin_c, 1, nb_colors, 0, 0)?;
        }
        if let Some((begin_c, num_c, nb_colors)) = palette_result {
            write_palette_transform(writer, begin_c, num_c, nb_colors, 0, 0)?;
        }
        if let Some(rct_type) = rct_type {
            let rct_begin_c = compact_info.len();
            write_rct_transform(writer, rct_begin_c, rct_type)?;
        }
        if let Some(ref params) = squeeze_params {
            write_squeeze_transform(writer, params)?;
        }
    }
    // Debug builds only: verify the ANS stream decodes back to the same
    // tokens (skipped when LZ77 is active).
    #[cfg(debug_assertions)]
    if lz77_params.is_none() {
        let roundtrip_result = crate::entropy_coding::encode::verify_ans_roundtrip(&tokens, &code);
        if roundtrip_result.is_err() {
            debug_rect!(
                "ans/verify",
                0,
                0,
                image.width(),
                image.height(),
                "ROUNDTRIP FAILED for tree learning data (ctx={} histo={} tokens={}): {:?}",
                num_contexts,
                code.histograms.len(),
                tokens.len(),
                roundtrip_result
            );
        }
    }
    // …then the token stream itself.
    write_tokens_ans(&tokens, &code, lz77_params.as_ref(), writer)?;
    writer.zero_pad_to_byte();
    Ok(())
}
#[allow(clippy::too_many_arguments, dead_code)]
pub(crate) fn write_modular_stream_with_tree_dc_quant_presqueezed(
image: &ModularImage,
writer: &mut BitWriter,
profile: &crate::effort::EffortProfile,
use_lz77: bool,
lz77_method: crate::entropy_coding::lz77::Lz77Method,
dc_quant_custom: Option<[f32; 3]>,
squeeze_params: &[super::squeeze::SqueezeParams],
multiplier_info: &[super::quantize::ModularMultiplierInfo],
_quants: &[i32],
) -> Result<()> {
use super::tree::count_contexts;
use super::tree_learn::{
TreeLearningParams, TreeSamples, collect_residuals_with_tree, compute_best_tree,
compute_best_tree_with_multipliers, compute_gather_stride_from_profile,
gather_samples_strided,
};
use crate::entropy_coding::encode::build_entropy_code_ans_with_options;
use crate::entropy_coding::encode::write_entropy_code_ans;
use crate::entropy_coding::lz77::{apply_lz77, write_lz77_header};
let wp_params = super::predictor::WeightedPredictorParams::default();
let total_pixels: usize = image
.channels
.iter()
.map(|ch| ch.width() * ch.height())
.sum();
let stride = compute_gather_stride_from_profile(total_pixels, profile);
let mut samples = TreeSamples::new();
gather_samples_strided(&mut samples, image, 0, 0, stride, &wp_params);
let pixel_fraction = if total_pixels > 0 {
samples.num_samples as f64 / total_pixels as f64
} else {
1.0
};
let params = TreeLearningParams::from_profile(profile)
.with_pixel_fraction(pixel_fraction)
.with_total_pixels(total_pixels);
let tree = if !multiplier_info.is_empty() {
let num_channels = image.channels.len() as u32;
let initial_range = [[0, num_channels], [0, 1]];
compute_best_tree_with_multipliers(&mut samples, ¶ms, multiplier_info, initial_range)
} else {
compute_best_tree(&mut samples, ¶ms)
};
let num_contexts = count_contexts(&tree) as usize;
crate::trace::debug_eprintln!(
"PRESQUEEZED_TREE: {} nodes, {} contexts, {} samples, {} mul_info entries",
tree.len(),
num_contexts,
samples.num_samples,
multiplier_info.len(),
);
let tokens = collect_residuals_with_tree(image, &tree, 0, &wp_params);
let dist_multiplier = image.channels.iter().map(|c| c.width()).max().unwrap_or(0) as i32;
let (tokens, lz77_params) = if use_lz77 {
match apply_lz77(&tokens, num_contexts, false, lz77_method, dist_multiplier) {
Some((lz77_tokens, params)) => (lz77_tokens, Some(params)),
None => (tokens, None),
}
} else {
(tokens, None)
};
let ans_num_contexts = if lz77_params.is_some() {
num_contexts + 1
} else {
num_contexts
};
let code = build_entropy_code_ans_with_options(
&tokens,
ans_num_contexts,
true,
true, lz77_params.as_ref(),
Some(total_pixels),
);
crate::f16::write_lf_quant(writer, dc_quant_custom)?;
writer.write(1, 1)?;
write_tree(writer, &tree)?;
if ans_num_contexts > 1 {
write_lz77_header(lz77_params.as_ref(), writer)?;
write_entropy_code_ans(&code, writer)?;
} else {
use super::section::write_ans_modular_header;
write_ans_modular_header(writer, &code)?;
}
writer.write(1, 1)?; write_wp_header(writer, &wp_params)?;
let has_squeeze = !squeeze_params.is_empty();
if has_squeeze {
writer.write(2, 1)?; write_squeeze_transform(writer, squeeze_params)?;
} else {
writer.write(2, 0)?; }
#[cfg(debug_assertions)]
if lz77_params.is_none() {
let roundtrip_result = crate::entropy_coding::encode::verify_ans_roundtrip(&tokens, &code);
if roundtrip_result.is_err() {
crate::debug_rect!(
"ans/verify",
0,
0,
image.width(),
image.height(),
"ROUNDTRIP FAILED for presqueezed data (ctx={} histo={} tokens={}): {:?}",
num_contexts,
code.histograms.len(),
tokens.len(),
roundtrip_result
);
}
}
write_tokens_ans(&tokens, &code, lz77_params.as_ref(), writer)?;
writer.zero_pad_to_byte();
Ok(())
}
pub fn write_modular_stream_with_squeeze_and_tree(
image: &ModularImage,
writer: &mut BitWriter,
profile: &crate::effort::EffortProfile,
use_lz77: bool,
lz77_method: crate::entropy_coding::lz77::Lz77Method,
) -> Result<()> {
use super::rct::{RctType, forward_rct};
use super::squeeze::{apply_squeeze, default_squeeze_params};
use super::tree::count_contexts;
use super::tree_learn::{
TreeLearningParams, TreeSamples, collect_residuals_with_tree, compute_best_tree,
compute_gather_stride_from_profile, gather_samples_strided,
};
use crate::entropy_coding::encode::build_entropy_code_ans_with_options;
use crate::entropy_coding::encode::write_entropy_code_ans;
use crate::entropy_coding::lz77::{apply_lz77, write_lz77_header};
let params = default_squeeze_params(image);
if params.is_empty() {
return write_modular_stream_with_tree(
image,
writer,
profile,
image.channels.len() >= 3,
use_lz77,
lz77_method,
);
}
let mut transformed = image.clone();
let has_rct = transformed.channels.len() >= 3;
if has_rct {
let rct_type = RctType::YCOCG;
forward_rct(&mut transformed.channels, 0, rct_type)?;
}
apply_squeeze(&mut transformed, ¶ms)?;
crate::trace::debug_eprintln!(
"SQUEEZE+TREE: {} squeeze steps, {} → {} channels, rct={}",
params.len(),
image.channels.len(),
transformed.channels.len(),
has_rct,
);
let wp_params = if profile.wp_num_param_sets > 0 {
super::predictor::find_best_wp_params(&transformed.channels, profile.wp_num_param_sets)
} else {
super::predictor::WeightedPredictorParams::default()
};
let total_pixels: usize = transformed
.channels
.iter()
.map(|ch| ch.width() * ch.height())
.sum();
let stride = compute_gather_stride_from_profile(total_pixels, profile);
let mut samples = TreeSamples::new_for_squeeze();
gather_samples_strided(&mut samples, &transformed, 0, 0, stride, &wp_params);
let pixel_fraction = if total_pixels > 0 {
samples.num_samples as f64 / total_pixels as f64
} else {
1.0
};
let tree_params = TreeLearningParams::from_profile_squeeze(profile)
.with_pixel_fraction(pixel_fraction)
.with_total_pixels(total_pixels);
let tree = compute_best_tree(&mut samples, &tree_params);
let num_contexts = count_contexts(&tree) as usize;
crate::trace::debug_eprintln!(
"SQUEEZE+TREE: effort={}, {} nodes, {} contexts, {} samples (pf={:.3})",
profile.effort,
tree.len(),
num_contexts,
samples.num_samples,
pixel_fraction,
);
let tokens = collect_residuals_with_tree(&transformed, &tree, 0, &wp_params);
let dist_multiplier = transformed
.channels
.iter()
.map(|c| c.width())
.max()
.unwrap_or(0) as i32;
let (tokens, lz77_params) = if use_lz77 {
match apply_lz77(&tokens, num_contexts, false, lz77_method, dist_multiplier) {
Some((lz77_tokens, params)) => {
crate::trace::debug_eprintln!(
"SQUEEZE LZ77: {} → {} tokens ({:.1}x), method={:?}, dm={}",
tokens.len(),
lz77_tokens.len(),
tokens.len() as f64 / lz77_tokens.len() as f64,
lz77_method,
dist_multiplier,
);
(lz77_tokens, Some(params))
}
None => {
crate::trace::debug_eprintln!(
"SQUEEZE LZ77: not cost-effective, method={:?}, dm={}, {} tokens",
lz77_method,
dist_multiplier,
tokens.len(),
);
(tokens, None)
}
}
} else {
crate::trace::debug_eprintln!("SQUEEZE LZ77: disabled");
(tokens, None)
};
let ans_num_contexts = if lz77_params.is_some() {
num_contexts + 1
} else {
num_contexts
};
let code = build_entropy_code_ans_with_options(
&tokens,
ans_num_contexts,
true, true, lz77_params.as_ref(),
Some(total_pixels),
);
let _bit0 = writer.bits_written();
writer.write(1, 1)?;
writer.write(1, 1)?;
write_tree(writer, &tree)?;
let _bit_after_tree = writer.bits_written();
if ans_num_contexts > 1 {
write_lz77_header(lz77_params.as_ref(), writer)?;
write_entropy_code_ans(&code, writer)?;
} else {
use super::section::write_ans_modular_header;
write_ans_modular_header(writer, &code)?;
}
let _bit_after_histo = writer.bits_written();
writer.write(1, 1)?; write_wp_header(writer, &wp_params)?;
if has_rct {
writer.write(2, 2)?;
writer.write(4, 0)?;
write_rct_transform(writer, 0, RctType::YCOCG)?;
write_squeeze_transform(writer, ¶ms)?;
} else {
writer.write(2, 1)?; write_squeeze_transform(writer, ¶ms)?;
}
let _bit_after_header = writer.bits_written();
crate::trace::debug_eprintln!(
"SQUEEZE OVERHEAD: tree={} bits ({:.0}B), histograms={} bits ({:.0}B), header={} bits ({:.0}B), total_overhead={:.0}B",
_bit_after_tree - _bit0,
(_bit_after_tree - _bit0) as f64 / 8.0,
_bit_after_histo - _bit_after_tree,
(_bit_after_histo - _bit_after_tree) as f64 / 8.0,
_bit_after_header - _bit_after_histo,
(_bit_after_header - _bit_after_histo) as f64 / 8.0,
(_bit_after_header - _bit0) as f64 / 8.0,
);
#[cfg(debug_assertions)]
if lz77_params.is_none() {
let roundtrip_result = crate::entropy_coding::encode::verify_ans_roundtrip(&tokens, &code);
if roundtrip_result.is_err() {
debug_rect!(
"ans/verify",
0,
0,
image.width(),
image.height(),
"ROUNDTRIP FAILED for squeeze+tree data (ctx={} histo={} tokens={}): {:?}",
num_contexts,
code.histograms.len(),
tokens.len(),
roundtrip_result
);
}
}
let _bit_before_data = writer.bits_written();
write_tokens_ans(&tokens, &code, lz77_params.as_ref(), writer)?;
let _bit_after_data = writer.bits_written();
crate::trace::debug_eprintln!(
"SQUEEZE DATA: {} bits ({:.0}B), {} tokens, {} histograms",
_bit_after_data - _bit_before_data,
(_bit_after_data - _bit_before_data) as f64 / 8.0,
tokens.len(),
code.histograms.len(),
);
crate::trace::debug_eprintln!(
"SQUEEZE TOTAL: {:.0}B (overhead {:.0}B + data {:.0}B)",
(_bit_after_data - _bit0) as f64 / 8.0,
(_bit_after_header - _bit0) as f64 / 8.0,
(_bit_after_data - _bit_before_data) as f64 / 8.0,
);
writer.zero_pad_to_byte();
Ok(())
}
pub use super::section::{
build_histogram_from_residuals, collect_all_residuals, write_global_modular_section,
write_group_modular_section, write_group_modular_section_idx,
};
#[cfg(test)]
mod tests {
    use super::*;

    // Zigzag mapping of signed residuals to unsigned tokens:
    // 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
    #[test]
    fn test_pack_signed() {
        assert_eq!(pack_signed(0), 0);
        assert_eq!(pack_signed(-1), 1);
        assert_eq!(pack_signed(1), 2);
        assert_eq!(pack_signed(-2), 3);
        assert_eq!(pack_signed(2), 4);
    }

    // Gradient predictor: left + top - topleft, clamped into the
    // neighbor range (10+20-30 = 0 clamps up to 10 in the last case).
    #[test]
    fn test_predict_gradient() {
        assert_eq!(predict_gradient(10, 10, 10), 10);
        assert_eq!(predict_gradient(20, 10, 10), 20); assert_eq!(predict_gradient(10, 20, 30), 10); }

    // Hybrid-uint with split=0: value -> (token, nbits, extra bits).
    #[test]
    fn test_encode_hybrid_uint() {
        assert_eq!(encode_hybrid_uint_000(0), (0, 0, 0));
        assert_eq!(encode_hybrid_uint_000(1), (1, 0, 0));
        assert_eq!(encode_hybrid_uint_000(2), (2, 1, 0));
        assert_eq!(encode_hybrid_uint_000(3), (2, 1, 1));
    }

    // Smoke test: a smooth gray gradient encodes without error.
    #[test]
    fn test_gradient_stream() {
        let data: Vec<u8> = vec![
            100, 101, 102, 103, 101, 102, 103, 104, 102, 103, 104, 105, 103, 104, 105, 106,
        ];
        let image = ModularImage::from_gray8(&data, 4, 4).unwrap();
        let mut writer = BitWriter::new();
        write_simple_modular_stream(&image, &mut writer, false).unwrap();
        let _bytes = writer.finish_with_padding();
        crate::trace::debug_eprintln!("Gradient stream: {} bytes", _bytes.len());
    }

    // Smoke test: RGB data through the RCT path produces a non-empty stream.
    #[test]
    fn test_rct_stream() {
        let mut data = Vec::new();
        for y in 0..4 {
            for x in 0..4 {
                let base = (y * 4 + x) * 10;
                data.push(base as u8); data.push((base + 5) as u8); data.push((base + 10) as u8); }
        }
        let image = ModularImage::from_rgb8(&data, 4, 4).unwrap();
        let mut writer = BitWriter::new();
        write_modular_stream_with_rct(&image, &mut writer, false).unwrap();
        let bytes = writer.finish_with_padding();
        crate::trace::debug_eprintln!("RCT stream: {} bytes", bytes.len());
        assert!(!bytes.is_empty());
    }

    // The YCoCg RCT transform signalling is exactly 9 bits
    // (see test_rct_type_u32_encoding: 2 + 5 base bits + 2 for type 6).
    #[test]
    fn test_write_rct_transform() {
        use crate::bit_writer::BitWriter;
        let mut writer = BitWriter::new();
        write_rct_transform(&mut writer, 0, RctType::YCOCG).unwrap();
        assert_eq!(writer.bits_written(), 9);
    }

    // Bit-count check for the U32-style encoding of every RCT type value:
    // type 6 (the default, YCoCg) takes only the 2-bit selector; other
    // values add 2/4/6 payload bits depending on range.
    #[test]
    fn test_rct_type_u32_encoding() {
        use crate::modular::rct::RctType;
        // 2 bits transform id + 5 bits begin_c (per the writer's layout).
        let base_bits = 2 + 5;
        for rct_val in 0..42u8 {
            let rct_type = RctType(rct_val);
            let mut writer = BitWriter::new();
            write_rct_transform(&mut writer, 0, rct_type).unwrap();
            let expected_rct_bits = if rct_val == 6 {
                2 } else if rct_val < 2 {
                2 + 2 } else if rct_val < 10 {
                2 + 4 } else {
                2 + 6 };
            let total_expected = base_bits + expected_rct_bits;
            assert_eq!(
                writer.bits_written(),
                total_expected,
                "Wrong bit count for rct_type={}: expected {} bits, got {}",
                rct_val,
                total_expected,
                writer.bits_written()
            );
        }
    }

    // Smoke test: weighted-predictor stream on gray data.
    #[test]
    fn test_weighted_stream() {
        let data: Vec<u8> = vec![
            100, 101, 102, 103, 101, 102, 103, 104, 102, 103, 104, 105, 103, 104, 105, 106,
        ];
        let image = ModularImage::from_gray8(&data, 4, 4).unwrap();
        let mut writer = BitWriter::new();
        write_modular_stream_with_weighted(&image, &mut writer, false).unwrap();
        let bytes = writer.finish_with_padding();
        crate::trace::debug_eprintln!("Weighted stream: {} bytes", bytes.len());
        assert!(!bytes.is_empty());
    }

    // Smoke test: RCT + weighted predictor combined.
    #[test]
    fn test_rct_weighted_stream() {
        let mut data = Vec::new();
        for y in 0..4 {
            for x in 0..4 {
                let base = (y * 4 + x) * 10;
                data.push(base as u8);
                data.push((base + 5) as u8);
                data.push((base + 10) as u8);
            }
        }
        let image = ModularImage::from_rgb8(&data, 4, 4).unwrap();
        let mut writer = BitWriter::new();
        write_modular_stream_with_rct_weighted(&image, &mut writer, false).unwrap();
        let bytes = writer.finish_with_padding();
        crate::trace::debug_eprintln!("RCT+Weighted stream: {} bytes", bytes.len());
        assert!(!bytes.is_empty());
    }

    // Diagnostic test: encodes a constant image (ideal for LZ77) and prints
    // the byte dump plus the expected decoder-side bit interpretation.
    // No assertions — this test exists for manual bitstream debugging.
    #[test]
    fn test_lz77_bit_trace() {
        let mut data = Vec::new();
        for _ in 0..256 {
            data.push(100u8);
            data.push(100u8);
            data.push(100u8);
        }
        let image = ModularImage::from_rgb8(&data, 16, 16).unwrap();
        crate::trace::debug_eprintln!("\n=== LZ77 BIT TRACE TEST ===");
        let mut writer = BitWriter::new();
        write_improved_modular_stream(&image, &mut writer, false).unwrap();
        let _bytes = writer.finish_with_padding();
        crate::trace::debug_eprintln!("LZ77 stream: {} bytes", _bytes.len());
        crate::trace::debug_eprintln!("Raw bytes: {:02x?}", &_bytes[.._bytes.len().min(50)]);
        crate::trace::debug_eprintln!("\n=== EXPECTED DECODER INTERPRETATION ===");
        crate::trace::debug_eprintln!("Bit 0: dc_quant.all_default = 1");
        crate::trace::debug_eprintln!("Bit 1: has_tree = 1");
        crate::trace::debug_eprintln!("--- TREE HISTOGRAM (6 contexts) ---");
        crate::trace::debug_eprintln!("Bit 2: lz77.enabled = 0");
        crate::trace::debug_eprintln!("Bits 3-5: context_map (is_simple=1, bits_per_entry=0)");
    }

    // End-to-end: encode a gray 4x4 with ANS, decode with jxl-oxide,
    // and require an exact lossless roundtrip.
    #[test]
    fn test_ans_roundtrip_gray() {
        use crate::headers::{ColorEncoding, FileHeader};
        use crate::modular::frame::{FrameEncoder, FrameEncoderOptions};
        let data: Vec<u8> = vec![
            100, 101, 102, 103, 101, 102, 103, 104, 102, 103, 104, 105, 103, 104, 105, 106,
        ];
        let image = ModularImage::from_gray8(&data, 4, 4).unwrap();
        let mut writer = BitWriter::new();
        let file_header = FileHeader::new_gray(4, 4);
        file_header.write(&mut writer).unwrap();
        writer.zero_pad_to_byte();
        let frame_options = FrameEncoderOptions {
            use_modular: true,
            effort: 7,
            use_ans: true,
            use_tree_learning: false,
            use_squeeze: false,
            ..Default::default()
        };
        let frame_encoder = FrameEncoder::new(4, 4, frame_options);
        let color_encoding = ColorEncoding::srgb();
        frame_encoder
            .encode_modular(&image, &color_encoding, &mut writer)
            .unwrap();
        let bytes = writer.finish_with_padding();
        eprintln!("ANS modular gray 4x4: {} bytes", bytes.len());
        let jxl_image = jxl_oxide::JxlImage::builder()
            .read(std::io::Cursor::new(&bytes))
            .unwrap_or_else(|e| panic!("jxl-oxide parse failed: {}", e));
        assert_eq!(jxl_image.width(), 4);
        assert_eq!(jxl_image.height(), 4);
        let render = jxl_image
            .render_frame(0)
            .unwrap_or_else(|e| panic!("jxl-oxide render failed: {}", e));
        let fb = render.image_all_channels();
        let decoded_f32 = fb.buf();
        // jxl-oxide renders to f32 in [0,1]; scale back to u8 for comparison.
        let decoded: Vec<u8> = decoded_f32
            .iter()
            .map(|&v| (v * 255.0).round().clamp(0.0, 255.0) as u8)
            .collect();
        assert_eq!(
            decoded.len(),
            data.len(),
            "decoded size mismatch: {} vs {}",
            decoded.len(),
            data.len()
        );
        for (i, (&orig, &dec)) in data.iter().zip(decoded.iter()).enumerate() {
            assert_eq!(
                orig, dec,
                "pixel {} differs: orig={} decoded={}",
                i, orig, dec
            );
        }
    }

    // Same roundtrip on non-monotonic data; also encodes the Huffman
    // variant first (scoped block) purely for byte-dump comparison.
    #[test]
    fn test_ans_roundtrip_gray_varied() {
        use crate::headers::{ColorEncoding, FileHeader};
        use crate::modular::frame::{FrameEncoder, FrameEncoderOptions};
        let data = vec![0u8, 64, 128, 192, 255, 100, 50, 200];
        let image = ModularImage::from_gray8(&data, 4, 2).unwrap();
        {
            // Huffman-coded reference encode — printed, not asserted.
            let mut writer = BitWriter::new();
            let file_header = FileHeader::new_gray(4, 2);
            file_header.write(&mut writer).unwrap();
            writer.zero_pad_to_byte();
            let frame_options = FrameEncoderOptions {
                use_modular: true,
                effort: 7,
                use_ans: false,
                use_tree_learning: false,
                use_squeeze: false,
                ..Default::default()
            };
            let frame_encoder = FrameEncoder::new(4, 2, frame_options);
            let color_encoding = ColorEncoding::srgb();
            frame_encoder
                .encode_modular(&image, &color_encoding, &mut writer)
                .unwrap();
            let huf_bytes = writer.finish_with_padding();
            eprintln!("Huffman modular gray varied 4x2: {} bytes", huf_bytes.len());
            eprintln!("Huffman bytes: {:02x?}", &huf_bytes);
        }
        let mut writer = BitWriter::new();
        let file_header = FileHeader::new_gray(4, 2);
        file_header.write(&mut writer).unwrap();
        writer.zero_pad_to_byte();
        let frame_options = FrameEncoderOptions {
            use_modular: true,
            effort: 7,
            use_ans: true,
            use_tree_learning: false,
            use_squeeze: false,
            ..Default::default()
        };
        let frame_encoder = FrameEncoder::new(4, 2, frame_options);
        let color_encoding = ColorEncoding::srgb();
        frame_encoder
            .encode_modular(&image, &color_encoding, &mut writer)
            .unwrap();
        let bytes = writer.finish_with_padding();
        eprintln!("ANS modular gray varied 4x2: {} bytes", bytes.len());
        eprintln!("ANS bytes: {:02x?}", &bytes);
        // Dropped into the temp dir for manual inspection; failure is ignored.
        std::fs::write(std::env::temp_dir().join("ans_modular_varied.jxl"), &bytes).ok();
        let jxl_image = jxl_oxide::JxlImage::builder()
            .read(std::io::Cursor::new(&bytes))
            .unwrap_or_else(|e| panic!("jxl-oxide parse failed: {}", e));
        let render = jxl_image
            .render_frame(0)
            .unwrap_or_else(|e| panic!("jxl-oxide render failed: {}", e));
        let fb = render.image_all_channels();
        let decoded_f32 = fb.buf();
        let decoded: Vec<u8> = decoded_f32
            .iter()
            .map(|&v| (v * 255.0).round().clamp(0.0, 255.0) as u8)
            .collect();
        for (i, (&orig, &dec)) in data.iter().zip(decoded.iter()).enumerate() {
            assert_eq!(
                orig, dec,
                "pixel {} differs: orig={} decoded={}",
                i, orig, dec
            );
        }
    }

    // End-to-end lossless roundtrip on an 8x8 RGB gradient; every
    // mismatching pixel is printed, then max diff must be zero.
    #[test]
    fn test_ans_roundtrip_rgb_gradient() {
        use crate::headers::{ColorEncoding, FileHeader};
        use crate::modular::frame::{FrameEncoder, FrameEncoderOptions};
        let mut data = vec![0u8; 8 * 8 * 3];
        for y in 0..8 {
            for x in 0..8 {
                let idx = (y * 8 + x) * 3;
                data[idx] = (x * 32) as u8;
                data[idx + 1] = (y * 32) as u8;
                data[idx + 2] = ((x + y) * 16) as u8;
            }
        }
        let image = ModularImage::from_rgb8(&data, 8, 8).unwrap();
        let mut writer = BitWriter::new();
        let file_header = FileHeader::new_rgb(8, 8);
        file_header.write(&mut writer).unwrap();
        writer.zero_pad_to_byte();
        let frame_options = FrameEncoderOptions {
            use_modular: true,
            effort: 7,
            use_ans: true,
            use_tree_learning: false,
            use_squeeze: false,
            ..Default::default()
        };
        let frame_encoder = FrameEncoder::new(8, 8, frame_options);
        let color_encoding = ColorEncoding::srgb();
        frame_encoder
            .encode_modular(&image, &color_encoding, &mut writer)
            .unwrap();
        let bytes = writer.finish_with_padding();
        eprintln!("ANS modular RGB gradient 8x8: {} bytes", bytes.len());
        let jxl_image = jxl_oxide::JxlImage::builder()
            .read(std::io::Cursor::new(&bytes))
            .unwrap_or_else(|e| panic!("jxl-oxide parse failed: {}", e));
        let render = jxl_image
            .render_frame(0)
            .unwrap_or_else(|e| panic!("jxl-oxide render failed: {}", e));
        let fb = render.image_all_channels();
        let decoded_f32 = fb.buf();
        let decoded: Vec<u8> = decoded_f32
            .iter()
            .map(|&v| (v * 255.0).round().clamp(0.0, 255.0) as u8)
            .collect();
        assert_eq!(decoded.len(), data.len());
        let mut max_diff = 0i32;
        for (i, (&orig, &dec)) in data.iter().zip(decoded.iter()).enumerate() {
            let diff = (orig as i32 - dec as i32).abs();
            if diff > max_diff {
                max_diff = diff;
                eprintln!(
                    "pixel {} ch {}: orig={} decoded={} diff={}",
                    i / 3,
                    i % 3,
                    orig,
                    dec,
                    diff
                );
            }
        }
        assert_eq!(max_diff, 0, "lossless roundtrip should have zero diff");
    }

    // ANS variants of the earlier smoke tests (use_ans flag = true).
    #[test]
    fn test_ans_simple_stream() {
        let data: Vec<u8> = vec![
            100, 101, 102, 103, 101, 102, 103, 104, 102, 103, 104, 105, 103, 104, 105, 106,
        ];
        let image = ModularImage::from_gray8(&data, 4, 4).unwrap();
        let mut writer = BitWriter::new();
        write_simple_modular_stream(&image, &mut writer, true).unwrap();
        let bytes = writer.finish_with_padding();
        assert!(
            !bytes.is_empty(),
            "ANS stream should produce non-empty output"
        );
    }

    #[test]
    fn test_ans_rct_stream() {
        let mut data = Vec::new();
        for y in 0..4 {
            for x in 0..4 {
                let base = (y * 4 + x) * 10;
                data.push(base as u8);
                data.push((base + 5) as u8);
                data.push((base + 10) as u8);
            }
        }
        let image = ModularImage::from_rgb8(&data, 4, 4).unwrap();
        let mut writer = BitWriter::new();
        write_modular_stream_with_rct(&image, &mut writer, true).unwrap();
        let bytes = writer.finish_with_padding();
        assert!(
            !bytes.is_empty(),
            "ANS RCT stream should produce non-empty output"
        );
    }

    #[test]
    fn test_ans_weighted_stream() {
        let data: Vec<u8> = vec![
            100, 101, 102, 103, 101, 102, 103, 104, 102, 103, 104, 105, 103, 104, 105, 106,
        ];
        let image = ModularImage::from_gray8(&data, 4, 4).unwrap();
        let mut writer = BitWriter::new();
        write_modular_stream_with_weighted(&image, &mut writer, true).unwrap();
        let bytes = writer.finish_with_padding();
        assert!(
            !bytes.is_empty(),
            "ANS weighted stream should produce non-empty output"
        );
    }

    #[test]
    fn test_ans_rct_weighted_stream() {
        let mut data = Vec::new();
        for y in 0..4 {
            for x in 0..4 {
                let base = (y * 4 + x) * 10;
                data.push(base as u8);
                data.push((base + 5) as u8);
                data.push((base + 10) as u8);
            }
        }
        let image = ModularImage::from_rgb8(&data, 4, 4).unwrap();
        let mut writer = BitWriter::new();
        write_modular_stream_with_rct_weighted(&image, &mut writer, true).unwrap();
        let bytes = writer.finish_with_padding();
        assert!(
            !bytes.is_empty(),
            "ANS RCT+weighted stream should produce non-empty output"
        );
    }

    // Size regression guard: ANS must not be more than 20% larger than
    // Huffman on a structured 32x32 RGB image.
    #[test]
    fn test_ans_vs_huffman_size() {
        use crate::{LosslessConfig, PixelLayout};
        let mut data = vec![0u8; 32 * 32 * 3];
        for y in 0..32 {
            for x in 0..32 {
                let idx = (y * 32 + x) * 3;
                data[idx] = ((x * 8 + y * 2) % 256) as u8;
                data[idx + 1] = ((y * 8 + x * 3) % 256) as u8;
                data[idx + 2] = (((x + y) * 5) % 256) as u8;
            }
        }
        let huf_encoded = LosslessConfig::new()
            .with_ans(false)
            .encode(&data, 32, 32, PixelLayout::Rgb8)
            .unwrap();
        let ans_encoded = LosslessConfig::new()
            .with_ans(true)
            .encode(&data, 32, 32, PixelLayout::Rgb8)
            .unwrap();
        eprintln!(
            "32x32 RGB: Huffman={} bytes, ANS={} bytes, savings={:.1}%",
            huf_encoded.len(),
            ans_encoded.len(),
            (1.0 - ans_encoded.len() as f64 / huf_encoded.len() as f64) * 100.0
        );
        assert!(
            ans_encoded.len() <= huf_encoded.len() + huf_encoded.len() / 5,
            "ANS should not be >20% larger than Huffman"
        );
    }
}