// jxl_encoder/api.rs
// Copyright (c) Imazen LLC and the JPEG XL Project Authors.
// Algorithms and constants derived from libjxl (BSD-3-Clause).
// Licensed under AGPL-3.0-or-later. Commercial licenses at https://www.imazen.io/pricing

//! Three-layer public API: Config → Request → Encoder.
//!
//! ```rust,no_run
//! use jxl_encoder::{LosslessConfig, LossyConfig, PixelLayout};
//!
//! # let pixels = vec![0u8; 800 * 600 * 3];
//! // Simple — one line, no request visible
//! let jxl = LossyConfig::new(1.0)
//!     .encode(&pixels, 800, 600, PixelLayout::Rgb8)?;
//!
//! // Full control — request layer for metadata, limits, cancellation
//! let jxl = LosslessConfig::new()
//!     .encode_request(800, 600, PixelLayout::Rgb8)
//!     .encode(&pixels)?;
//! # Ok::<_, jxl_encoder::At<jxl_encoder::EncodeError>>(())
//! ```
22pub use crate::entropy_coding::Lz77Method;
23pub use enough::{Stop, Unstoppable};
24pub use whereat::{At, ResultAtExt, at};
25
26// ── Error type ──────────────────────────────────────────────────────────────
27
/// Encode error type.
///
/// Returned (wrapped in [`At`] for a location trace) from every encode entry
/// point. Marked `#[non_exhaustive]`: new variants may appear in minor
/// releases, so callers should keep a wildcard arm.
#[derive(Debug)]
#[non_exhaustive]
pub enum EncodeError {
    /// Input validation failed (wrong buffer size, zero dimensions, etc.).
    InvalidInput { message: String },
    /// Config validation failed (contradictory options, out-of-range values).
    InvalidConfig { message: String },
    /// Pixel layout not supported for this config/mode.
    UnsupportedPixelLayout(PixelLayout),
    /// A configured limit was exceeded.
    LimitExceeded { message: String },
    /// Encoding was cancelled via [`Stop`].
    Cancelled,
    /// Allocation failure.
    // NOTE(review): uses `std::collections` unconditionally while `Io` is
    // gated on the `std` feature — confirm whether no_std builds are intended.
    Oom(std::collections::TryReserveError),
    /// I/O error. Only exists when the `std` feature is enabled.
    #[cfg(feature = "std")]
    Io(std::io::Error),
    /// Internal encoder error (should not happen — file a bug).
    Internal { message: String },
}
50
51impl core::fmt::Display for EncodeError {
52    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
53        match self {
54            Self::InvalidInput { message } => write!(f, "invalid input: {message}"),
55            Self::InvalidConfig { message } => write!(f, "invalid config: {message}"),
56            Self::UnsupportedPixelLayout(layout) => {
57                write!(f, "unsupported pixel layout: {layout:?}")
58            }
59            Self::LimitExceeded { message } => write!(f, "limit exceeded: {message}"),
60            Self::Cancelled => write!(f, "encoding cancelled"),
61            Self::Oom(e) => write!(f, "out of memory: {e}"),
62            #[cfg(feature = "std")]
63            Self::Io(e) => write!(f, "I/O error: {e}"),
64            Self::Internal { message } => write!(f, "internal error: {message}"),
65        }
66    }
67}
68
impl core::error::Error for EncodeError {
    /// Expose the underlying cause for the wrapper variants (`Oom`, and `Io`
    /// when built with `std`) so callers can walk the error chain.
    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
        match self {
            Self::Oom(e) => Some(e),
            #[cfg(feature = "std")]
            Self::Io(e) => Some(e),
            // Remaining variants carry only a message (or nothing).
            _ => None,
        }
    }
}
79
80impl From<crate::error::Error> for EncodeError {
81    fn from(e: crate::error::Error) -> Self {
82        match e {
83            crate::error::Error::InvalidImageDimensions(w, h) => Self::InvalidInput {
84                message: format!("invalid dimensions: {w}x{h}"),
85            },
86            crate::error::Error::ImageTooLarge(w, h, mw, mh) => Self::LimitExceeded {
87                message: format!("image {w}x{h} exceeds max {mw}x{mh}"),
88            },
89            crate::error::Error::InvalidInput(msg) => Self::InvalidInput { message: msg },
90            crate::error::Error::OutOfMemory(e) => Self::Oom(e),
91            #[cfg(feature = "std")]
92            crate::error::Error::IoError(e) => Self::Io(e),
93            crate::error::Error::Cancelled => Self::Cancelled,
94            other => Self::Internal {
95                message: format!("{other}"),
96            },
97        }
98    }
99}
100
#[cfg(feature = "std")]
impl From<std::io::Error> for EncodeError {
    /// Wrap an I/O error (only available with the `std` feature).
    fn from(err: std::io::Error) -> Self {
        EncodeError::Io(err)
    }
}
107
108impl From<enough::StopReason> for EncodeError {
109    fn from(_: enough::StopReason) -> Self {
110        Self::Cancelled
111    }
112}
113
/// Result type for encoding operations.
///
/// Errors carry location traces via [`whereat::At`] for lightweight
/// production-safe error tracking without debuginfo or backtraces.
///
/// This shadows the prelude's `Result` inside this module; spell out
/// `core::result::Result` where the two-parameter form is needed.
pub type Result<T> = core::result::Result<T, At<EncodeError>>;
119
120// ── EncodeResult / EncodeStats ──────────────────────────────────────────────
121
122/// Result of an encode operation. Holds encoded data and metrics.
123///
124/// After `encode()`, `data()` returns the JXL bytes. After `encode_into()`
125/// or `encode_to()`, `data()` returns `None` (data already delivered).
126/// Use `take_data()` to move the vec out without cloning.
127#[derive(Clone, Debug)]
128pub struct EncodeResult {
129    data: Option<Vec<u8>>,
130    stats: EncodeStats,
131}
132
133impl EncodeResult {
134    /// Encoded JXL bytes (borrowing). None if data was written elsewhere.
135    pub fn data(&self) -> Option<&[u8]> {
136        self.data.as_deref()
137    }
138
139    /// Take the owned data vec, leaving None in its place.
140    pub fn take_data(&mut self) -> Option<Vec<u8>> {
141        self.data.take()
142    }
143
144    /// Encode metrics.
145    pub fn stats(&self) -> &EncodeStats {
146        &self.stats
147    }
148}
149
/// Encode metrics collected during encoding.
///
/// Obtained from [`EncodeResult::stats`]; all fields are read-only and
/// exposed through getters. `#[non_exhaustive]` so fields can be added later.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
pub struct EncodeStats {
    codestream_size: usize,
    output_size: usize,
    mode: EncodeMode,
    /// Index = raw strategy code (0..19), value = first-block count.
    strategy_counts: [u32; 19],
    gaborish: bool,
    ans: bool,
    butteraugli_iters: u32,
    pixel_domain_loss: bool,
}

impl EncodeStats {
    /// Whether the encode was lossy or lossless.
    pub fn mode(&self) -> EncodeMode {
        self.mode
    }

    /// Size of the JXL codestream in bytes (before container wrapping).
    pub fn codestream_size(&self) -> usize {
        self.codestream_size
    }

    /// Size of the final output in bytes (after container wrapping, if any).
    pub fn output_size(&self) -> usize {
        self.output_size
    }

    /// Per-strategy first-block counts, indexed by raw strategy code (0..19).
    pub fn strategy_counts(&self) -> &[u32; 19] {
        &self.strategy_counts
    }

    /// Whether gaborish pre-filtering was enabled.
    pub fn gaborish(&self) -> bool {
        self.gaborish
    }

    /// Whether ANS entropy coding was used.
    pub fn ans(&self) -> bool {
        self.ans
    }

    /// Number of butteraugli quantization loop iterations performed.
    pub fn butteraugli_iters(&self) -> u32 {
        self.butteraugli_iters
    }

    /// Whether pixel-domain loss was enabled.
    pub fn pixel_domain_loss(&self) -> bool {
        self.pixel_domain_loss
    }
}

/// Encoding mode.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub enum EncodeMode {
    /// Lossy (VarDCT) encoding.
    #[default]
    Lossy,
    /// Lossless (modular) encoding.
    Lossless,
}
216
217// ── PixelLayout ─────────────────────────────────────────────────────────────
218
/// Describes the pixel format of input data.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum PixelLayout {
    /// 8-bit sRGB, 3 bytes per pixel (R, G, B).
    Rgb8,
    /// 8-bit sRGB + alpha, 4 bytes per pixel (R, G, B, A).
    Rgba8,
    /// 8-bit sRGB in BGR order, 3 bytes per pixel (B, G, R).
    Bgr8,
    /// 8-bit sRGB in BGRA order, 4 bytes per pixel (B, G, R, A).
    Bgra8,
    /// 8-bit grayscale, 1 byte per pixel.
    Gray8,
    /// 8-bit grayscale + alpha, 2 bytes per pixel.
    GrayAlpha8,
    /// 16-bit sRGB, 6 bytes per pixel (R, G, B) — native-endian u16.
    Rgb16,
    /// 16-bit sRGB + alpha, 8 bytes per pixel (R, G, B, A) — native-endian u16.
    Rgba16,
    /// 16-bit grayscale, 2 bytes per pixel — native-endian u16.
    Gray16,
    /// 16-bit grayscale + alpha, 4 bytes per pixel — native-endian u16.
    GrayAlpha16,
    /// Linear f32 RGB, 12 bytes per pixel. Skips sRGB→linear conversion.
    RgbLinearF32,
    /// Linear f32 RGBA, 16 bytes per pixel. Skips sRGB→linear conversion.
    RgbaLinearF32,
    /// Linear f32 grayscale, 4 bytes per pixel.
    GrayLinearF32,
    /// Linear f32 grayscale + alpha, 8 bytes per pixel.
    GrayAlphaLinearF32,
}

impl PixelLayout {
    /// Bytes per pixel for this layout (channel count × bytes per sample).
    pub const fn bytes_per_pixel(self) -> usize {
        match self {
            Self::Gray8 => 1,
            Self::GrayAlpha8 | Self::Gray16 => 2,
            Self::Rgb8 | Self::Bgr8 => 3,
            Self::Rgba8 | Self::Bgra8 | Self::GrayAlpha16 | Self::GrayLinearF32 => 4,
            Self::Rgb16 => 6,
            Self::Rgba16 | Self::GrayAlphaLinearF32 => 8,
            Self::RgbLinearF32 => 12,
            Self::RgbaLinearF32 => 16,
        }
    }

    /// Whether this layout uses linear (not gamma-encoded) values.
    ///
    /// In this enum the linear layouts are exactly the f32 layouts, so this
    /// delegates to [`Self::is_f32`] rather than duplicating the variant list.
    pub const fn is_linear(self) -> bool {
        self.is_f32()
    }

    /// Whether this layout uses 16-bit samples.
    pub const fn is_16bit(self) -> bool {
        matches!(
            self,
            Self::Rgb16 | Self::Rgba16 | Self::Gray16 | Self::GrayAlpha16
        )
    }

    /// Whether this layout uses f32 samples.
    pub const fn is_f32(self) -> bool {
        matches!(
            self,
            Self::RgbLinearF32
                | Self::RgbaLinearF32
                | Self::GrayLinearF32
                | Self::GrayAlphaLinearF32
        )
    }

    /// Whether this layout includes an alpha channel.
    pub const fn has_alpha(self) -> bool {
        matches!(
            self,
            Self::Rgba8
                | Self::Bgra8
                | Self::GrayAlpha8
                | Self::Rgba16
                | Self::GrayAlpha16
                | Self::RgbaLinearF32
                | Self::GrayAlphaLinearF32
        )
    }

    /// Whether this layout is grayscale (one color channel, with or without alpha).
    pub const fn is_grayscale(self) -> bool {
        matches!(
            self,
            Self::Gray8
                | Self::GrayAlpha8
                | Self::Gray16
                | Self::GrayAlpha16
                | Self::GrayLinearF32
                | Self::GrayAlphaLinearF32
        )
    }
}
329
330// ── Quality ─────────────────────────────────────────────────────────────────
331
/// Quality specification for lossy encoding.
///
/// Both forms are converted to a butteraugli distance when the config is
/// used; out-of-range values are rejected at that point, not on construction.
#[derive(Clone, Copy, Debug)]
#[non_exhaustive]
pub enum Quality {
    /// Butteraugli distance (1.0 = high quality, lower = better).
    Distance(f32),
    /// Percentage scale (0–100, 100 = mathematically lossless, invalid for lossy).
    Percent(u32),
}
341
342impl Quality {
343    /// Convert to butteraugli distance.
344    fn to_distance(self) -> core::result::Result<f32, EncodeError> {
345        match self {
346            Self::Distance(d) => {
347                if d <= 0.0 {
348                    return Err(EncodeError::InvalidConfig {
349                        message: format!("lossy distance must be > 0.0, got {d}"),
350                    });
351                }
352                Ok(d)
353            }
354            Self::Percent(q) => {
355                if q >= 100 {
356                    return Err(EncodeError::InvalidConfig {
357                        message: "quality 100 is lossless; use LosslessConfig instead".into(),
358                    });
359                }
360                Ok(percent_to_distance(q))
361            }
362        }
363    }
364}
365
/// Map a 0–100 percentage onto butteraugli distance.
///
/// Piecewise linear: 100+ → 0.0, 90–99 → (0,1], 70–89 → (1,2], below 70
/// climbs from 2.0 up to 9.0 at quality 0.
fn percent_to_distance(quality: u32) -> f32 {
    match quality {
        100.. => 0.0,
        90..=99 => (100 - quality) as f32 / 10.0,
        70..=89 => 1.0 + (90 - quality) as f32 / 20.0,
        _ => 2.0 + (70 - quality) as f32 / 10.0,
    }
}
377
/// Convert quality on 0–100 scale to JXL butteraugli distance.
///
/// Matches the jxl-encoder's own `percent_to_distance` piecewise mapping:
/// - 90–100 → distance 0.0–1.0  (perceptually lossless zone)
/// - 70–90  → distance 1.0–2.0  (high quality)
/// - 0–70   → distance 2.0–9.0  (lower quality)
#[must_use]
pub fn quality_to_distance(quality: f32) -> f32 {
    // Out-of-range inputs are clamped rather than rejected.
    let q = quality.clamp(0.0, 100.0);
    if q >= 100.0 {
        return 0.0;
    }
    if q >= 90.0 {
        return (100.0 - q) / 10.0;
    }
    if q >= 70.0 {
        return 1.0 + (90.0 - q) / 20.0;
    }
    2.0 + (70.0 - q) / 10.0
}
397
/// Map generic quality (libjpeg-turbo scale) to JXL native quality.
///
/// Calibrated on CID22-512 corpus (209 images) to produce the same median
/// SSIMULACRA2 as libjpeg-turbo at each quality level. The native quality
/// is then mapped to Butteraugli distance by [`quality_to_distance`].
#[must_use]
pub fn calibrated_jxl_quality(generic_q: f32) -> f32 {
    // (generic quality, measured-equivalent JXL native quality) anchor points.
    const TABLE: &[(f32, f32)] = &[
        (5.0, 5.0),
        (10.0, 5.0),
        (15.0, 5.0),
        (20.0, 5.0),
        (25.0, 9.3),
        (30.0, 22.7),
        (35.0, 33.0),
        (40.0, 38.8),
        (45.0, 43.8),
        (50.0, 48.5),
        (55.0, 51.9),
        (60.0, 55.1),
        (65.0, 58.0),
        (70.0, 61.3),
        (72.0, 63.2),
        (75.0, 65.5),
        (78.0, 67.9),
        (80.0, 69.1),
        (82.0, 71.8),
        (85.0, 76.1),
        (87.0, 79.3),
        (90.0, 84.2),
        (92.0, 86.9),
        (95.0, 91.2),
        (97.0, 92.8),
        (99.0, 93.8),
    ];
    interp_quality(TABLE, generic_q.clamp(0.0, 100.0))
}

/// Piecewise linear interpolation with clamping at table bounds.
///
/// `table` must be non-empty and sorted by its first component.
fn interp_quality(table: &[(f32, f32)], x: f32) -> f32 {
    let first = table[0];
    let last = table[table.len() - 1];
    if x <= first.0 {
        return first.1;
    }
    if x >= last.0 {
        return last.1;
    }
    for pair in table.windows(2) {
        let ((x0, y0), (x1, y1)) = (pair[0], pair[1]);
        if x <= x1 {
            let t = (x - x0) / (x1 - x0);
            return y0 + t * (y1 - y0);
        }
    }
    // Unreachable given the bound checks above; kept as a safe fallback.
    last.1
}
455
456// ── Supporting types ────────────────────────────────────────────────────────
457
/// Image metadata (ICC, EXIF, XMP, tone mapping) to embed in the JXL file.
///
/// Borrows all byte payloads, so it is cheap to build per-encode.
#[derive(Clone, Debug, Default)]
pub struct ImageMetadata<'a> {
    icc_profile: Option<&'a [u8]>,
    exif: Option<&'a [u8]>,
    xmp: Option<&'a [u8]>,
    /// Peak display luminance in nits (cd/m²). `None` uses the JXL default (255.0 = SDR).
    intensity_target: Option<f32>,
    /// Minimum display luminance in nits. `None` uses the JXL default (0.0).
    min_nits: Option<f32>,
    /// Intrinsic display size `(width, height)`, if different from coded dimensions.
    intrinsic_size: Option<(u32, u32)>,
}

impl<'a> ImageMetadata<'a> {
    /// Create empty metadata — nothing gets embedded until a builder is called.
    pub fn new() -> Self {
        Self::default()
    }

    // ── Builders ────────────────────────────────────────────────────

    /// Attach an ICC color profile.
    pub fn with_icc_profile(self, data: &'a [u8]) -> Self {
        Self {
            icc_profile: Some(data),
            ..self
        }
    }

    /// Attach EXIF data.
    pub fn with_exif(self, data: &'a [u8]) -> Self {
        Self {
            exif: Some(data),
            ..self
        }
    }

    /// Attach XMP data.
    pub fn with_xmp(self, data: &'a [u8]) -> Self {
        Self {
            xmp: Some(data),
            ..self
        }
    }

    /// Set the peak display luminance in nits (cd/m²) for HDR content.
    ///
    /// Written to the JXL codestream `ToneMapping.intensity_target` field.
    /// Default is 255.0 (SDR). Set to e.g. 4000.0 or 10000.0 for HDR.
    pub fn with_intensity_target(self, nits: f32) -> Self {
        Self {
            intensity_target: Some(nits),
            ..self
        }
    }

    /// Set the minimum display luminance in nits.
    ///
    /// Written to the JXL codestream `ToneMapping.min_nits` field.
    /// Default is 0.0.
    pub fn with_min_nits(self, nits: f32) -> Self {
        Self {
            min_nits: Some(nits),
            ..self
        }
    }

    /// Set the intrinsic display size.
    ///
    /// When set, the image should be rendered at this `(width, height)` rather
    /// than the coded dimensions. Written to the JXL codestream `intrinsic_size` field.
    pub fn with_intrinsic_size(self, width: u32, height: u32) -> Self {
        Self {
            intrinsic_size: Some((width, height)),
            ..self
        }
    }

    // ── Getters ─────────────────────────────────────────────────────

    /// Get the ICC color profile, if set.
    pub fn icc_profile(&self) -> Option<&[u8]> {
        self.icc_profile
    }

    /// Get the EXIF data, if set.
    pub fn exif(&self) -> Option<&[u8]> {
        self.exif
    }

    /// Get the XMP data, if set.
    pub fn xmp(&self) -> Option<&[u8]> {
        self.xmp
    }

    /// Get the intensity target, if set.
    pub fn intensity_target(&self) -> Option<f32> {
        self.intensity_target
    }

    /// Get the min nits, if set.
    pub fn min_nits(&self) -> Option<f32> {
        self.min_nits
    }

    /// Get the intrinsic size, if set.
    pub fn intrinsic_size(&self) -> Option<(u32, u32)> {
        self.intrinsic_size
    }
}
553
/// Resource limits for encoding.
///
/// All limits default to `None` (unrestricted); each builder enables one.
#[derive(Clone, Debug, Default)]
pub struct Limits {
    max_width: Option<u64>,
    max_height: Option<u64>,
    max_pixels: Option<u64>,
    max_memory_bytes: Option<u64>,
}

impl Limits {
    /// Create limits with no restrictions (all `None`).
    pub fn new() -> Self {
        Self::default()
    }

    /// Set maximum image width.
    pub fn with_max_width(self, w: u64) -> Self {
        Self {
            max_width: Some(w),
            ..self
        }
    }

    /// Set maximum image height.
    pub fn with_max_height(self, h: u64) -> Self {
        Self {
            max_height: Some(h),
            ..self
        }
    }

    /// Set maximum total pixels (width × height).
    pub fn with_max_pixels(self, p: u64) -> Self {
        Self {
            max_pixels: Some(p),
            ..self
        }
    }

    /// Set maximum memory bytes the encoder may allocate.
    pub fn with_max_memory_bytes(self, bytes: u64) -> Self {
        Self {
            max_memory_bytes: Some(bytes),
            ..self
        }
    }

    /// Get maximum width, if set.
    pub fn max_width(&self) -> Option<u64> {
        self.max_width
    }

    /// Get maximum height, if set.
    pub fn max_height(&self) -> Option<u64> {
        self.max_height
    }

    /// Get maximum pixels, if set.
    pub fn max_pixels(&self) -> Option<u64> {
        self.max_pixels
    }

    /// Get maximum memory bytes, if set.
    pub fn max_memory_bytes(&self) -> Option<u64> {
        self.max_memory_bytes
    }
}
613
614// ── Animation ──────────────────────────────────────────────────────────────
615
/// Animation timing parameters.
///
/// Frame durations ([`AnimationFrame::duration`]) are expressed in ticks;
/// one tick lasts `tps_denominator / tps_numerator` seconds.
#[derive(Clone, Debug)]
pub struct AnimationParams {
    /// Ticks per second numerator (default 100 = 10ms precision).
    pub tps_numerator: u32,
    /// Ticks per second denominator (default 1).
    pub tps_denominator: u32,
    /// Number of loops: 0 = infinite (default), >0 = play N times.
    pub num_loops: u32,
}

impl Default for AnimationParams {
    /// 100 ticks per second (10 ms resolution), looping forever.
    fn default() -> Self {
        AnimationParams {
            num_loops: 0,
            tps_numerator: 100,
            tps_denominator: 1,
        }
    }
}
636
/// A single frame in an animation sequence.
///
/// Frames borrow their pixel data; every frame passed to one encode call
/// must use the same width/height/layout given in that call.
pub struct AnimationFrame<'a> {
    /// Raw pixel data (must match width/height/layout from the encode call).
    pub pixels: &'a [u8],
    /// Duration of this frame in ticks (tps_numerator/tps_denominator seconds per tick).
    pub duration: u32,
}
644
645// ── LosslessConfig ──────────────────────────────────────────────────────────
646
/// Lossless (modular) encoding configuration.
///
/// Has a sensible `Default` — lossless has no quality ambiguity.
#[derive(Clone, Debug)]
pub struct LosslessConfig {
    // Effort level 1–10; higher is slower but compresses better.
    effort: u8,
    // Reference (libjxl-compatible) vs Experimental algorithm choices.
    mode: EncoderMode,
    // ANS entropy coding (vs Huffman at low effort).
    use_ans: bool,
    // Squeeze (Haar wavelet) transform; off by default.
    squeeze: bool,
    // Content-adaptive tree learning.
    tree_learning: bool,
    // LZ77 backward references.
    lz77: bool,
    // Which LZ77 matcher to use when `lz77` is enabled.
    lz77_method: Lz77Method,
    // Dictionary-based repeated-pattern detection.
    patches: bool,
    // Near-lossless quantized delta palette (NOT pixel-exact).
    lossy_palette: bool,
    // 0 = auto, 1 = sequential; only effective with the `parallel` feature.
    threads: usize,
}
663
impl Default for LosslessConfig {
    /// Effort 7 in [`EncoderMode::Reference`] — per `with_effort`'s table this
    /// enables ANS, content-adaptive tree learning, and LZ77 RLE.
    fn default() -> Self {
        Self::with_effort_level(7)
    }
}
669
670impl LosslessConfig {
671    fn with_effort_level(effort: u8) -> Self {
672        let profile = crate::effort::EffortProfile::lossless(effort, EncoderMode::Reference);
673        Self {
674            effort: profile.effort,
675            mode: EncoderMode::Reference,
676            use_ans: profile.use_ans,
677            tree_learning: profile.tree_learning,
678            squeeze: false, // squeeze hurts even with tree learning (14-62% larger on both photos and screenshots)
679            lz77: profile.lz77,
680            lz77_method: profile.lz77_method,
681            patches: profile.patches,
682            lossy_palette: false,
683            threads: 0,
684        }
685    }
686
687    /// Create a new lossless config with defaults (effort 7).
688    pub fn new() -> Self {
689        Self::default()
690    }
691
692    /// Set effort level (1–10). Higher effort = slower, better compression.
693    ///
694    /// This adjusts all effort-dependent defaults:
695    /// - **e1–3**: Huffman encoding
696    /// - **e4–6**: + ANS entropy coding
697    /// - **e7**: + content-adaptive tree learning, LZ77 RLE
698    /// - **e8**: + LZ77 greedy hash chain
699    /// - **e9–10**: + LZ77 optimal (Viterbi DP)
700    ///
701    /// Individual `with_*()` calls after `with_effort()` override these defaults.
702    pub fn with_effort(self, effort: u8) -> Self {
703        let mut new = Self::with_effort_level(effort);
704        // Preserve settings that aren't effort-derived
705        new.mode = self.mode;
706        new.squeeze = self.squeeze;
707        new
708    }
709
710    /// Set encoder mode (default: [`EncoderMode::Reference`]).
711    ///
712    /// `Reference` matches libjxl's algorithm choices for comparable output.
713    /// `Experimental` enables encoder-specific improvements.
714    pub fn with_mode(mut self, mode: EncoderMode) -> Self {
715        self.mode = mode;
716        self
717    }
718
719    /// Current encoder mode.
720    pub fn mode(&self) -> EncoderMode {
721        self.mode
722    }
723
724    /// Enable/disable patches (dictionary-based repeated pattern detection).
725    /// Default: true at effort >= 5. Huge wins on screenshots, zero cost on photos.
726    pub fn with_patches(mut self, enable: bool) -> Self {
727        self.patches = enable;
728        self
729    }
730
731    /// Enable/disable ANS entropy coding (default: true).
732    pub fn with_ans(mut self, enable: bool) -> Self {
733        self.use_ans = enable;
734        self
735    }
736
737    /// Enable/disable squeeze (Haar wavelet) transform (default: false).
738    ///
739    /// Squeeze is disabled by default because tree learning provides better
740    /// compression on both photos and screenshots. Squeeze can still be
741    /// enabled via `.with_squeeze(true)` for experimentation.
742    pub fn with_squeeze(mut self, enable: bool) -> Self {
743        self.squeeze = enable;
744        self
745    }
746
747    /// Enable/disable content-adaptive tree learning (default: false).
748    pub fn with_tree_learning(mut self, enable: bool) -> Self {
749        self.tree_learning = enable;
750        self
751    }
752
753    /// Enable/disable LZ77 backward references (default: false).
754    pub fn with_lz77(mut self, enable: bool) -> Self {
755        self.lz77 = enable;
756        self
757    }
758
759    /// Set LZ77 method (default: Greedy). Only effective when LZ77 is enabled.
760    pub fn with_lz77_method(mut self, method: Lz77Method) -> Self {
761        self.lz77_method = method;
762        self
763    }
764
765    /// Enable/disable lossy delta palette (default: false).
766    ///
767    /// When enabled, uses quantized palette with delta entries and error diffusion
768    /// for near-lossless encoding. This is NOT pixel-exact — it trades some color
769    /// accuracy for significantly smaller files on images with many colors.
770    /// Matching libjxl's modular lossy palette mode.
771    pub fn with_lossy_palette(mut self, enable: bool) -> Self {
772        self.lossy_palette = enable;
773        self
774    }
775
776    /// Set thread count for parallel encoding (0 = auto, 1 = sequential).
777    ///
778    /// Requires the `parallel` feature. When `parallel` is not enabled,
779    /// this value is ignored and encoding is always sequential.
780    pub fn with_threads(mut self, threads: usize) -> Self {
781        self.threads = threads;
782        self
783    }
784
785    // ── Getters ───────────────────────────────────────────────────────
786
787    /// Current effort level.
788    pub fn effort(&self) -> u8 {
789        self.effort
790    }
791
792    /// Whether ANS entropy coding is enabled.
793    pub fn ans(&self) -> bool {
794        self.use_ans
795    }
796
797    /// Whether squeeze (Haar wavelet) transform is enabled.
798    pub fn squeeze(&self) -> bool {
799        self.squeeze
800    }
801
802    /// Whether content-adaptive tree learning is enabled.
803    pub fn tree_learning(&self) -> bool {
804        self.tree_learning
805    }
806
807    /// Whether LZ77 backward references are enabled.
808    pub fn lz77(&self) -> bool {
809        self.lz77
810    }
811
812    /// Current LZ77 method.
813    pub fn lz77_method(&self) -> Lz77Method {
814        self.lz77_method
815    }
816
817    /// Whether patches (dictionary-based repeated pattern detection) are enabled.
818    pub fn patches(&self) -> bool {
819        self.patches
820    }
821
822    /// Whether lossy delta palette is enabled.
823    pub fn lossy_palette(&self) -> bool {
824        self.lossy_palette
825    }
826
827    /// Thread count (0 = auto, 1 = sequential).
828    pub fn threads(&self) -> usize {
829        self.threads
830    }
831
832    // ── Request / fluent encode ─────────────────────────────────────
833
834    /// Create an encode request for an image with this config.
835    ///
836    /// Use this when you need to attach metadata, limits, or cancellation.
837    pub fn encode_request(
838        &self,
839        width: u32,
840        height: u32,
841        layout: PixelLayout,
842    ) -> EncodeRequest<'_> {
843        EncodeRequest {
844            config: ConfigRef::Lossless(self),
845            width,
846            height,
847            layout,
848            metadata: None,
849            limits: None,
850            stop: None,
851            source_gamma: None,
852            color_encoding: None,
853        }
854    }
855
856    /// Encode pixels directly with this config. Shortcut for simple cases.
857    ///
858    /// ```rust,no_run
859    /// # let pixels = vec![0u8; 100 * 100 * 3];
860    /// let jxl = jxl_encoder::LosslessConfig::new()
861    ///     .encode(&pixels, 100, 100, jxl_encoder::PixelLayout::Rgb8)?;
862    /// # Ok::<_, jxl_encoder::At<jxl_encoder::EncodeError>>(())
863    /// ```
864    #[track_caller]
865    pub fn encode(
866        &self,
867        pixels: &[u8],
868        width: u32,
869        height: u32,
870        layout: PixelLayout,
871    ) -> Result<Vec<u8>> {
872        self.encode_request(width, height, layout).encode(pixels)
873    }
874
875    /// Encode pixels, appending to an existing buffer.
876    #[track_caller]
877    pub fn encode_into(
878        &self,
879        pixels: &[u8],
880        width: u32,
881        height: u32,
882        layout: PixelLayout,
883        out: &mut Vec<u8>,
884    ) -> Result<()> {
885        self.encode_request(width, height, layout)
886            .encode_into(pixels, out)
887            .map(|_| ())
888    }
889
890    /// Encode a multi-frame animation as a lossless JXL.
891    ///
892    /// Each frame must have the same dimensions and pixel layout.
893    /// Returns the complete JXL codestream bytes.
894    #[track_caller]
895    pub fn encode_animation(
896        &self,
897        width: u32,
898        height: u32,
899        layout: PixelLayout,
900        animation: &AnimationParams,
901        frames: &[AnimationFrame<'_>],
902    ) -> Result<Vec<u8>> {
903        encode_animation_lossless(self, width, height, layout, animation, frames).map_err(at)
904    }
905}
906
907// ── EncoderMode ──────────────────────────────────────────────────────────────
908
/// Controls whether the encoder matches libjxl's algorithm choices or uses
/// its own improvements.
///
/// Both modes produce valid JPEG XL bitstreams decodable by any conformant
/// decoder. The difference is in *encoder-side* decisions: strategy selection
/// heuristics, cost models, entropy coding parameters, tree learning, etc.
/// Used by both [`LosslessConfig`] and [`LossyConfig`].
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum EncoderMode {
    /// Match libjxl's algorithm choices at the configured effort level.
    ///
    /// Output is statistically equivalent to `cjxl` at the same effort and
    /// distance — same RD curve within measurement noise. Use this when
    /// comparing against libjxl or when reproducibility matters.
    #[default]
    Reference,

    /// Use encoder-specific improvements and research features.
    ///
    /// May produce better rate-distortion performance than libjxl at the
    /// same effort level, but output will differ. Use this for production
    /// encoding where quality per byte is the goal.
    Experimental,
}
932
933// ── ProgressiveMode ──────────────────────────────────────────────────────────
934
/// Progressive encoding mode for VarDCT.
///
/// Progressive encoding splits AC coefficients across multiple passes by
/// reducing precision. Decoders can render a coarse preview after early passes,
/// improving user experience for web delivery.
///
/// The shift mechanism works by right-shifting quantized coefficients before
/// encoding in early passes. The decoder left-shifts and accumulates, so the
/// final result is exact (lossless reconstruction of the quantized coefficients).
/// More passes therefore cost bytes and encode time, not image quality.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum ProgressiveMode {
    /// Single pass (default). No progressive rendering.
    #[default]
    Single,
    /// 2-pass quantized progressive.
    ///
    /// - Pass 0: All AC coefficients right-shifted by 1 bit (coarse)
    /// - Pass 1: Residual at full precision
    ///
    /// Provides quick 2x-downsampled preview, then full quality refinement.
    QuantizedAcFullAc,
    /// 3-pass progressive (DC/VLF → LF → Full AC).
    ///
    /// - Pass 0: All AC coefficients right-shifted by 2 bits (very coarse, 8x downsample hint)
    /// - Pass 1: Residual right-shifted by 1 bit (medium, 4x downsample hint)
    /// - Pass 2: Final residual at full precision
    ///
    /// Provides staged refinement: blurry preview → sharper → final.
    DcVlfLfAc,
}
965
966// ── LossyConfig ─────────────────────────────────────────────────────────────
967
968/// Lossy (VarDCT) encoding configuration.
969///
970/// No `Default` — distance/quality is a required choice.
#[derive(Clone, Debug)]
pub struct LossyConfig {
    /// Butteraugli target distance (1.0 = high quality).
    distance: f32,
    /// Effort level 1–10; drives most of the other defaults below.
    effort: u8,
    /// Reference (libjxl-matching) vs Experimental algorithm choices.
    mode: EncoderMode,
    /// ANS entropy coding (Huffman is used at low efforts when disabled).
    use_ans: bool,
    /// Gaborish inverse pre-filter.
    gaborish: bool,
    /// Noise synthesis.
    noise: bool,
    /// Wiener denoising pre-filter; enabling it also enables `noise`.
    denoise: bool,
    /// Error diffusion in AC quantization (see `with_error_diffusion`).
    error_diffusion: bool,
    /// Pixel-domain loss in AC strategy selection.
    pixel_domain_loss: bool,
    /// LZ77 backward references in entropy coding.
    lz77: bool,
    /// LZ77 search method.
    lz77_method: Lz77Method,
    /// Force one AC strategy for all blocks (`None` = auto-selection).
    force_strategy: Option<u8>,
    /// Cap on the largest DCT transform considered (`None` = no restriction).
    max_strategy_size: Option<u8>,
    /// Patch (repeated-pattern dictionary) detection.
    patches: bool,
    /// Manual splines overlaid on the image, if any.
    splines: Option<Vec<crate::vardct::splines::Spline>>,
    /// Multi-pass progressive AC encoding mode.
    progressive: ProgressiveMode,
    /// Encode DC as a separate modular frame before the main frame.
    lf_frame: bool,
    /// Butteraugli quantization-loop iterations (effort-derived default).
    #[cfg(feature = "butteraugli-loop")]
    butteraugli_iters: u32,
    /// True once the caller set `butteraugli_iters` explicitly, so the
    /// override survives a later `with_effort()` call.
    #[cfg(feature = "butteraugli-loop")]
    butteraugli_iters_explicit: bool,
    /// SSIM2 quantization-loop iterations.
    #[cfg(feature = "ssim2-loop")]
    ssim2_iters: u32,
    /// Zensim quantization-loop iterations.
    #[cfg(feature = "zensim-loop")]
    zensim_iters: u32,
    /// Worker threads (0 = auto, 1 = sequential).
    threads: usize,
}
1000
1001impl LossyConfig {
1002    /// Create with butteraugli distance (1.0 = high quality). Default effort 7.
1003    pub fn new(distance: f32) -> Self {
1004        Self::new_with_effort(distance, 7)
1005    }
1006
1007    fn new_with_effort(distance: f32, effort: u8) -> Self {
1008        let profile = crate::effort::EffortProfile::lossy(effort, EncoderMode::Reference);
1009        Self {
1010            distance,
1011            effort: profile.effort,
1012            mode: EncoderMode::Reference,
1013            use_ans: profile.use_ans,
1014            gaborish: profile.gaborish,
1015            noise: false,
1016            denoise: false,
1017            error_diffusion: profile.error_diffusion,
1018            pixel_domain_loss: profile.pixel_domain_loss,
1019            lz77: profile.lz77,
1020            lz77_method: profile.lz77_method,
1021            force_strategy: None,
1022            max_strategy_size: None,
1023            patches: profile.patches,
1024            splines: None,
1025            progressive: ProgressiveMode::Single,
1026            lf_frame: false,
1027            #[cfg(feature = "butteraugli-loop")]
1028            butteraugli_iters: profile.butteraugli_iters,
1029            #[cfg(feature = "butteraugli-loop")]
1030            butteraugli_iters_explicit: false,
1031            #[cfg(feature = "ssim2-loop")]
1032            ssim2_iters: 0,
1033            #[cfg(feature = "zensim-loop")]
1034            zensim_iters: 0,
1035            threads: 0,
1036        }
1037    }
1038
1039    /// Create from a [`Quality`] specification.
1040    pub fn from_quality(quality: Quality) -> core::result::Result<Self, EncodeError> {
1041        let distance = quality.to_distance()?;
1042        Ok(Self::new(distance))
1043    }
1044
1045    /// Set effort level (1–10). Higher effort = slower, better compression.
1046    ///
1047    /// This adjusts all effort-dependent defaults:
1048    /// - **e1–3**: DCT8 only, Huffman, no gaborish/patches/butteraugli
1049    /// - **e4**: + ANS entropy coding, custom coefficient orders
1050    /// - **e5**: + gaborish, pixel-domain loss, AC strategy search, AdjustQuantBlockAC
1051    /// - **e6**: + DCT4x8/AFV strategies, non-aligned eval, EPF dynamic sharpness
1052    /// - **e7**: + patches, error diffusion, CfL two-pass, LZ77 RLE, DCT64 strategies
1053    /// - **e8**: + butteraugli loop (2 iters), LZ77 greedy, WP param search (2 modes)
1054    /// - **e9–10**: + LZ77 optimal (Viterbi DP), 4 butteraugli iters, WP search (5 modes)
1055    ///
1056    /// Individual `with_*()` calls after `with_effort()` override these defaults.
1057    pub fn with_effort(self, effort: u8) -> Self {
1058        let mut new = Self::new_with_effort(self.distance, effort);
1059        // Preserve settings that are never effort-derived (always opt-in)
1060        new.mode = self.mode;
1061        new.noise = self.noise;
1062        new.denoise = self.denoise;
1063        new.force_strategy = self.force_strategy;
1064        new.max_strategy_size = self.max_strategy_size;
1065        new.splines = self.splines;
1066        new.progressive = self.progressive;
1067        // Preserve explicit butteraugli override
1068        #[cfg(feature = "butteraugli-loop")]
1069        if self.butteraugli_iters_explicit {
1070            new.butteraugli_iters = self.butteraugli_iters;
1071            new.butteraugli_iters_explicit = true;
1072        }
1073        #[cfg(feature = "ssim2-loop")]
1074        {
1075            new.ssim2_iters = self.ssim2_iters;
1076        }
1077        #[cfg(feature = "zensim-loop")]
1078        {
1079            new.zensim_iters = self.zensim_iters;
1080        }
1081        new
1082    }
1083
1084    /// Set encoder mode (default: [`EncoderMode::Reference`]).
1085    ///
1086    /// `Reference` matches libjxl's algorithm choices for comparable output.
1087    /// `Experimental` enables encoder-specific improvements.
1088    pub fn with_mode(mut self, mode: EncoderMode) -> Self {
1089        self.mode = mode;
1090        self
1091    }
1092
1093    /// Current encoder mode.
1094    pub fn mode(&self) -> EncoderMode {
1095        self.mode
1096    }
1097
1098    /// Enable/disable ANS entropy coding (default: true).
1099    pub fn with_ans(mut self, enable: bool) -> Self {
1100        self.use_ans = enable;
1101        self
1102    }
1103
1104    /// Enable/disable gaborish inverse pre-filter (default: true).
1105    pub fn with_gaborish(mut self, enable: bool) -> Self {
1106        self.gaborish = enable;
1107        self
1108    }
1109
1110    /// Enable/disable noise synthesis (default: false).
1111    pub fn with_noise(mut self, enable: bool) -> Self {
1112        self.noise = enable;
1113        self
1114    }
1115
1116    /// Enable/disable Wiener denoising pre-filter (default: false). Implies noise.
1117    pub fn with_denoise(mut self, enable: bool) -> Self {
1118        self.denoise = enable;
1119        if enable {
1120            self.noise = true;
1121        }
1122        self
1123    }
1124
1125    /// Enable/disable error diffusion in AC quantization (default: false).
1126    ///
1127    /// Error diffusion propagates 1/4 of the quantization error to the next
1128    /// coefficient in zigzag order. Note: libjxl's `QuantizeBlockAC` accepts
1129    /// this parameter but never references it — the feature is effectively a
1130    /// no-op in the reference encoder. Our implementation actually performs
1131    /// the diffusion, which can hurt quality on certain content (bright features
1132    /// in dark regions), especially when combined with gaborish.
1133    pub fn with_error_diffusion(mut self, enable: bool) -> Self {
1134        self.error_diffusion = enable;
1135        self
1136    }
1137
1138    /// Enable/disable pixel-domain loss in strategy selection (default: true).
1139    pub fn with_pixel_domain_loss(mut self, enable: bool) -> Self {
1140        self.pixel_domain_loss = enable;
1141        self
1142    }
1143
1144    /// Enable/disable LZ77 backward references (default: false).
1145    pub fn with_lz77(mut self, enable: bool) -> Self {
1146        self.lz77 = enable;
1147        self
1148    }
1149
1150    /// Set LZ77 method (default: Greedy).
1151    pub fn with_lz77_method(mut self, method: Lz77Method) -> Self {
1152        self.lz77_method = method;
1153        self
1154    }
1155
1156    /// Force a specific AC strategy for all blocks. `None` for auto-selection.
1157    pub fn with_force_strategy(mut self, strategy: Option<u8>) -> Self {
1158        self.force_strategy = strategy;
1159        self
1160    }
1161
1162    /// Limit the maximum AC strategy transform size.
1163    ///
1164    /// Controls the largest DCT transform the encoder will consider:
1165    /// - `8`: Only 8×8-class transforms (DCT8, DCT4x4, DCT4x8, AFV, IDENTITY, DCT2x2)
1166    /// - `16`: Up to 16×16 (adds DCT16x16, DCT16x8, DCT8x16)
1167    /// - `32`: Up to 32×32 (adds DCT32x32, DCT32x16, DCT16x32)
1168    /// - `64`: No restriction (adds DCT64x64, DCT64x32, DCT32x64) — the default
1169    ///
1170    /// `None` means no restriction (same as `64`). Values are clamped to the
1171    /// nearest valid size.
1172    pub fn with_max_strategy_size(mut self, size: Option<u8>) -> Self {
1173        self.max_strategy_size = size;
1174        self
1175    }
1176
1177    /// Enable/disable patches (dictionary-based repeated pattern detection).
1178    /// Default: true. Huge wins on screenshots, zero cost on photos.
1179    pub fn with_patches(mut self, enable: bool) -> Self {
1180        self.patches = enable;
1181        self
1182    }
1183
1184    /// Set manual splines to overlay on the image.
1185    ///
1186    /// Splines are Gaussian-blurred parametric curves overlaid additively.
1187    /// They encode thin features (power lines, horizons) efficiently.
1188    /// The encoder subtracts splines from XYB before VarDCT; the decoder
1189    /// adds them back after reconstruction. Default: `None`.
1190    pub fn with_splines(mut self, splines: Vec<crate::vardct::splines::Spline>) -> Self {
1191        self.splines = Some(splines);
1192        self
1193    }
1194
1195    /// Set progressive encoding mode (default: Single = no progressive).
1196    ///
1197    /// Progressive encoding splits AC coefficients across multiple passes,
1198    /// allowing decoders to render coarse previews before the full file is received.
1199    pub fn with_progressive(mut self, mode: ProgressiveMode) -> Self {
1200        self.progressive = mode;
1201        self
1202    }
1203
1204    /// Enable LfFrame (separate DC frame).
1205    ///
1206    /// When true, DC coefficients are encoded as a separate modular frame
1207    /// before the main VarDCT frame, matching libjxl's `progressive_dc >= 1`.
1208    pub fn with_lf_frame(mut self, enable: bool) -> Self {
1209        self.lf_frame = enable;
1210        self
1211    }
1212
1213    /// Set butteraugli quantization loop iterations explicitly.
1214    ///
1215    /// Overrides the automatic effort-based default (effort 7: 0, effort 8: 2, effort 9+: 4).
1216    /// Requires the `butteraugli-loop` feature.
1217    #[cfg(feature = "butteraugli-loop")]
1218    pub fn with_butteraugli_iters(mut self, n: u32) -> Self {
1219        self.butteraugli_iters = n;
1220        self.butteraugli_iters_explicit = true;
1221        self
1222    }
1223
1224    /// Set SSIM2 quantization loop iterations.
1225    ///
1226    /// Alternative to butteraugli loop: uses per-block linear RGB RMSE + full-image SSIM2.
1227    /// Requires the `ssim2-loop` feature.
1228    #[cfg(feature = "ssim2-loop")]
1229    pub fn with_ssim2_iters(mut self, n: u32) -> Self {
1230        self.ssim2_iters = n;
1231        self
1232    }
1233
1234    /// Set zensim quantization loop iterations.
1235    ///
1236    /// Alternative to butteraugli loop: uses zensim's psychovisual metric for
1237    /// both global quality tracking and per-pixel spatial error map (diffmap in XYB space).
1238    /// Also refines AC strategy by splitting large transforms with high perceptual error.
1239    /// Can stack with butteraugli loop (butteraugli runs first, then zensim fine-tunes).
1240    /// Requires the `zensim-loop` feature.
1241    #[cfg(feature = "zensim-loop")]
1242    pub fn with_zensim_iters(mut self, n: u32) -> Self {
1243        self.zensim_iters = n;
1244        self
1245    }
1246
1247    /// Set thread count for parallel encoding (0 = auto, 1 = sequential).
1248    ///
1249    /// Requires the `parallel` feature. When `parallel` is not enabled,
1250    /// this value is ignored and encoding is always sequential.
1251    pub fn with_threads(mut self, threads: usize) -> Self {
1252        self.threads = threads;
1253        self
1254    }
1255
1256    // ── Getters ───────────────────────────────────────────────────────
1257
1258    /// Current butteraugli distance.
1259    pub fn distance(&self) -> f32 {
1260        self.distance
1261    }
1262
1263    /// Current effort level.
1264    pub fn effort(&self) -> u8 {
1265        self.effort
1266    }
1267
1268    /// Whether ANS entropy coding is enabled.
1269    pub fn ans(&self) -> bool {
1270        self.use_ans
1271    }
1272
1273    /// Whether gaborish inverse pre-filter is enabled.
1274    pub fn gaborish(&self) -> bool {
1275        self.gaborish
1276    }
1277
1278    /// Whether noise synthesis is enabled.
1279    pub fn noise(&self) -> bool {
1280        self.noise
1281    }
1282
1283    /// Whether Wiener denoising pre-filter is enabled.
1284    pub fn denoise(&self) -> bool {
1285        self.denoise
1286    }
1287
1288    /// Whether error diffusion in AC quantization is enabled.
1289    pub fn error_diffusion(&self) -> bool {
1290        self.error_diffusion
1291    }
1292
1293    /// Whether pixel-domain loss is enabled.
1294    pub fn pixel_domain_loss(&self) -> bool {
1295        self.pixel_domain_loss
1296    }
1297
1298    /// Whether LZ77 backward references are enabled.
1299    pub fn lz77(&self) -> bool {
1300        self.lz77
1301    }
1302
1303    /// Current LZ77 method.
1304    pub fn lz77_method(&self) -> Lz77Method {
1305        self.lz77_method
1306    }
1307
1308    /// Forced AC strategy, if any.
1309    pub fn force_strategy(&self) -> Option<u8> {
1310        self.force_strategy
1311    }
1312
1313    /// Maximum AC strategy transform size, if set.
1314    pub fn max_strategy_size(&self) -> Option<u8> {
1315        self.max_strategy_size
1316    }
1317
1318    /// Current progressive mode.
1319    pub fn progressive(&self) -> ProgressiveMode {
1320        self.progressive
1321    }
1322
1323    /// Whether LfFrame (separate DC frame) is enabled.
1324    pub fn lf_frame(&self) -> bool {
1325        self.lf_frame
1326    }
1327
1328    /// Butteraugli quantization loop iterations.
1329    #[cfg(feature = "butteraugli-loop")]
1330    pub fn butteraugli_iters(&self) -> u32 {
1331        self.butteraugli_iters
1332    }
1333
1334    /// Thread count (0 = auto, 1 = sequential).
1335    pub fn threads(&self) -> usize {
1336        self.threads
1337    }
1338
1339    // ── Request / fluent encode ─────────────────────────────────────
1340
1341    /// Create an encode request for an image with this config.
1342    ///
1343    /// Use this when you need to attach metadata, limits, or cancellation.
1344    pub fn encode_request(
1345        &self,
1346        width: u32,
1347        height: u32,
1348        layout: PixelLayout,
1349    ) -> EncodeRequest<'_> {
1350        EncodeRequest {
1351            config: ConfigRef::Lossy(self),
1352            width,
1353            height,
1354            layout,
1355            metadata: None,
1356            limits: None,
1357            stop: None,
1358            source_gamma: None,
1359            color_encoding: None,
1360        }
1361    }
1362
1363    /// Encode pixels directly with this config. Shortcut for simple cases.
1364    ///
1365    /// ```rust,no_run
1366    /// # let pixels = vec![0u8; 100 * 100 * 3];
1367    /// let jxl = jxl_encoder::LossyConfig::new(1.0)
1368    ///     .encode(&pixels, 100, 100, jxl_encoder::PixelLayout::Rgb8)?;
1369    /// # Ok::<_, jxl_encoder::At<jxl_encoder::EncodeError>>(())
1370    /// ```
1371    #[track_caller]
1372    pub fn encode(
1373        &self,
1374        pixels: &[u8],
1375        width: u32,
1376        height: u32,
1377        layout: PixelLayout,
1378    ) -> Result<Vec<u8>> {
1379        self.encode_request(width, height, layout).encode(pixels)
1380    }
1381
1382    /// Encode pixels, appending to an existing buffer.
1383    #[track_caller]
1384    pub fn encode_into(
1385        &self,
1386        pixels: &[u8],
1387        width: u32,
1388        height: u32,
1389        layout: PixelLayout,
1390        out: &mut Vec<u8>,
1391    ) -> Result<()> {
1392        self.encode_request(width, height, layout)
1393            .encode_into(pixels, out)
1394            .map(|_| ())
1395    }
1396
1397    /// Encode a multi-frame animation as a lossy JXL.
1398    ///
1399    /// Each frame must have the same dimensions and pixel layout.
1400    /// Returns the complete JXL codestream bytes.
1401    #[track_caller]
1402    pub fn encode_animation(
1403        &self,
1404        width: u32,
1405        height: u32,
1406        layout: PixelLayout,
1407        animation: &AnimationParams,
1408        frames: &[AnimationFrame<'_>],
1409    ) -> Result<Vec<u8>> {
1410        encode_animation_lossy(self, width, height, layout, animation, frames).map_err(at)
1411    }
1412}
1413
1414// ── EncodeRequest ───────────────────────────────────────────────────────────
1415
1416/// Internal config reference (lossy or lossless).
#[derive(Clone, Copy, Debug)]
enum ConfigRef<'a> {
    /// Modular (lossless) path — dispatched to `encode_lossless`.
    Lossless(&'a LosslessConfig),
    /// VarDCT (lossy) path — dispatched to `encode_lossy`.
    Lossy(&'a LossyConfig),
}
1422
1423/// An encoding request — binds config + image dimensions + pixel layout.
1424///
1425/// Created via [`LosslessConfig::encode_request`] or [`LossyConfig::encode_request`].
pub struct EncodeRequest<'a> {
    /// Which codec path to take (borrowed lossy or lossless config).
    config: ConfigRef<'a>,
    /// Image width in pixels.
    width: u32,
    /// Image height in pixels.
    height: u32,
    /// Interpretation of the input pixel buffer.
    layout: PixelLayout,
    /// Optional ICC/EXIF/XMP metadata to embed.
    metadata: Option<&'a ImageMetadata<'a>>,
    /// Optional resource limits checked before encoding.
    limits: Option<&'a Limits>,
    /// Optional cooperative cancellation token.
    stop: Option<&'a dyn Stop>,
    /// Custom gamma for pixel linearization (writes `have_gamma` in the header).
    source_gamma: Option<f32>,
    /// Explicit color encoding to signal in the header (overrides defaults).
    color_encoding: Option<crate::headers::color_encoding::ColorEncoding>,
}
1437
1438impl<'a> EncodeRequest<'a> {
1439    /// Attach image metadata (ICC, EXIF, XMP).
1440    pub fn with_metadata(mut self, meta: &'a ImageMetadata<'a>) -> Self {
1441        self.metadata = Some(meta);
1442        self
1443    }
1444
1445    /// Attach resource limits.
1446    pub fn with_limits(mut self, limits: &'a Limits) -> Self {
1447        self.limits = Some(limits);
1448        self
1449    }
1450
1451    /// Attach a cooperative cancellation token.
1452    ///
1453    /// The encoder will check this periodically and return
1454    /// [`EncodeError::Cancelled`] if stopped.
1455    pub fn with_stop(mut self, stop: &'a dyn Stop) -> Self {
1456        self.stop = Some(stop);
1457        self
1458    }
1459
1460    /// Specify that source pixels use a custom gamma transfer function.
1461    ///
1462    /// When set, the encoder linearizes u8/u16 pixels with `pixel ^ (1/gamma)`
1463    /// instead of the sRGB transfer function, and writes `have_gamma=true` in
1464    /// the JXL header. This matches cjxl's behavior for PNGs with gAMA chunks.
1465    ///
1466    /// Example: `0.45455` for standard gamma 2.2 encoding (gAMA=45455).
1467    pub fn with_source_gamma(mut self, gamma: f32) -> Self {
1468        self.source_gamma = Some(gamma);
1469        self
1470    }
1471
1472    /// Override the color encoding written to the JXL header.
1473    ///
1474    /// When set, this color encoding is used instead of the default (sRGB for
1475    /// u8/u16, linear sRGB for f32) or any gamma derived from
1476    /// [`with_source_gamma`](Self::with_source_gamma).
1477    ///
1478    /// Use this for HDR content (PQ, HLG) or non-sRGB primaries (BT.2020, Display P3).
1479    ///
1480    /// Note: this only affects the signaled color encoding in the JXL header.
1481    /// Pixel linearization for lossy encoding is still controlled by
1482    /// `with_source_gamma()`. For float input, pixels are assumed already linear.
1483    pub fn with_color_encoding(
1484        mut self,
1485        ce: crate::headers::color_encoding::ColorEncoding,
1486    ) -> Self {
1487        self.color_encoding = Some(ce);
1488        self
1489    }
1490
1491    /// Encode pixels and return the JXL bytes.
1492    #[track_caller]
1493    pub fn encode(self, pixels: &[u8]) -> Result<Vec<u8>> {
1494        self.encode_inner(pixels)
1495            .map(|mut r| r.take_data().unwrap())
1496            .map_err(at)
1497    }
1498
1499    /// Encode pixels and return the JXL bytes together with [`EncodeStats`].
1500    #[track_caller]
1501    pub fn encode_with_stats(self, pixels: &[u8]) -> Result<EncodeResult> {
1502        self.encode_inner(pixels).map_err(at)
1503    }
1504
1505    /// Encode pixels, appending to an existing buffer. Returns metrics.
1506    #[track_caller]
1507    pub fn encode_into(self, pixels: &[u8], out: &mut Vec<u8>) -> Result<EncodeResult> {
1508        let mut result = self.encode_inner(pixels).map_err(at)?;
1509        if let Some(data) = result.data.take() {
1510            out.extend_from_slice(&data);
1511        }
1512        Ok(result)
1513    }
1514
1515    /// Encode pixels, writing to a `std::io::Write` destination. Returns metrics.
1516    #[cfg(feature = "std")]
1517    #[track_caller]
1518    pub fn encode_to(self, pixels: &[u8], mut dest: impl std::io::Write) -> Result<EncodeResult> {
1519        let mut result = self.encode_inner(pixels).map_err(at)?;
1520        if let Some(data) = result.data.take() {
1521            dest.write_all(&data)
1522                .map_err(|e| at(EncodeError::from(e)))?;
1523        }
1524        Ok(result)
1525    }
1526
1527    fn encode_inner(&self, pixels: &[u8]) -> core::result::Result<EncodeResult, EncodeError> {
1528        self.validate_pixels(pixels)?;
1529        self.check_limits()?;
1530
1531        let threads = match self.config {
1532            ConfigRef::Lossless(cfg) => cfg.threads,
1533            ConfigRef::Lossy(cfg) => cfg.threads,
1534        };
1535
1536        let (codestream, mut stats) = run_with_threads(threads, || match self.config {
1537            ConfigRef::Lossless(cfg) => self.encode_lossless(cfg, pixels),
1538            ConfigRef::Lossy(cfg) => self.encode_lossy(cfg, pixels),
1539        })?;
1540
1541        stats.codestream_size = codestream.len();
1542
1543        // Wrap in container if metadata (EXIF/XMP) is present
1544        let output = if let Some(meta) = self.metadata
1545            && (meta.exif.is_some() || meta.xmp.is_some())
1546        {
1547            crate::container::wrap_in_container(&codestream, meta.exif, meta.xmp)
1548        } else {
1549            codestream
1550        };
1551
1552        stats.output_size = output.len();
1553
1554        Ok(EncodeResult {
1555            data: Some(output),
1556            stats,
1557        })
1558    }
1559
1560    fn validate_pixels(&self, pixels: &[u8]) -> core::result::Result<(), EncodeError> {
1561        let w = self.width as usize;
1562        let h = self.height as usize;
1563        if w == 0 || h == 0 {
1564            return Err(EncodeError::InvalidInput {
1565                message: format!("zero dimensions: {w}x{h}"),
1566            });
1567        }
1568        let expected = w
1569            .checked_mul(h)
1570            .and_then(|n| n.checked_mul(self.layout.bytes_per_pixel()));
1571        match expected {
1572            Some(expected) if pixels.len() == expected => Ok(()),
1573            Some(expected) => Err(EncodeError::InvalidInput {
1574                message: format!(
1575                    "pixel buffer size mismatch: expected {expected} bytes for {w}x{h} {:?}, got {}",
1576                    self.layout,
1577                    pixels.len()
1578                ),
1579            }),
1580            None => Err(EncodeError::InvalidInput {
1581                message: "image dimensions overflow".into(),
1582            }),
1583        }
1584    }
1585
1586    fn check_limits(&self) -> core::result::Result<(), EncodeError> {
1587        let Some(limits) = self.limits else {
1588            return Ok(());
1589        };
1590        let w = self.width as u64;
1591        let h = self.height as u64;
1592        if let Some(max_w) = limits.max_width
1593            && w > max_w
1594        {
1595            return Err(EncodeError::LimitExceeded {
1596                message: format!("width {w} > max {max_w}"),
1597            });
1598        }
1599        if let Some(max_h) = limits.max_height
1600            && h > max_h
1601        {
1602            return Err(EncodeError::LimitExceeded {
1603                message: format!("height {h} > max {max_h}"),
1604            });
1605        }
1606        if let Some(max_px) = limits.max_pixels
1607            && w * h > max_px
1608        {
1609            return Err(EncodeError::LimitExceeded {
1610                message: format!("pixels {}x{} = {} > max {max_px}", w, h, w * h),
1611            });
1612        }
1613        Ok(())
1614    }
1615
1616    // ── Lossless path ───────────────────────────────────────────────────
1617
    /// Lossless (Modular) encoding path.
    ///
    /// Builds a `ModularImage` from the input layout, optionally detects
    /// patches, writes the file header (plus ICC if attached), then emits an
    /// optional patch reference frame followed by the main modular frame.
    /// Returns the raw codestream bytes plus stats.
    fn encode_lossless(
        &self,
        cfg: &LosslessConfig,
        pixels: &[u8],
    ) -> core::result::Result<(Vec<u8>, EncodeStats), EncodeError> {
        use crate::bit_writer::BitWriter;
        use crate::headers::color_encoding::ColorSpace;
        use crate::headers::{ColorEncoding, FileHeader};
        use crate::modular::channel::ModularImage;
        use crate::modular::frame::{FrameEncoder, FrameEncoderOptions};

        let w = self.width as usize;
        let h = self.height as usize;

        // Normalize pixels to RGB8 for detection if needed (BGR swap)
        let rgb_pixels;
        let detection_pixels: &[u8] = match self.layout {
            PixelLayout::Bgr8 => {
                rgb_pixels = bgr_to_rgb(pixels, 3);
                &rgb_pixels
            }
            PixelLayout::Bgra8 => {
                rgb_pixels = bgr_to_rgb(pixels, 4);
                &rgb_pixels
            }
            _ => {
                // Non-BGR layouts need no conversion; the empty Vec only
                // exists to satisfy the shared binding, and the `let _`
                // keeps it "used" in this arm.
                rgb_pixels = Vec::new();
                let _ = &rgb_pixels;
                pixels
            }
        };

        // Build ModularImage from pixel layout
        let mut image = match self.layout {
            PixelLayout::Rgb8 => ModularImage::from_rgb8(pixels, w, h),
            PixelLayout::Rgba8 => ModularImage::from_rgba8(pixels, w, h),
            PixelLayout::Bgr8 => ModularImage::from_rgb8(&bgr_to_rgb(pixels, 3), w, h),
            PixelLayout::Bgra8 => ModularImage::from_rgba8(&bgr_to_rgb(pixels, 4), w, h),
            PixelLayout::Gray8 => ModularImage::from_gray8(pixels, w, h),
            PixelLayout::GrayAlpha8 => ModularImage::from_grayalpha8(pixels, w, h),
            PixelLayout::Rgb16 => ModularImage::from_rgb16_native(pixels, w, h),
            PixelLayout::Rgba16 => ModularImage::from_rgba16_native(pixels, w, h),
            PixelLayout::Gray16 => ModularImage::from_gray16_native(pixels, w, h),
            PixelLayout::GrayAlpha16 => ModularImage::from_grayalpha16_native(pixels, w, h),
            // Any other layout (e.g. float) is not supported losslessly here.
            other => return Err(EncodeError::UnsupportedPixelLayout(other)),
        }
        .map_err(EncodeError::from)?;

        // Detect patches for lossless mode (RGB 8-bit only, non-grayscale)
        let num_channels = self.layout.bytes_per_pixel();
        let can_use_patches =
            cfg.patches && !image.is_grayscale && image.bit_depth <= 8 && num_channels >= 3;
        let patches_data = if can_use_patches {
            crate::vardct::patches::find_and_build_lossless(
                detection_pixels,
                w,
                h,
                num_channels,
                image.bit_depth,
            )
        } else {
            None
        };

        // Build file header
        let mut file_header = if image.is_grayscale {
            FileHeader::new_gray(self.width, self.height)
        } else if image.has_alpha {
            FileHeader::new_rgba(self.width, self.height)
        } else {
            FileHeader::new_rgb(self.width, self.height)
        };
        if image.bit_depth == 16 {
            file_header.metadata.bit_depth = crate::headers::file_header::BitDepth::uint16();
            for ec in &mut file_header.metadata.extra_channels {
                ec.bit_depth = crate::headers::file_header::BitDepth::uint16();
            }
        }
        // Fold optional request metadata into the header fields.
        if let Some(meta) = self.metadata {
            if meta.icc_profile.is_some() {
                file_header.metadata.color_encoding.want_icc = true;
            }
            if let Some(it) = meta.intensity_target {
                file_header.metadata.intensity_target = it;
            }
            if let Some(mn) = meta.min_nits {
                file_header.metadata.min_nits = mn;
            }
            if let Some((w, h)) = meta.intrinsic_size {
                file_header.metadata.have_intrinsic_size = true;
                file_header.metadata.intrinsic_width = w;
                file_header.metadata.intrinsic_height = h;
            }
        }

        // Write codestream
        let mut writer = BitWriter::new();
        file_header.write(&mut writer).map_err(EncodeError::from)?;
        if let Some(meta) = self.metadata
            && let Some(icc) = meta.icc_profile
        {
            crate::icc::write_icc(icc, &mut writer).map_err(EncodeError::from)?;
        }
        // Align to a byte boundary before frame data.
        writer.zero_pad_to_byte();

        // Write reference frame and subtract patches from image if detected.
        // Order matters: the reference frame goes into the stream first, and
        // the patch pixels are subtracted from `image` before the main frame
        // below is encoded.
        if let Some(ref pd) = patches_data {
            let lossless_profile = crate::effort::EffortProfile::lossless(cfg.effort, cfg.mode);
            crate::vardct::patches::encode_reference_frame_rgb(
                pd,
                image.bit_depth,
                cfg.use_ans,
                lossless_profile.patch_ref_tree_learning,
                &mut writer,
            )
            .map_err(EncodeError::from)?;
            writer.zero_pad_to_byte();
            let bd = image.bit_depth;
            crate::vardct::patches::subtract_patches_modular(&mut image, pd, bd);
        }

        // Encode frame
        let use_tree_learning = cfg.tree_learning;
        let frame_encoder = FrameEncoder::new(
            w,
            h,
            FrameEncoderOptions {
                use_modular: true,
                effort: cfg.effort,
                use_ans: cfg.use_ans,
                use_tree_learning,
                use_squeeze: cfg.squeeze,
                enable_lz77: cfg.lz77,
                lz77_method: cfg.lz77_method,
                lossy_palette: cfg.lossy_palette,
                encoder_mode: cfg.mode,
                profile: crate::effort::EffortProfile::lossless(cfg.effort, cfg.mode),
                have_animation: false,
                duration: 0,
                is_last: true,
                crop: None,
                skip_rct: false,
            },
        );
        // Precedence: explicit color encoding > source gamma > gray/sRGB default.
        let color_encoding = if let Some(ce) = self.color_encoding.clone() {
            // Explicit color encoding overrides source_gamma and defaults.
            // Adjust for grayscale if needed.
            if image.is_grayscale && ce.color_space != ColorSpace::Gray {
                ColorEncoding {
                    color_space: ColorSpace::Gray,
                    ..ce
                }
            } else {
                ce
            }
        } else if let Some(gamma) = self.source_gamma {
            if image.is_grayscale {
                ColorEncoding::gray_with_gamma(gamma)
            } else {
                ColorEncoding::with_gamma(gamma)
            }
        } else if image.is_grayscale {
            ColorEncoding::gray()
        } else {
            ColorEncoding::srgb()
        };
        frame_encoder
            .encode_modular_with_patches(
                &image,
                &color_encoding,
                &mut writer,
                patches_data.as_ref(),
            )
            .map_err(EncodeError::from)?;

        let stats = EncodeStats {
            mode: EncodeMode::Lossless,
            ans: cfg.use_ans,
            ..Default::default()
        };
        Ok((writer.finish_with_padding(), stats))
    }
1800
1801    // ── Lossy path ──────────────────────────────────────────────────────
1802
1803    fn encode_lossy(
1804        &self,
1805        cfg: &LossyConfig,
1806        pixels: &[u8],
1807    ) -> core::result::Result<(Vec<u8>, EncodeStats), EncodeError> {
1808        let w = self.width as usize;
1809        let h = self.height as usize;
1810
1811        // Build linear f32 RGB and extract alpha from input layout.
1812        // Grayscale layouts are expanded to RGB (R=G=B) for VarDCT encoding.
1813        // When source_gamma is set, use gamma linearization instead of sRGB TF.
1814        let gamma = self.source_gamma;
1815        let (linear_rgb, alpha, bit_depth_16) = match self.layout {
1816            PixelLayout::Rgb8 => {
1817                let linear = if let Some(g) = gamma {
1818                    gamma_u8_to_linear_f32(pixels, 3, g)
1819                } else {
1820                    srgb_u8_to_linear_f32(pixels, 3)
1821                };
1822                (linear, None, false)
1823            }
1824            PixelLayout::Bgr8 => {
1825                let rgb = bgr_to_rgb(pixels, 3);
1826                let linear = if let Some(g) = gamma {
1827                    gamma_u8_to_linear_f32(&rgb, 3, g)
1828                } else {
1829                    srgb_u8_to_linear_f32(&rgb, 3)
1830                };
1831                (linear, None, false)
1832            }
1833            PixelLayout::Rgba8 => {
1834                let rgb = if let Some(g) = gamma {
1835                    gamma_u8_to_linear_f32(pixels, 4, g)
1836                } else {
1837                    srgb_u8_to_linear_f32(pixels, 4)
1838                };
1839                let alpha = extract_alpha(pixels, 4, 3);
1840                (rgb, Some(alpha), false)
1841            }
1842            PixelLayout::Bgra8 => {
1843                let swapped = bgr_to_rgb(pixels, 4);
1844                let rgb = if let Some(g) = gamma {
1845                    gamma_u8_to_linear_f32(&swapped, 4, g)
1846                } else {
1847                    srgb_u8_to_linear_f32(&swapped, 4)
1848                };
1849                let alpha = extract_alpha(pixels, 4, 3);
1850                (rgb, Some(alpha), false)
1851            }
1852            PixelLayout::Gray8 => {
1853                let rgb = if let Some(g) = gamma {
1854                    gamma_gray_u8_to_linear_f32_rgb(pixels, 1, g)
1855                } else {
1856                    gray_u8_to_linear_f32_rgb(pixels, 1)
1857                };
1858                (rgb, None, false)
1859            }
1860            PixelLayout::GrayAlpha8 => {
1861                let rgb = if let Some(g) = gamma {
1862                    gamma_gray_u8_to_linear_f32_rgb(pixels, 2, g)
1863                } else {
1864                    gray_u8_to_linear_f32_rgb(pixels, 2)
1865                };
1866                let alpha = extract_alpha(pixels, 2, 1);
1867                (rgb, Some(alpha), false)
1868            }
1869            PixelLayout::Rgb16 => {
1870                let linear = if let Some(g) = gamma {
1871                    gamma_u16_to_linear_f32(pixels, 3, g)
1872                } else {
1873                    srgb_u16_to_linear_f32(pixels, 3)
1874                };
1875                (linear, None, true)
1876            }
1877            PixelLayout::Rgba16 => {
1878                let rgb = if let Some(g) = gamma {
1879                    gamma_u16_to_linear_f32(pixels, 4, g)
1880                } else {
1881                    srgb_u16_to_linear_f32(pixels, 4)
1882                };
1883                let alpha = extract_alpha_u16(pixels, 4, 3);
1884                (rgb, Some(alpha), true)
1885            }
1886            PixelLayout::Gray16 => {
1887                let rgb = if let Some(g) = gamma {
1888                    gamma_gray_u16_to_linear_f32_rgb(pixels, 1, g)
1889                } else {
1890                    gray_u16_to_linear_f32_rgb(pixels, 1)
1891                };
1892                (rgb, None, true)
1893            }
1894            PixelLayout::GrayAlpha16 => {
1895                let rgb = if let Some(g) = gamma {
1896                    gamma_gray_u16_to_linear_f32_rgb(pixels, 2, g)
1897                } else {
1898                    gray_u16_to_linear_f32_rgb(pixels, 2)
1899                };
1900                let alpha = extract_alpha_u16(pixels, 2, 1);
1901                (rgb, Some(alpha), true)
1902            }
1903            PixelLayout::RgbLinearF32 => {
1904                let floats: &[f32] = bytemuck::cast_slice(pixels);
1905                (floats.to_vec(), None, false)
1906            }
1907            PixelLayout::RgbaLinearF32 => {
1908                let floats: &[f32] = bytemuck::cast_slice(pixels);
1909                let rgb: Vec<f32> = floats
1910                    .chunks(4)
1911                    .flat_map(|px| [px[0], px[1], px[2]])
1912                    .collect();
1913                let alpha = extract_alpha_f32(floats, 4, 3);
1914                (rgb, Some(alpha), false)
1915            }
1916            PixelLayout::GrayLinearF32 => {
1917                let floats: &[f32] = bytemuck::cast_slice(pixels);
1918                (gray_f32_to_linear_f32_rgb(floats, 1), None, false)
1919            }
1920            PixelLayout::GrayAlphaLinearF32 => {
1921                let floats: &[f32] = bytemuck::cast_slice(pixels);
1922                let rgb = gray_f32_to_linear_f32_rgb(floats, 2);
1923                let alpha = extract_alpha_f32(floats, 2, 1);
1924                (rgb, Some(alpha), false)
1925            }
1926        };
1927
1928        let mut profile = crate::effort::EffortProfile::lossy(cfg.effort, cfg.mode);
1929
1930        // Apply max_strategy_size to profile flags
1931        if let Some(max_size) = cfg.max_strategy_size {
1932            if max_size < 16 {
1933                profile.try_dct16 = false;
1934            }
1935            if max_size < 32 {
1936                profile.try_dct32 = false;
1937            }
1938            if max_size < 64 {
1939                profile.try_dct64 = false;
1940            }
1941        }
1942
1943        let mut enc = crate::vardct::VarDctEncoder::new(cfg.distance);
1944        enc.effort = cfg.effort;
1945        enc.profile = profile;
1946        enc.use_ans = cfg.use_ans;
1947        enc.optimize_codes = enc.profile.optimize_codes;
1948        enc.custom_orders = enc.profile.custom_orders;
1949        enc.ac_strategy_enabled = enc.profile.ac_strategy_enabled;
1950        enc.enable_noise = cfg.noise;
1951        enc.enable_denoise = cfg.denoise;
1952        // libjxl gates gaborish at distance > 0.5 (enc_frame.cc:281)
1953        enc.enable_gaborish = cfg.gaborish && cfg.distance > 0.5;
1954        enc.error_diffusion = cfg.error_diffusion;
1955        enc.pixel_domain_loss = cfg.pixel_domain_loss;
1956        enc.enable_lz77 = cfg.lz77;
1957        enc.lz77_method = cfg.lz77_method;
1958        enc.force_strategy = cfg.force_strategy;
1959        enc.enable_patches = cfg.patches;
1960        enc.encoder_mode = cfg.mode;
1961        enc.splines = cfg.splines.clone();
1962        enc.is_grayscale = self.layout.is_grayscale();
1963        enc.progressive = cfg.progressive;
1964        enc.use_lf_frame = cfg.lf_frame;
1965        #[cfg(feature = "butteraugli-loop")]
1966        {
1967            enc.butteraugli_iters = cfg.butteraugli_iters;
1968        }
1969        #[cfg(feature = "ssim2-loop")]
1970        {
1971            enc.ssim2_iters = cfg.ssim2_iters;
1972        }
1973        #[cfg(feature = "zensim-loop")]
1974        {
1975            enc.zensim_iters = cfg.zensim_iters;
1976        }
1977
1978        enc.bit_depth_16 = bit_depth_16;
1979        enc.source_gamma = self.source_gamma;
1980        enc.color_encoding = self.color_encoding.clone();
1981
1982        // Tone mapping and intrinsic size from metadata
1983        if let Some(meta) = self.metadata {
1984            if let Some(it) = meta.intensity_target {
1985                enc.intensity_target = it;
1986            }
1987            if let Some(mn) = meta.min_nits {
1988                enc.min_nits = mn;
1989            }
1990            if meta.intrinsic_size.is_some() {
1991                enc.intrinsic_size = meta.intrinsic_size;
1992            }
1993        }
1994
1995        // ICC profile from metadata
1996        if let Some(meta) = self.metadata
1997            && let Some(icc) = meta.icc_profile
1998        {
1999            enc.icc_profile = Some(icc.to_vec());
2000        }
2001
2002        let output = enc
2003            .encode(w, h, &linear_rgb, alpha.as_deref())
2004            .map_err(EncodeError::from)?;
2005
2006        #[cfg(feature = "butteraugli-loop")]
2007        let butteraugli_iters_actual = cfg.butteraugli_iters;
2008        #[cfg(not(feature = "butteraugli-loop"))]
2009        let butteraugli_iters_actual = 0u32;
2010
2011        let stats = EncodeStats {
2012            mode: EncodeMode::Lossy,
2013            strategy_counts: output.strategy_counts,
2014            gaborish: cfg.gaborish,
2015            ans: cfg.use_ans,
2016            butteraugli_iters: butteraugli_iters_actual,
2017            pixel_domain_loss: cfg.pixel_domain_loss,
2018            ..Default::default()
2019        };
2020        Ok((output.data, stats))
2021    }
2022}
2023
2024// ── Streaming Encoders ──────────────────────────────────────────────────────
2025
2026/// Streaming lossy (VarDCT) encoder.
2027///
2028/// Accepts pixel rows incrementally via [`push_rows`](Self::push_rows), then
2029/// encodes on [`finish`](Self::finish). This allows callers to free source pixel
2030/// buffers as rows are pushed, rather than materializing the entire image in
2031/// memory before encoding.
2032///
2033/// ```rust,no_run
2034/// use jxl_encoder::{LossyConfig, PixelLayout};
2035///
2036/// let mut enc = LossyConfig::new(1.0)
2037///     .encoder(800, 600, PixelLayout::Rgb8)?;
2038///
2039/// // Push rows from a streaming source (e.g. PNG decoder)
2040/// # let row_bytes = 800 * 3;
2041/// # let source_rows = vec![0u8; row_bytes * 600];
2042/// for chunk in source_rows.chunks(row_bytes * 100) {
2043///     enc.push_rows(chunk, 100)?;
2044/// }
2045///
2046/// let jxl_bytes = enc.finish()?;
2047/// # Ok::<_, jxl_encoder::At<jxl_encoder::EncodeError>>(())
2048/// ```
pub struct LossyEncoder {
    /// Encoding options captured (cloned) from the originating [`LossyConfig`].
    cfg: LossyConfig,
    /// Image width in pixels.
    width: u32,
    /// Total image height in rows; `finish` rejects incomplete images.
    height: u32,
    /// Source pixel layout; drives per-row conversion in `push_rows`.
    layout: PixelLayout,
    /// Rows accepted so far via `push_rows`.
    rows_pushed: u32,
    /// Accumulated linear-light RGB samples, 3 `f32` per pixel.
    linear_rgb: Vec<f32>,
    /// Accumulated alpha plane for layouts that carry alpha, else `None`.
    /// NOTE(review): for 16-bit/f32 sources the per-sample byte width depends
    /// on `extract_alpha_u16`/`extract_alpha_f32` — confirm before relying on
    /// the `w * h` capacity reservation being exact.
    alpha: Option<Vec<u8>>,
    /// True when the source layout is 16-bit (`PixelLayout::is_16bit`).
    bit_depth_16: bool,
    /// Optional ICC profile attached via `with_icc_profile`.
    icc_profile: Option<Vec<u8>>,
    /// Optional EXIF payload attached via `with_exif`.
    exif: Option<Vec<u8>>,
    /// Optional XMP payload attached via `with_xmp`.
    xmp: Option<Vec<u8>>,
    /// Custom source gamma; when set, row conversion uses gamma linearization
    /// instead of the sRGB transfer function.
    source_gamma: Option<f32>,
    /// Explicit color encoding to write to the JXL header.
    color_encoding: Option<crate::headers::color_encoding::ColorEncoding>,
    /// Peak display luminance in nits (default 255.0 at construction).
    intensity_target: f32,
    /// Minimum display luminance in nits (default 0.0 at construction).
    min_nits: f32,
    /// Intrinsic display size override.
    intrinsic_size: Option<(u32, u32)>,
}
2067
2068impl LossyEncoder {
2069    /// Attach an ICC color profile.
2070    pub fn with_icc_profile(mut self, data: &[u8]) -> Self {
2071        self.icc_profile = Some(data.to_vec());
2072        self
2073    }
2074
2075    /// Attach EXIF data.
2076    pub fn with_exif(mut self, data: &[u8]) -> Self {
2077        self.exif = Some(data.to_vec());
2078        self
2079    }
2080
2081    /// Attach XMP data.
2082    pub fn with_xmp(mut self, data: &[u8]) -> Self {
2083        self.xmp = Some(data.to_vec());
2084        self
2085    }
2086
2087    /// Specify that source pixels use a custom gamma transfer function.
2088    pub fn with_source_gamma(mut self, gamma: f32) -> Self {
2089        self.source_gamma = Some(gamma);
2090        self
2091    }
2092
2093    /// Override the color encoding written to the JXL header.
2094    pub fn with_color_encoding(
2095        mut self,
2096        ce: crate::headers::color_encoding::ColorEncoding,
2097    ) -> Self {
2098        self.color_encoding = Some(ce);
2099        self
2100    }
2101
2102    /// Set the peak display luminance in nits for HDR content.
2103    pub fn with_intensity_target(mut self, nits: f32) -> Self {
2104        self.intensity_target = nits;
2105        self
2106    }
2107
2108    /// Set the minimum display luminance in nits.
2109    pub fn with_min_nits(mut self, nits: f32) -> Self {
2110        self.min_nits = nits;
2111        self
2112    }
2113
2114    /// Set the intrinsic display size.
2115    pub fn with_intrinsic_size(mut self, width: u32, height: u32) -> Self {
2116        self.intrinsic_size = Some((width, height));
2117        self
2118    }
2119
    /// Number of rows accepted so far via [`push_rows`](Self::push_rows).
    pub fn rows_pushed(&self) -> u32 {
        self.rows_pushed
    }
2124
    /// Total expected height in rows; [`finish`](Self::finish) fails unless
    /// exactly this many rows have been pushed.
    pub fn height(&self) -> u32 {
        self.height
    }
2129
    /// Push pixel rows into the encoder.
    ///
    /// `pixels` must contain exactly `width * num_rows * bytes_per_pixel` bytes.
    /// Rows are converted to the internal linear f32 format immediately, so the
    /// caller can free the source buffer after this call returns.
    ///
    /// # Errors
    ///
    /// Returns [`EncodeError::InvalidInput`] when the byte count does not match
    /// or the rows would exceed the image height; `#[track_caller]` plus [`at`]
    /// annotate the error with the call site.
    #[track_caller]
    pub fn push_rows(&mut self, pixels: &[u8], num_rows: u32) -> Result<()> {
        self.push_rows_inner(pixels, num_rows).map_err(at)
    }
2139
2140    fn push_rows_inner(
2141        &mut self,
2142        pixels: &[u8],
2143        num_rows: u32,
2144    ) -> core::result::Result<(), EncodeError> {
2145        if num_rows == 0 {
2146            return Ok(());
2147        }
2148        let remaining = self.height - self.rows_pushed;
2149        if num_rows > remaining {
2150            return Err(EncodeError::InvalidInput {
2151                message: format!(
2152                    "push_rows: {num_rows} rows would exceed image height \
2153                     ({} pushed + {num_rows} > {})",
2154                    self.rows_pushed, self.height
2155                ),
2156            });
2157        }
2158        let w = self.width as usize;
2159        let n = num_rows as usize;
2160        let expected = w
2161            .checked_mul(n)
2162            .and_then(|wn| wn.checked_mul(self.layout.bytes_per_pixel()));
2163        match expected {
2164            Some(expected) if pixels.len() == expected => {}
2165            Some(expected) => {
2166                return Err(EncodeError::InvalidInput {
2167                    message: format!(
2168                        "push_rows: expected {expected} bytes for {w}x{n} {:?}, got {}",
2169                        self.layout,
2170                        pixels.len()
2171                    ),
2172                });
2173            }
2174            None => {
2175                return Err(EncodeError::InvalidInput {
2176                    message: "push_rows: row dimensions overflow".into(),
2177                });
2178            }
2179        }
2180
2181        let gamma = self.source_gamma;
2182
2183        // Convert and append linear RGB
2184        let new_linear: Vec<f32> = match self.layout {
2185            PixelLayout::Rgb8 => {
2186                if let Some(g) = gamma {
2187                    gamma_u8_to_linear_f32(pixels, 3, g)
2188                } else {
2189                    srgb_u8_to_linear_f32(pixels, 3)
2190                }
2191            }
2192            PixelLayout::Bgr8 => {
2193                let rgb = bgr_to_rgb(pixels, 3);
2194                if let Some(g) = gamma {
2195                    gamma_u8_to_linear_f32(&rgb, 3, g)
2196                } else {
2197                    srgb_u8_to_linear_f32(&rgb, 3)
2198                }
2199            }
2200            PixelLayout::Rgba8 => {
2201                if let Some(g) = gamma {
2202                    gamma_u8_to_linear_f32(pixels, 4, g)
2203                } else {
2204                    srgb_u8_to_linear_f32(pixels, 4)
2205                }
2206            }
2207            PixelLayout::Bgra8 => {
2208                let swapped = bgr_to_rgb(pixels, 4);
2209                if let Some(g) = gamma {
2210                    gamma_u8_to_linear_f32(&swapped, 4, g)
2211                } else {
2212                    srgb_u8_to_linear_f32(&swapped, 4)
2213                }
2214            }
2215            PixelLayout::Gray8 => {
2216                if let Some(g) = gamma {
2217                    gamma_gray_u8_to_linear_f32_rgb(pixels, 1, g)
2218                } else {
2219                    gray_u8_to_linear_f32_rgb(pixels, 1)
2220                }
2221            }
2222            PixelLayout::GrayAlpha8 => {
2223                if let Some(g) = gamma {
2224                    gamma_gray_u8_to_linear_f32_rgb(pixels, 2, g)
2225                } else {
2226                    gray_u8_to_linear_f32_rgb(pixels, 2)
2227                }
2228            }
2229            PixelLayout::Rgb16 => {
2230                if let Some(g) = gamma {
2231                    gamma_u16_to_linear_f32(pixels, 3, g)
2232                } else {
2233                    srgb_u16_to_linear_f32(pixels, 3)
2234                }
2235            }
2236            PixelLayout::Rgba16 => {
2237                if let Some(g) = gamma {
2238                    gamma_u16_to_linear_f32(pixels, 4, g)
2239                } else {
2240                    srgb_u16_to_linear_f32(pixels, 4)
2241                }
2242            }
2243            PixelLayout::Gray16 => {
2244                if let Some(g) = gamma {
2245                    gamma_gray_u16_to_linear_f32_rgb(pixels, 1, g)
2246                } else {
2247                    gray_u16_to_linear_f32_rgb(pixels, 1)
2248                }
2249            }
2250            PixelLayout::GrayAlpha16 => {
2251                if let Some(g) = gamma {
2252                    gamma_gray_u16_to_linear_f32_rgb(pixels, 2, g)
2253                } else {
2254                    gray_u16_to_linear_f32_rgb(pixels, 2)
2255                }
2256            }
2257            PixelLayout::RgbLinearF32 => {
2258                let floats: &[f32] = bytemuck::cast_slice(pixels);
2259                floats.to_vec()
2260            }
2261            PixelLayout::RgbaLinearF32 => {
2262                let floats: &[f32] = bytemuck::cast_slice(pixels);
2263                floats
2264                    .chunks(4)
2265                    .flat_map(|px| [px[0], px[1], px[2]])
2266                    .collect()
2267            }
2268            PixelLayout::GrayLinearF32 => {
2269                let floats: &[f32] = bytemuck::cast_slice(pixels);
2270                gray_f32_to_linear_f32_rgb(floats, 1)
2271            }
2272            PixelLayout::GrayAlphaLinearF32 => {
2273                let floats: &[f32] = bytemuck::cast_slice(pixels);
2274                gray_f32_to_linear_f32_rgb(floats, 2)
2275            }
2276        };
2277        self.linear_rgb.extend_from_slice(&new_linear);
2278
2279        // Extract and append alpha
2280        match self.layout {
2281            PixelLayout::Rgba8 | PixelLayout::Bgra8 => {
2282                let new_alpha = extract_alpha(pixels, 4, 3);
2283                self.alpha
2284                    .get_or_insert_with(Vec::new)
2285                    .extend_from_slice(&new_alpha);
2286            }
2287            PixelLayout::GrayAlpha8 => {
2288                let new_alpha = extract_alpha(pixels, 2, 1);
2289                self.alpha
2290                    .get_or_insert_with(Vec::new)
2291                    .extend_from_slice(&new_alpha);
2292            }
2293            PixelLayout::Rgba16 => {
2294                let new_alpha = extract_alpha_u16(pixels, 4, 3);
2295                self.alpha
2296                    .get_or_insert_with(Vec::new)
2297                    .extend_from_slice(&new_alpha);
2298            }
2299            PixelLayout::GrayAlpha16 => {
2300                let new_alpha = extract_alpha_u16(pixels, 2, 1);
2301                self.alpha
2302                    .get_or_insert_with(Vec::new)
2303                    .extend_from_slice(&new_alpha);
2304            }
2305            PixelLayout::RgbaLinearF32 => {
2306                let floats: &[f32] = bytemuck::cast_slice(pixels);
2307                let new_alpha = extract_alpha_f32(floats, 4, 3);
2308                self.alpha
2309                    .get_or_insert_with(Vec::new)
2310                    .extend_from_slice(&new_alpha);
2311            }
2312            PixelLayout::GrayAlphaLinearF32 => {
2313                let floats: &[f32] = bytemuck::cast_slice(pixels);
2314                let new_alpha = extract_alpha_f32(floats, 2, 1);
2315                self.alpha
2316                    .get_or_insert_with(Vec::new)
2317                    .extend_from_slice(&new_alpha);
2318            }
2319            _ => {}
2320        }
2321
2322        self.rows_pushed += num_rows;
2323        Ok(())
2324    }
2325
2326    /// Encode the accumulated pixels and return the JXL bytes.
2327    ///
2328    /// All rows must have been pushed via [`push_rows`](Self::push_rows) before
2329    /// calling this. Returns an error if the image is incomplete.
2330    #[track_caller]
2331    pub fn finish(self) -> Result<Vec<u8>> {
2332        self.finish_inner()
2333            .map(|mut r| r.take_data().unwrap())
2334            .map_err(at)
2335    }
2336
2337    /// Encode and return JXL bytes together with [`EncodeStats`].
2338    #[track_caller]
2339    pub fn finish_with_stats(self) -> Result<EncodeResult> {
2340        self.finish_inner().map_err(at)
2341    }
2342
2343    /// Encode, appending to an existing buffer.
2344    #[track_caller]
2345    pub fn finish_into(self, out: &mut Vec<u8>) -> Result<EncodeResult> {
2346        let mut result = self.finish_inner().map_err(at)?;
2347        if let Some(data) = result.data.take() {
2348            out.extend_from_slice(&data);
2349        }
2350        Ok(result)
2351    }
2352
2353    /// Encode, writing to a `std::io::Write` destination.
2354    #[cfg(feature = "std")]
2355    #[track_caller]
2356    pub fn finish_to(self, mut dest: impl std::io::Write) -> Result<EncodeResult> {
2357        let mut result = self.finish_inner().map_err(at)?;
2358        if let Some(data) = result.data.take() {
2359            dest.write_all(&data)
2360                .map_err(|e| at(EncodeError::from(e)))?;
2361        }
2362        Ok(result)
2363    }
2364
2365    fn finish_inner(self) -> core::result::Result<EncodeResult, EncodeError> {
2366        if self.rows_pushed != self.height {
2367            return Err(EncodeError::InvalidInput {
2368                message: format!(
2369                    "incomplete image: {} of {} rows pushed",
2370                    self.rows_pushed, self.height
2371                ),
2372            });
2373        }
2374
2375        let cfg = &self.cfg;
2376        let w = self.width as usize;
2377        let h = self.height as usize;
2378        let linear_rgb = self.linear_rgb;
2379        let alpha = self.alpha;
2380
2381        let (codestream, mut stats) = run_with_threads(cfg.threads, || {
2382            let mut profile = crate::effort::EffortProfile::lossy(cfg.effort, cfg.mode);
2383            if let Some(max_size) = cfg.max_strategy_size {
2384                if max_size < 16 {
2385                    profile.try_dct16 = false;
2386                }
2387                if max_size < 32 {
2388                    profile.try_dct32 = false;
2389                }
2390                if max_size < 64 {
2391                    profile.try_dct64 = false;
2392                }
2393            }
2394
2395            let mut enc = crate::vardct::VarDctEncoder::new(cfg.distance);
2396            enc.effort = cfg.effort;
2397            enc.profile = profile;
2398            enc.use_ans = cfg.use_ans;
2399            enc.optimize_codes = enc.profile.optimize_codes;
2400            enc.custom_orders = enc.profile.custom_orders;
2401            enc.ac_strategy_enabled = enc.profile.ac_strategy_enabled;
2402            enc.enable_noise = cfg.noise;
2403            enc.enable_denoise = cfg.denoise;
2404            enc.enable_gaborish = cfg.gaborish && cfg.distance > 0.5;
2405            enc.error_diffusion = cfg.error_diffusion;
2406            enc.pixel_domain_loss = cfg.pixel_domain_loss;
2407            enc.enable_lz77 = cfg.lz77;
2408            enc.lz77_method = cfg.lz77_method;
2409            enc.force_strategy = cfg.force_strategy;
2410            enc.enable_patches = cfg.patches;
2411            enc.encoder_mode = cfg.mode;
2412            enc.splines = cfg.splines.clone();
2413            enc.is_grayscale = self.layout.is_grayscale();
2414            enc.progressive = cfg.progressive;
2415            enc.use_lf_frame = cfg.lf_frame;
2416            #[cfg(feature = "butteraugli-loop")]
2417            {
2418                enc.butteraugli_iters = cfg.butteraugli_iters;
2419            }
2420            enc.bit_depth_16 = self.bit_depth_16;
2421            enc.source_gamma = self.source_gamma;
2422            enc.color_encoding = self.color_encoding.clone();
2423            enc.intensity_target = self.intensity_target;
2424            enc.min_nits = self.min_nits;
2425            enc.intrinsic_size = self.intrinsic_size;
2426            if let Some(ref icc) = self.icc_profile {
2427                enc.icc_profile = Some(icc.clone());
2428            }
2429
2430            let output = enc
2431                .encode(w, h, &linear_rgb, alpha.as_deref())
2432                .map_err(EncodeError::from)?;
2433
2434            #[cfg(feature = "butteraugli-loop")]
2435            let butteraugli_iters_actual = cfg.butteraugli_iters;
2436            #[cfg(not(feature = "butteraugli-loop"))]
2437            let butteraugli_iters_actual = 0u32;
2438
2439            let stats = EncodeStats {
2440                mode: EncodeMode::Lossy,
2441                strategy_counts: output.strategy_counts,
2442                gaborish: cfg.gaborish,
2443                ans: cfg.use_ans,
2444                butteraugli_iters: butteraugli_iters_actual,
2445                pixel_domain_loss: cfg.pixel_domain_loss,
2446                ..Default::default()
2447            };
2448            Ok::<_, EncodeError>((output.data, stats))
2449        })?;
2450
2451        stats.codestream_size = codestream.len();
2452
2453        let output = if self.exif.is_some() || self.xmp.is_some() {
2454            crate::container::wrap_in_container(
2455                &codestream,
2456                self.exif.as_deref(),
2457                self.xmp.as_deref(),
2458            )
2459        } else {
2460            codestream
2461        };
2462
2463        stats.output_size = output.len();
2464        Ok(EncodeResult {
2465            data: Some(output),
2466            stats,
2467        })
2468    }
2469}
2470
2471impl LossyConfig {
2472    /// Create a streaming encoder for incremental row input.
2473    ///
2474    /// Pixels are converted to the internal format as rows are pushed via
2475    /// [`LossyEncoder::push_rows`], allowing callers to free source buffers
2476    /// incrementally rather than materializing the entire image.
2477    #[track_caller]
2478    pub fn encoder(&self, width: u32, height: u32, layout: PixelLayout) -> Result<LossyEncoder> {
2479        if width == 0 || height == 0 {
2480            return Err(at(EncodeError::InvalidInput {
2481                message: format!("zero dimensions: {width}x{height}"),
2482            }));
2483        }
2484        let w = width as usize;
2485        let h = height as usize;
2486        let rgb_capacity = w.checked_mul(h).and_then(|n| n.checked_mul(3));
2487        let Some(rgb_capacity) = rgb_capacity else {
2488            return Err(at(EncodeError::InvalidInput {
2489                message: "image dimensions overflow".into(),
2490            }));
2491        };
2492
2493        let bit_depth_16 = layout.is_16bit();
2494        let has_alpha = layout.has_alpha();
2495        let alpha = if has_alpha {
2496            let mut v = Vec::new();
2497            v.try_reserve(w * h)
2498                .map_err(|e| at(EncodeError::from(crate::error::Error::from(e))))?;
2499            Some(v)
2500        } else {
2501            None
2502        };
2503
2504        let mut linear_rgb = Vec::new();
2505        linear_rgb
2506            .try_reserve(rgb_capacity)
2507            .map_err(|e| at(EncodeError::from(crate::error::Error::from(e))))?;
2508
2509        Ok(LossyEncoder {
2510            cfg: self.clone(),
2511            width,
2512            height,
2513            layout,
2514            rows_pushed: 0,
2515            linear_rgb,
2516            alpha,
2517            bit_depth_16,
2518            icc_profile: None,
2519            exif: None,
2520            xmp: None,
2521            source_gamma: None,
2522            color_encoding: None,
2523            intensity_target: 255.0,
2524            min_nits: 0.0,
2525            intrinsic_size: None,
2526        })
2527    }
2528}
2529
2530/// Streaming lossless (modular) encoder.
2531///
2532/// Accepts pixel rows incrementally via [`push_rows`](Self::push_rows), then
2533/// encodes on [`finish`](Self::finish). This allows callers to free source pixel
2534/// buffers as rows are pushed, rather than materializing the entire image in
2535/// memory before encoding.
2536///
2537/// ```rust,no_run
2538/// use jxl_encoder::{LosslessConfig, PixelLayout};
2539///
2540/// let mut enc = LosslessConfig::new()
2541///     .encoder(800, 600, PixelLayout::Rgb8)?;
2542///
2543/// # let row_bytes = 800 * 3;
2544/// # let source_rows = vec![0u8; row_bytes * 600];
2545/// for chunk in source_rows.chunks(row_bytes * 100) {
2546///     enc.push_rows(chunk, 100)?;
2547/// }
2548///
2549/// let jxl_bytes = enc.finish()?;
2550/// # Ok::<_, jxl_encoder::At<jxl_encoder::EncodeError>>(())
2551/// ```
pub struct LosslessEncoder {
    /// Encoding options captured from the originating [`LosslessConfig`].
    cfg: LosslessConfig,
    /// Image width in pixels.
    width: u32,
    /// Total expected image height in rows.
    height: u32,
    /// Source pixel layout; drives per-row deinterleaving in `push_rows`.
    layout: PixelLayout,
    /// Rows accepted so far via `push_rows`.
    rows_pushed: u32,
    /// Per-channel planes built incrementally as rows are pushed.
    channels: Vec<crate::modular::channel::Channel>,
    /// Channel count of the source layout.
    /// NOTE(review): presumed from the name — the constructor is elsewhere;
    /// confirm against it.
    num_source_channels: usize,
    /// Source bit depth in bits per sample.
    bit_depth: u32,
    /// True for grayscale source layouts.
    is_grayscale: bool,
    /// True when the source layout carries an alpha channel.
    has_alpha: bool,
    /// Optional ICC profile attached via `with_icc_profile`.
    icc_profile: Option<Vec<u8>>,
    /// Optional EXIF payload attached via `with_exif`.
    exif: Option<Vec<u8>>,
    /// Optional XMP payload attached via `with_xmp`.
    xmp: Option<Vec<u8>>,
    /// Custom source gamma transfer function, when set via `with_source_gamma`.
    source_gamma: Option<f32>,
    /// Explicit color encoding to write to the JXL header.
    color_encoding: Option<crate::headers::color_encoding::ColorEncoding>,
    /// Peak display luminance in nits.
    intensity_target: f32,
    /// Minimum display luminance in nits.
    min_nits: f32,
    /// Intrinsic display size override.
    intrinsic_size: Option<(u32, u32)>,
}
2572
2573impl LosslessEncoder {
2574    /// Attach an ICC color profile.
2575    pub fn with_icc_profile(mut self, data: &[u8]) -> Self {
2576        self.icc_profile = Some(data.to_vec());
2577        self
2578    }
2579
2580    /// Attach EXIF data.
2581    pub fn with_exif(mut self, data: &[u8]) -> Self {
2582        self.exif = Some(data.to_vec());
2583        self
2584    }
2585
2586    /// Attach XMP data.
2587    pub fn with_xmp(mut self, data: &[u8]) -> Self {
2588        self.xmp = Some(data.to_vec());
2589        self
2590    }
2591
2592    /// Specify that source pixels use a custom gamma transfer function.
2593    pub fn with_source_gamma(mut self, gamma: f32) -> Self {
2594        self.source_gamma = Some(gamma);
2595        self
2596    }
2597
2598    /// Override the color encoding written to the JXL header.
2599    pub fn with_color_encoding(
2600        mut self,
2601        ce: crate::headers::color_encoding::ColorEncoding,
2602    ) -> Self {
2603        self.color_encoding = Some(ce);
2604        self
2605    }
2606
2607    /// Set the peak display luminance in nits for HDR content.
2608    pub fn with_intensity_target(mut self, nits: f32) -> Self {
2609        self.intensity_target = nits;
2610        self
2611    }
2612
2613    /// Set the minimum display luminance in nits.
2614    pub fn with_min_nits(mut self, nits: f32) -> Self {
2615        self.min_nits = nits;
2616        self
2617    }
2618
2619    /// Set the intrinsic display size.
2620    pub fn with_intrinsic_size(mut self, width: u32, height: u32) -> Self {
2621        self.intrinsic_size = Some((width, height));
2622        self
2623    }
2624
2625    /// Number of rows pushed so far.
2626    pub fn rows_pushed(&self) -> u32 {
2627        self.rows_pushed
2628    }
2629
2630    /// Total expected height.
2631    pub fn height(&self) -> u32 {
2632        self.height
2633    }
2634
2635    /// Push pixel rows into the encoder.
2636    ///
2637    /// `pixels` must contain exactly `width * num_rows * bytes_per_pixel` bytes.
2638    /// Rows are deinterleaved into per-channel planes immediately, so the caller
2639    /// can free the source buffer after this call returns.
2640    #[track_caller]
2641    pub fn push_rows(&mut self, pixels: &[u8], num_rows: u32) -> Result<()> {
2642        self.push_rows_inner(pixels, num_rows).map_err(at)
2643    }
2644
2645    fn push_rows_inner(
2646        &mut self,
2647        pixels: &[u8],
2648        num_rows: u32,
2649    ) -> core::result::Result<(), EncodeError> {
2650        if num_rows == 0 {
2651            return Ok(());
2652        }
2653        let remaining = self.height - self.rows_pushed;
2654        if num_rows > remaining {
2655            return Err(EncodeError::InvalidInput {
2656                message: format!(
2657                    "push_rows: {num_rows} rows would exceed image height \
2658                     ({} pushed + {num_rows} > {})",
2659                    self.rows_pushed, self.height
2660                ),
2661            });
2662        }
2663        let w = self.width as usize;
2664        let n = num_rows as usize;
2665        let bpp = self.layout.bytes_per_pixel();
2666        let expected = w.checked_mul(n).and_then(|wn| wn.checked_mul(bpp));
2667        match expected {
2668            Some(expected) if pixels.len() == expected => {}
2669            Some(expected) => {
2670                return Err(EncodeError::InvalidInput {
2671                    message: format!(
2672                        "push_rows: expected {expected} bytes for {w}x{n} {:?}, got {}",
2673                        self.layout,
2674                        pixels.len()
2675                    ),
2676                });
2677            }
2678            None => {
2679                return Err(EncodeError::InvalidInput {
2680                    message: "push_rows: row dimensions overflow".into(),
2681                });
2682            }
2683        }
2684
2685        let y_start = self.rows_pushed as usize;
2686        let nc = self.num_source_channels;
2687
2688        match self.layout {
2689            PixelLayout::Rgb8 | PixelLayout::Bgr8 => {
2690                let is_bgr = matches!(self.layout, PixelLayout::Bgr8);
2691                for y in 0..n {
2692                    let row_offset = y * w * 3;
2693                    let dst_y = y_start + y;
2694                    for x in 0..w {
2695                        let src = row_offset + x * 3;
2696                        let (r, g, b) = if is_bgr {
2697                            (pixels[src + 2], pixels[src + 1], pixels[src])
2698                        } else {
2699                            (pixels[src], pixels[src + 1], pixels[src + 2])
2700                        };
2701                        self.channels[0].set(x, dst_y, r as i32);
2702                        self.channels[1].set(x, dst_y, g as i32);
2703                        self.channels[2].set(x, dst_y, b as i32);
2704                    }
2705                }
2706            }
2707            PixelLayout::Rgba8 | PixelLayout::Bgra8 => {
2708                let is_bgr = matches!(self.layout, PixelLayout::Bgra8);
2709                for y in 0..n {
2710                    let row_offset = y * w * 4;
2711                    let dst_y = y_start + y;
2712                    for x in 0..w {
2713                        let src = row_offset + x * 4;
2714                        let (r, g, b) = if is_bgr {
2715                            (pixels[src + 2], pixels[src + 1], pixels[src])
2716                        } else {
2717                            (pixels[src], pixels[src + 1], pixels[src + 2])
2718                        };
2719                        self.channels[0].set(x, dst_y, r as i32);
2720                        self.channels[1].set(x, dst_y, g as i32);
2721                        self.channels[2].set(x, dst_y, b as i32);
2722                        self.channels[3].set(x, dst_y, pixels[src + 3] as i32);
2723                    }
2724                }
2725            }
2726            PixelLayout::Gray8 => {
2727                for y in 0..n {
2728                    let row_offset = y * w;
2729                    let dst_y = y_start + y;
2730                    for x in 0..w {
2731                        self.channels[0].set(x, dst_y, pixels[row_offset + x] as i32);
2732                    }
2733                }
2734            }
2735            PixelLayout::GrayAlpha8 => {
2736                for y in 0..n {
2737                    let row_offset = y * w * 2;
2738                    let dst_y = y_start + y;
2739                    for x in 0..w {
2740                        let src = row_offset + x * 2;
2741                        self.channels[0].set(x, dst_y, pixels[src] as i32);
2742                        self.channels[1].set(x, dst_y, pixels[src + 1] as i32);
2743                    }
2744                }
2745            }
2746            PixelLayout::Rgb16
2747            | PixelLayout::Rgba16
2748            | PixelLayout::Gray16
2749            | PixelLayout::GrayAlpha16 => {
2750                let pixels_u16: &[u16] = bytemuck::cast_slice(pixels);
2751                for y in 0..n {
2752                    let row_offset = y * w * nc;
2753                    let dst_y = y_start + y;
2754                    for x in 0..w {
2755                        let src = row_offset + x * nc;
2756                        for c in 0..nc {
2757                            self.channels[c].set(x, dst_y, pixels_u16[src + c] as i32);
2758                        }
2759                    }
2760                }
2761            }
2762            _ => {
2763                return Err(EncodeError::UnsupportedPixelLayout(self.layout));
2764            }
2765        }
2766
2767        self.rows_pushed += num_rows;
2768        Ok(())
2769    }
2770
2771    /// Encode the accumulated pixels and return the JXL bytes.
2772    ///
2773    /// All rows must have been pushed via [`push_rows`](Self::push_rows) before
2774    /// calling this. Returns an error if the image is incomplete.
2775    #[track_caller]
2776    pub fn finish(self) -> Result<Vec<u8>> {
2777        self.finish_inner()
2778            .map(|mut r| r.take_data().unwrap())
2779            .map_err(at)
2780    }
2781
2782    /// Encode and return JXL bytes together with [`EncodeStats`].
2783    #[track_caller]
2784    pub fn finish_with_stats(self) -> Result<EncodeResult> {
2785        self.finish_inner().map_err(at)
2786    }
2787
2788    /// Encode, appending to an existing buffer.
2789    #[track_caller]
2790    pub fn finish_into(self, out: &mut Vec<u8>) -> Result<EncodeResult> {
2791        let mut result = self.finish_inner().map_err(at)?;
2792        if let Some(data) = result.data.take() {
2793            out.extend_from_slice(&data);
2794        }
2795        Ok(result)
2796    }
2797
2798    /// Encode, writing to a `std::io::Write` destination.
2799    #[cfg(feature = "std")]
2800    #[track_caller]
2801    pub fn finish_to(self, mut dest: impl std::io::Write) -> Result<EncodeResult> {
2802        let mut result = self.finish_inner().map_err(at)?;
2803        if let Some(data) = result.data.take() {
2804            dest.write_all(&data)
2805                .map_err(|e| at(EncodeError::from(e)))?;
2806        }
2807        Ok(result)
2808    }
2809
2810    fn finish_inner(self) -> core::result::Result<EncodeResult, EncodeError> {
2811        use crate::bit_writer::BitWriter;
2812        use crate::headers::color_encoding::ColorSpace;
2813        use crate::headers::{ColorEncoding, FileHeader};
2814        use crate::modular::channel::ModularImage;
2815        use crate::modular::frame::{FrameEncoder, FrameEncoderOptions};
2816
2817        if self.rows_pushed != self.height {
2818            return Err(EncodeError::InvalidInput {
2819                message: format!(
2820                    "incomplete image: {} of {} rows pushed",
2821                    self.rows_pushed, self.height
2822                ),
2823            });
2824        }
2825
2826        let cfg = &self.cfg;
2827        let w = self.width as usize;
2828        let h = self.height as usize;
2829
2830        let mut image = ModularImage {
2831            channels: self.channels,
2832            bit_depth: self.bit_depth,
2833            is_grayscale: self.is_grayscale,
2834            has_alpha: self.has_alpha,
2835        };
2836
2837        let (codestream, mut stats) = run_with_threads(cfg.threads, || {
2838            // Reconstruct interleaved pixels for patch detection (8-bit RGB only)
2839            let num_channels = self.layout.bytes_per_pixel();
2840            let can_use_patches =
2841                cfg.patches && !image.is_grayscale && image.bit_depth <= 8 && num_channels >= 3;
2842            let patches_data = if can_use_patches {
2843                let mut detection_pixels = vec![0u8; w * h * num_channels];
2844                let nc = core::cmp::min(num_channels, image.channels.len());
2845                for y in 0..h {
2846                    for x in 0..w {
2847                        for c in 0..nc {
2848                            detection_pixels[(y * w + x) * num_channels + c] =
2849                                image.channels[c].get(x, y) as u8;
2850                        }
2851                        // Fill remaining channels (alpha) from the image
2852                        for c in nc..num_channels {
2853                            if c < image.channels.len() {
2854                                detection_pixels[(y * w + x) * num_channels + c] =
2855                                    image.channels[c].get(x, y) as u8;
2856                            }
2857                        }
2858                    }
2859                }
2860                crate::vardct::patches::find_and_build_lossless(
2861                    &detection_pixels,
2862                    w,
2863                    h,
2864                    num_channels,
2865                    image.bit_depth,
2866                )
2867            } else {
2868                None
2869            };
2870
2871            // Build file header
2872            let mut file_header = if image.is_grayscale {
2873                FileHeader::new_gray(self.width, self.height)
2874            } else if image.has_alpha {
2875                FileHeader::new_rgba(self.width, self.height)
2876            } else {
2877                FileHeader::new_rgb(self.width, self.height)
2878            };
2879            if image.bit_depth == 16 {
2880                file_header.metadata.bit_depth = crate::headers::file_header::BitDepth::uint16();
2881                for ec in &mut file_header.metadata.extra_channels {
2882                    ec.bit_depth = crate::headers::file_header::BitDepth::uint16();
2883                }
2884            }
2885            if self.icc_profile.is_some() {
2886                file_header.metadata.color_encoding.want_icc = true;
2887            }
2888            file_header.metadata.intensity_target = self.intensity_target;
2889            file_header.metadata.min_nits = self.min_nits;
2890            if let Some((w, h)) = self.intrinsic_size {
2891                file_header.metadata.have_intrinsic_size = true;
2892                file_header.metadata.intrinsic_width = w;
2893                file_header.metadata.intrinsic_height = h;
2894            }
2895
2896            let mut writer = BitWriter::new();
2897            file_header.write(&mut writer).map_err(EncodeError::from)?;
2898            if let Some(ref icc) = self.icc_profile {
2899                crate::icc::write_icc(icc, &mut writer).map_err(EncodeError::from)?;
2900            }
2901            writer.zero_pad_to_byte();
2902
2903            // Write reference frame and subtract patches
2904            if let Some(ref pd) = patches_data {
2905                let lossless_profile = crate::effort::EffortProfile::lossless(cfg.effort, cfg.mode);
2906                crate::vardct::patches::encode_reference_frame_rgb(
2907                    pd,
2908                    image.bit_depth,
2909                    cfg.use_ans,
2910                    lossless_profile.patch_ref_tree_learning,
2911                    &mut writer,
2912                )
2913                .map_err(EncodeError::from)?;
2914                writer.zero_pad_to_byte();
2915                let bd = image.bit_depth;
2916                crate::vardct::patches::subtract_patches_modular(&mut image, pd, bd);
2917            }
2918
2919            // Encode frame
2920            let frame_encoder = FrameEncoder::new(
2921                w,
2922                h,
2923                FrameEncoderOptions {
2924                    use_modular: true,
2925                    effort: cfg.effort,
2926                    use_ans: cfg.use_ans,
2927                    use_tree_learning: cfg.tree_learning,
2928                    use_squeeze: cfg.squeeze,
2929                    enable_lz77: cfg.lz77,
2930                    lz77_method: cfg.lz77_method,
2931                    lossy_palette: cfg.lossy_palette,
2932                    encoder_mode: cfg.mode,
2933                    profile: crate::effort::EffortProfile::lossless(cfg.effort, cfg.mode),
2934                    have_animation: false,
2935                    duration: 0,
2936                    is_last: true,
2937                    crop: None,
2938                    skip_rct: false,
2939                },
2940            );
2941            let color_encoding = if let Some(ce) = self.color_encoding.clone() {
2942                if image.is_grayscale && ce.color_space != ColorSpace::Gray {
2943                    ColorEncoding {
2944                        color_space: ColorSpace::Gray,
2945                        ..ce
2946                    }
2947                } else {
2948                    ce
2949                }
2950            } else if let Some(gamma) = self.source_gamma {
2951                if image.is_grayscale {
2952                    ColorEncoding::gray_with_gamma(gamma)
2953                } else {
2954                    ColorEncoding::with_gamma(gamma)
2955                }
2956            } else if image.is_grayscale {
2957                ColorEncoding::gray()
2958            } else {
2959                ColorEncoding::srgb()
2960            };
2961            frame_encoder
2962                .encode_modular_with_patches(
2963                    &image,
2964                    &color_encoding,
2965                    &mut writer,
2966                    patches_data.as_ref(),
2967                )
2968                .map_err(EncodeError::from)?;
2969
2970            let stats = EncodeStats {
2971                mode: EncodeMode::Lossless,
2972                ans: cfg.use_ans,
2973                ..Default::default()
2974            };
2975            Ok::<_, EncodeError>((writer.finish_with_padding(), stats))
2976        })?;
2977
2978        stats.codestream_size = codestream.len();
2979
2980        let output = if self.exif.is_some() || self.xmp.is_some() {
2981            crate::container::wrap_in_container(
2982                &codestream,
2983                self.exif.as_deref(),
2984                self.xmp.as_deref(),
2985            )
2986        } else {
2987            codestream
2988        };
2989
2990        stats.output_size = output.len();
2991        Ok(EncodeResult {
2992            data: Some(output),
2993            stats,
2994        })
2995    }
2996}
2997
2998impl LosslessConfig {
2999    /// Create a streaming encoder for incremental row input.
3000    ///
3001    /// Per-channel planes are pre-allocated and filled as rows are pushed via
3002    /// [`LosslessEncoder::push_rows`], allowing callers to free source buffers
3003    /// incrementally rather than materializing the entire image.
3004    #[track_caller]
3005    pub fn encoder(&self, width: u32, height: u32, layout: PixelLayout) -> Result<LosslessEncoder> {
3006        use crate::modular::channel::Channel;
3007
3008        if width == 0 || height == 0 {
3009            return Err(at(EncodeError::InvalidInput {
3010                message: format!("zero dimensions: {width}x{height}"),
3011            }));
3012        }
3013
3014        let w = width as usize;
3015        let h = height as usize;
3016
3017        let (num_channels, bit_depth, is_grayscale, has_alpha) = match layout {
3018            PixelLayout::Rgb8 | PixelLayout::Bgr8 => (3, 8u32, false, false),
3019            PixelLayout::Rgba8 | PixelLayout::Bgra8 => (4, 8, false, true),
3020            PixelLayout::Gray8 => (1, 8, true, false),
3021            PixelLayout::GrayAlpha8 => (2, 8, true, true),
3022            PixelLayout::Rgb16 => (3, 16, false, false),
3023            PixelLayout::Rgba16 => (4, 16, false, true),
3024            PixelLayout::Gray16 => (1, 16, true, false),
3025            PixelLayout::GrayAlpha16 => (2, 16, true, true),
3026            other => return Err(at(EncodeError::UnsupportedPixelLayout(other))),
3027        };
3028
3029        let mut channels = Vec::with_capacity(num_channels);
3030        for _ in 0..num_channels {
3031            channels.push(Channel::new(w, h).map_err(|e| at(EncodeError::from(e)))?);
3032        }
3033
3034        Ok(LosslessEncoder {
3035            cfg: self.clone(),
3036            width,
3037            height,
3038            layout,
3039            rows_pushed: 0,
3040            channels,
3041            num_source_channels: num_channels,
3042            bit_depth,
3043            is_grayscale,
3044            has_alpha,
3045            icc_profile: None,
3046            exif: None,
3047            xmp: None,
3048            source_gamma: None,
3049            color_encoding: None,
3050            intensity_target: 255.0,
3051            min_nits: 0.0,
3052            intrinsic_size: None,
3053        })
3054    }
3055}
3056
3057// ── Thread pool helper ──────────────────────────────────────────────────────
3058
/// Run a closure inside a scoped rayon thread pool when the `parallel` feature
/// is enabled and `threads != 1`. Otherwise, just call the closure directly.
#[cfg(feature = "parallel")]
fn run_with_threads<T>(threads: usize, f: impl FnOnce() -> T + Send) -> T
where
    T: Send,
{
    // `threads == 1` means strictly sequential: skip pool construction entirely.
    if threads != 1 {
        // `threads == 0` leaves rayon's default pool sizing in place.
        let builder = if threads > 0 {
            rayon::ThreadPoolBuilder::new().num_threads(threads)
        } else {
            rayon::ThreadPoolBuilder::new()
        };
        if let Ok(pool) = builder.build() {
            return pool.install(f);
        }
        // Pool construction failed — degrade to running inline below.
    }
    f()
}
3078
/// Sequential fallback used when the `parallel` feature is disabled: the
/// requested thread count is ignored and the closure runs on the calling thread.
#[cfg(not(feature = "parallel"))]
fn run_with_threads<T>(_requested_threads: usize, f: impl FnOnce() -> T) -> T {
    f()
}
3083
3084// ── Animation encode implementations ────────────────────────────────────────
3085
3086fn validate_animation_input(
3087    width: u32,
3088    height: u32,
3089    layout: PixelLayout,
3090    frames: &[AnimationFrame<'_>],
3091) -> core::result::Result<(), EncodeError> {
3092    if width == 0 || height == 0 {
3093        return Err(EncodeError::InvalidInput {
3094            message: format!("zero dimensions: {width}x{height}"),
3095        });
3096    }
3097    if frames.is_empty() {
3098        return Err(EncodeError::InvalidInput {
3099            message: "animation requires at least one frame".into(),
3100        });
3101    }
3102    let expected_size = (width as usize)
3103        .checked_mul(height as usize)
3104        .and_then(|n| n.checked_mul(layout.bytes_per_pixel()))
3105        .ok_or_else(|| EncodeError::InvalidInput {
3106            message: "image dimensions overflow".into(),
3107        })?;
3108    for (i, frame) in frames.iter().enumerate() {
3109        if frame.pixels.len() != expected_size {
3110            return Err(EncodeError::InvalidInput {
3111                message: format!(
3112                    "frame {} pixel buffer size mismatch: expected {expected_size}, got {}",
3113                    i,
3114                    frame.pixels.len()
3115                ),
3116            });
3117        }
3118    }
3119    Ok(())
3120}
3121
/// Losslessly encodes an animated image: writes a file header carrying an
/// animation header, then one modular frame per input frame, using
/// frame-to-frame crop detection to encode only changed regions.
fn encode_animation_lossless(
    cfg: &LosslessConfig,
    width: u32,
    height: u32,
    layout: PixelLayout,
    animation: &AnimationParams,
    frames: &[AnimationFrame<'_>],
) -> core::result::Result<Vec<u8>, EncodeError> {
    use crate::bit_writer::BitWriter;
    use crate::headers::file_header::AnimationHeader;
    use crate::headers::{ColorEncoding, FileHeader};
    use crate::modular::channel::ModularImage;
    use crate::modular::frame::{FrameEncoder, FrameEncoderOptions};

    validate_animation_input(width, height, layout, frames)?;

    let w = width as usize;
    let h = height as usize;
    let num_frames = frames.len();

    // Build file header with animation
    // Frame 0 is converted once here only to derive grayscale/alpha/bit-depth
    // for the header; the per-frame loop below converts each frame again.
    let sample_image = match layout {
        PixelLayout::Rgb8 => ModularImage::from_rgb8(frames[0].pixels, w, h),
        PixelLayout::Rgba8 => ModularImage::from_rgba8(frames[0].pixels, w, h),
        PixelLayout::Bgr8 => ModularImage::from_rgb8(&bgr_to_rgb(frames[0].pixels, 3), w, h),
        PixelLayout::Bgra8 => ModularImage::from_rgba8(&bgr_to_rgb(frames[0].pixels, 4), w, h),
        PixelLayout::Gray8 => ModularImage::from_gray8(frames[0].pixels, w, h),
        PixelLayout::GrayAlpha8 => ModularImage::from_grayalpha8(frames[0].pixels, w, h),
        PixelLayout::Rgb16 => ModularImage::from_rgb16_native(frames[0].pixels, w, h),
        PixelLayout::Rgba16 => ModularImage::from_rgba16_native(frames[0].pixels, w, h),
        PixelLayout::Gray16 => ModularImage::from_gray16_native(frames[0].pixels, w, h),
        PixelLayout::GrayAlpha16 => ModularImage::from_grayalpha16_native(frames[0].pixels, w, h),
        other => return Err(EncodeError::UnsupportedPixelLayout(other)),
    }
    .map_err(EncodeError::from)?;

    let mut file_header = if sample_image.is_grayscale {
        FileHeader::new_gray(width, height)
    } else if sample_image.has_alpha {
        FileHeader::new_rgba(width, height)
    } else {
        FileHeader::new_rgb(width, height)
    };
    // 16-bit sources need the header bit depth (and every extra channel's)
    // switched from the 8-bit default.
    if sample_image.bit_depth == 16 {
        file_header.metadata.bit_depth = crate::headers::file_header::BitDepth::uint16();
        for ec in &mut file_header.metadata.extra_channels {
            ec.bit_depth = crate::headers::file_header::BitDepth::uint16();
        }
    }
    file_header.metadata.animation = Some(AnimationHeader {
        tps_numerator: animation.tps_numerator,
        tps_denominator: animation.tps_denominator,
        num_loops: animation.num_loops,
        have_timecodes: false,
    });

    // Write file header
    let mut writer = BitWriter::new();
    file_header.write(&mut writer).map_err(EncodeError::from)?;
    writer.zero_pad_to_byte();

    // Encode each frame with crop detection
    let color_encoding = ColorEncoding::srgb();
    let bpp = layout.bytes_per_pixel();
    let mut prev_pixels: Option<&[u8]> = None;

    for (i, frame) in frames.iter().enumerate() {
        // Detect crop: compare current frame against previous.
        // Only use crop when it's smaller than the full frame.
        let crop = if let Some(prev) = prev_pixels {
            match detect_frame_crop(prev, frame.pixels, w, h, bpp, false) {
                Some(crop) if (crop.width as usize) < w || (crop.height as usize) < h => Some(crop),
                Some(_) => None, // Crop covers full frame — no benefit
                None => {
                    // Frames are identical — emit a minimal 1x1 crop to preserve canvas
                    Some(FrameCrop {
                        x0: 0,
                        y0: 0,
                        width: 1,
                        height: 1,
                    })
                }
            }
        } else {
            None // Frame 0: always full frame
        };

        // Build ModularImage from the appropriate pixel region
        // Deferred initialization: `frame_pixels_owned` keeps a cropped copy
        // alive for the borrow in `frame_pixels` when a crop is used.
        let (frame_w, frame_h, frame_pixels_owned);
        let frame_pixels: &[u8] = if let Some(ref crop) = crop {
            frame_w = crop.width as usize;
            frame_h = crop.height as usize;
            frame_pixels_owned = extract_pixel_crop(frame.pixels, w, crop, bpp);
            &frame_pixels_owned
        } else {
            frame_w = w;
            frame_h = h;
            frame_pixels_owned = Vec::new();
            let _ = &frame_pixels_owned; // suppress unused warning
            frame.pixels
        };

        let image = match layout {
            PixelLayout::Rgb8 => ModularImage::from_rgb8(frame_pixels, frame_w, frame_h),
            PixelLayout::Rgba8 => ModularImage::from_rgba8(frame_pixels, frame_w, frame_h),
            PixelLayout::Bgr8 => {
                ModularImage::from_rgb8(&bgr_to_rgb(frame_pixels, 3), frame_w, frame_h)
            }
            PixelLayout::Bgra8 => {
                ModularImage::from_rgba8(&bgr_to_rgb(frame_pixels, 4), frame_w, frame_h)
            }
            PixelLayout::Gray8 => ModularImage::from_gray8(frame_pixels, frame_w, frame_h),
            PixelLayout::GrayAlpha8 => {
                ModularImage::from_grayalpha8(frame_pixels, frame_w, frame_h)
            }
            PixelLayout::Rgb16 => ModularImage::from_rgb16_native(frame_pixels, frame_w, frame_h),
            PixelLayout::Rgba16 => ModularImage::from_rgba16_native(frame_pixels, frame_w, frame_h),
            PixelLayout::Gray16 => ModularImage::from_gray16_native(frame_pixels, frame_w, frame_h),
            PixelLayout::GrayAlpha16 => {
                ModularImage::from_grayalpha16_native(frame_pixels, frame_w, frame_h)
            }
            other => return Err(EncodeError::UnsupportedPixelLayout(other)),
        }
        .map_err(EncodeError::from)?;

        let use_tree_learning = cfg.tree_learning;
        let frame_encoder = FrameEncoder::new(
            frame_w,
            frame_h,
            FrameEncoderOptions {
                use_modular: true,
                effort: cfg.effort,
                use_ans: cfg.use_ans,
                use_tree_learning,
                use_squeeze: cfg.squeeze,
                enable_lz77: cfg.lz77,
                lz77_method: cfg.lz77_method,
                lossy_palette: cfg.lossy_palette,
                encoder_mode: cfg.mode,
                profile: crate::effort::EffortProfile::lossless(cfg.effort, cfg.mode),
                have_animation: true,
                duration: frame.duration,
                is_last: i == num_frames - 1,
                crop,
                skip_rct: false,
            },
        );
        frame_encoder
            .encode_modular(&image, &color_encoding, &mut writer)
            .map_err(EncodeError::from)?;

        // Crop detection always compares against the full previous frame,
        // never the cropped region.
        prev_pixels = Some(frame.pixels);
    }

    Ok(writer.finish_with_padding())
}
3278
3279fn encode_animation_lossy(
3280    cfg: &LossyConfig,
3281    width: u32,
3282    height: u32,
3283    layout: PixelLayout,
3284    animation: &AnimationParams,
3285    frames: &[AnimationFrame<'_>],
3286) -> core::result::Result<Vec<u8>, EncodeError> {
3287    use crate::bit_writer::BitWriter;
3288    use crate::headers::file_header::AnimationHeader;
3289    use crate::headers::frame_header::FrameOptions;
3290
3291    validate_animation_input(width, height, layout, frames)?;
3292
3293    let w = width as usize;
3294    let h = height as usize;
3295    let num_frames = frames.len();
3296
3297    // Set up VarDCT encoder
3298    let mut profile = crate::effort::EffortProfile::lossy(cfg.effort, cfg.mode);
3299
3300    // Apply max_strategy_size to profile flags
3301    if let Some(max_size) = cfg.max_strategy_size {
3302        if max_size < 16 {
3303            profile.try_dct16 = false;
3304        }
3305        if max_size < 32 {
3306            profile.try_dct32 = false;
3307        }
3308        if max_size < 64 {
3309            profile.try_dct64 = false;
3310        }
3311    }
3312
3313    let mut enc = crate::vardct::VarDctEncoder::new(cfg.distance);
3314    enc.effort = cfg.effort;
3315    enc.profile = profile;
3316    enc.use_ans = cfg.use_ans;
3317    enc.optimize_codes = enc.profile.optimize_codes;
3318    enc.custom_orders = enc.profile.custom_orders;
3319    enc.ac_strategy_enabled = enc.profile.ac_strategy_enabled;
3320    enc.enable_noise = cfg.noise;
3321    enc.enable_denoise = cfg.denoise;
3322    // libjxl gates gaborish at distance > 0.5 (enc_frame.cc:281)
3323    enc.enable_gaborish = cfg.gaborish && cfg.distance > 0.5;
3324    enc.error_diffusion = cfg.error_diffusion;
3325    enc.pixel_domain_loss = cfg.pixel_domain_loss;
3326    enc.enable_lz77 = cfg.lz77;
3327    enc.lz77_method = cfg.lz77_method;
3328    enc.force_strategy = cfg.force_strategy;
3329    enc.progressive = cfg.progressive;
3330    enc.use_lf_frame = cfg.lf_frame;
3331    #[cfg(feature = "butteraugli-loop")]
3332    {
3333        enc.butteraugli_iters = cfg.butteraugli_iters;
3334    }
3335    #[cfg(feature = "ssim2-loop")]
3336    {
3337        enc.ssim2_iters = cfg.ssim2_iters;
3338    }
3339    #[cfg(feature = "zensim-loop")]
3340    {
3341        enc.zensim_iters = cfg.zensim_iters;
3342    }
3343
3344    // Detect alpha and 16-bit from layout
3345    let has_alpha = layout.has_alpha();
3346    let bit_depth_16 = matches!(layout, PixelLayout::Rgb16 | PixelLayout::Rgba16);
3347    enc.bit_depth_16 = bit_depth_16;
3348
3349    // Build file header from VarDCT encoder (sets xyb_encoded, rendering_intent, etc.)
3350    // then add animation metadata
3351    let mut file_header = enc.build_file_header(w, h, has_alpha);
3352    file_header.metadata.animation = Some(AnimationHeader {
3353        tps_numerator: animation.tps_numerator,
3354        tps_denominator: animation.tps_denominator,
3355        num_loops: animation.num_loops,
3356        have_timecodes: false,
3357    });
3358
3359    let mut writer = BitWriter::with_capacity(w * h * 4);
3360    file_header.write(&mut writer).map_err(EncodeError::from)?;
3361    if let Some(ref icc) = enc.icc_profile {
3362        crate::icc::write_icc(icc, &mut writer).map_err(EncodeError::from)?;
3363    }
3364    writer.zero_pad_to_byte();
3365
3366    // Encode each frame with crop detection
3367    let bpp = layout.bytes_per_pixel();
3368    let mut prev_pixels: Option<&[u8]> = None;
3369
3370    for (i, frame) in frames.iter().enumerate() {
3371        // Detect crop on raw input pixels (before linear conversion).
3372        // Only use crop when it's smaller than the full frame.
3373        let crop = if let Some(prev) = prev_pixels {
3374            match detect_frame_crop(prev, frame.pixels, w, h, bpp, true) {
3375                Some(crop) if (crop.width as usize) < w || (crop.height as usize) < h => Some(crop),
3376                Some(_) => None, // Crop covers full frame — no benefit
3377                None => {
3378                    // Frames identical — emit minimal 8x8 crop (VarDCT minimum)
3379                    Some(FrameCrop {
3380                        x0: 0,
3381                        y0: 0,
3382                        width: 8.min(width),
3383                        height: 8.min(height),
3384                    })
3385                }
3386            }
3387        } else {
3388            None // Frame 0: always full frame
3389        };
3390
3391        // Extract crop region from raw pixels, then convert to linear
3392        let (frame_w, frame_h) = if let Some(ref crop) = crop {
3393            (crop.width as usize, crop.height as usize)
3394        } else {
3395            (w, h)
3396        };
3397
3398        let crop_pixels_owned;
3399        let src_pixels: &[u8] = if let Some(ref crop) = crop {
3400            crop_pixels_owned = extract_pixel_crop(frame.pixels, w, crop, bpp);
3401            &crop_pixels_owned
3402        } else {
3403            crop_pixels_owned = Vec::new();
3404            let _ = &crop_pixels_owned;
3405            frame.pixels
3406        };
3407
3408        let (linear_rgb, alpha) = match layout {
3409            PixelLayout::Rgb8 => (srgb_u8_to_linear_f32(src_pixels, 3), None),
3410            PixelLayout::Bgr8 => (srgb_u8_to_linear_f32(&bgr_to_rgb(src_pixels, 3), 3), None),
3411            PixelLayout::Rgba8 => {
3412                let rgb = srgb_u8_to_linear_f32(src_pixels, 4);
3413                let alpha = extract_alpha(src_pixels, 4, 3);
3414                (rgb, Some(alpha))
3415            }
3416            PixelLayout::Bgra8 => {
3417                let swapped = bgr_to_rgb(src_pixels, 4);
3418                let rgb = srgb_u8_to_linear_f32(&swapped, 4);
3419                let alpha = extract_alpha(src_pixels, 4, 3);
3420                (rgb, Some(alpha))
3421            }
3422            PixelLayout::Gray8 => (gray_u8_to_linear_f32_rgb(src_pixels, 1), None),
3423            PixelLayout::GrayAlpha8 => {
3424                let rgb = gray_u8_to_linear_f32_rgb(src_pixels, 2);
3425                let alpha = extract_alpha(src_pixels, 2, 1);
3426                (rgb, Some(alpha))
3427            }
3428            PixelLayout::Rgb16 => (srgb_u16_to_linear_f32(src_pixels, 3), None),
3429            PixelLayout::Rgba16 => {
3430                let rgb = srgb_u16_to_linear_f32(src_pixels, 4);
3431                let alpha = extract_alpha_u16(src_pixels, 4, 3);
3432                (rgb, Some(alpha))
3433            }
3434            PixelLayout::Gray16 => (gray_u16_to_linear_f32_rgb(src_pixels, 1), None),
3435            PixelLayout::GrayAlpha16 => {
3436                let rgb = gray_u16_to_linear_f32_rgb(src_pixels, 2);
3437                let alpha = extract_alpha_u16(src_pixels, 2, 1);
3438                (rgb, Some(alpha))
3439            }
3440            PixelLayout::RgbLinearF32 => {
3441                let floats: &[f32] = bytemuck::cast_slice(src_pixels);
3442                (floats.to_vec(), None)
3443            }
3444            PixelLayout::RgbaLinearF32 => {
3445                let floats: &[f32] = bytemuck::cast_slice(src_pixels);
3446                let rgb: Vec<f32> = floats
3447                    .chunks(4)
3448                    .flat_map(|px| [px[0], px[1], px[2]])
3449                    .collect();
3450                let alpha = extract_alpha_f32(floats, 4, 3);
3451                (rgb, Some(alpha))
3452            }
3453            PixelLayout::GrayLinearF32 => {
3454                let floats: &[f32] = bytemuck::cast_slice(src_pixels);
3455                (gray_f32_to_linear_f32_rgb(floats, 1), None)
3456            }
3457            PixelLayout::GrayAlphaLinearF32 => {
3458                let floats: &[f32] = bytemuck::cast_slice(src_pixels);
3459                let rgb = gray_f32_to_linear_f32_rgb(floats, 2);
3460                let alpha = extract_alpha_f32(floats, 2, 1);
3461                (rgb, Some(alpha))
3462            }
3463        };
3464
3465        let frame_options = FrameOptions {
3466            have_animation: true,
3467            have_timecodes: false,
3468            duration: frame.duration,
3469            is_last: i == num_frames - 1,
3470            crop,
3471        };
3472
3473        enc.encode_frame_to_writer(
3474            frame_w,
3475            frame_h,
3476            &linear_rgb,
3477            alpha.as_deref(),
3478            &frame_options,
3479            &mut writer,
3480        )
3481        .map_err(EncodeError::from)?;
3482
3483        prev_pixels = Some(frame.pixels);
3484    }
3485
3486    Ok(writer.finish_with_padding())
3487}
3488
3489// ── Animation frame crop detection ──────────────────────────────────────────
3490
3491use crate::headers::frame_header::FrameCrop;
3492
3493/// Detects the minimal bounding rectangle that differs between two frames.
3494///
3495/// Compares `prev` and `curr` byte-by-byte. Returns `Some(FrameCrop)` with the
3496/// tight bounding box of changed pixels, or `None` if the frames are identical.
3497///
3498/// When `align_to_8x8` is true (for VarDCT), the crop is expanded outward to
3499/// 8x8 block boundaries for better compression.
3500fn detect_frame_crop(
3501    prev: &[u8],
3502    curr: &[u8],
3503    width: usize,
3504    height: usize,
3505    bytes_per_pixel: usize,
3506    align_to_8x8: bool,
3507) -> Option<FrameCrop> {
3508    let stride = width * bytes_per_pixel;
3509    debug_assert_eq!(prev.len(), height * stride);
3510    debug_assert_eq!(curr.len(), height * stride);
3511
3512    // Find top (first row with a difference)
3513    let mut top = height;
3514    let mut bottom = 0;
3515    let mut left = width;
3516    let mut right = 0;
3517
3518    for y in 0..height {
3519        let row_start = y * stride;
3520        let prev_row = &prev[row_start..row_start + stride];
3521        let curr_row = &curr[row_start..row_start + stride];
3522
3523        // Fast row comparison via u64 chunks — lets the compiler auto-vectorize
3524        let (prev_prefix, prev_u64, prev_suffix) = bytemuck::pod_align_to::<u8, u64>(prev_row);
3525        let (curr_prefix, curr_u64, curr_suffix) = bytemuck::pod_align_to::<u8, u64>(curr_row);
3526        if prev_prefix == curr_prefix && prev_u64 == curr_u64 && prev_suffix == curr_suffix {
3527            continue;
3528        }
3529
3530        // This row has differences — find leftmost and rightmost changed pixel
3531        if top == height {
3532            top = y;
3533        }
3534        bottom = y;
3535
3536        // Scan from left to find first differing pixel
3537        for x in 0..width {
3538            let px_start = x * bytes_per_pixel;
3539            if prev_row[px_start..px_start + bytes_per_pixel]
3540                != curr_row[px_start..px_start + bytes_per_pixel]
3541            {
3542                left = left.min(x);
3543                break;
3544            }
3545        }
3546        // Scan from right to find last differing pixel
3547        for x in (0..width).rev() {
3548            let px_start = x * bytes_per_pixel;
3549            if prev_row[px_start..px_start + bytes_per_pixel]
3550                != curr_row[px_start..px_start + bytes_per_pixel]
3551            {
3552                right = right.max(x);
3553                break;
3554            }
3555        }
3556    }
3557
3558    if top == height {
3559        // Frames are identical
3560        return None;
3561    }
3562
3563    // Convert to crop rectangle (inclusive → exclusive for width/height)
3564    let mut crop_x = left as i32;
3565    let mut crop_y = top as i32;
3566    let mut crop_w = (right - left + 1) as u32;
3567    let mut crop_h = (bottom - top + 1) as u32;
3568
3569    if align_to_8x8 {
3570        // Expand to 8x8 block boundaries
3571        let aligned_x = (crop_x / 8) * 8;
3572        let aligned_y = (crop_y / 8) * 8;
3573        let end_x = (crop_x as u32 + crop_w).div_ceil(8) * 8;
3574        let end_y = (crop_y as u32 + crop_h).div_ceil(8) * 8;
3575        crop_x = aligned_x;
3576        crop_y = aligned_y;
3577        crop_w = end_x.min(width as u32) - aligned_x as u32;
3578        crop_h = end_y.min(height as u32) - aligned_y as u32;
3579    }
3580
3581    Some(FrameCrop {
3582        x0: crop_x,
3583        y0: crop_y,
3584        width: crop_w,
3585        height: crop_h,
3586    })
3587}
3588
3589/// Extracts a rectangular crop region from a pixel buffer.
3590///
3591/// `bytes_per_pixel` is the number of bytes per pixel (e.g., 3 for RGB, 4 for RGBA).
3592fn extract_pixel_crop(
3593    pixels: &[u8],
3594    full_width: usize,
3595    crop: &FrameCrop,
3596    bytes_per_pixel: usize,
3597) -> Vec<u8> {
3598    let cx = crop.x0 as usize;
3599    let cy = crop.y0 as usize;
3600    let cw = crop.width as usize;
3601    let ch = crop.height as usize;
3602    let stride = full_width * bytes_per_pixel;
3603
3604    let mut out = Vec::with_capacity(cw * ch * bytes_per_pixel);
3605    for y in cy..cy + ch {
3606        let row_start = y * stride + cx * bytes_per_pixel;
3607        out.extend_from_slice(&pixels[row_start..row_start + cw * bytes_per_pixel]);
3608    }
3609    out
3610}
3611
3612// ── Pixel conversion helpers ────────────────────────────────────────────────
3613
/// Pre-computed sRGB u8 → linear f32 lookup table (256 entries).
/// Eliminates per-pixel `powf(2.4)` calls for the common 8-bit path.
///
/// Evaluated entirely at compile time: `powf` is not available in const
/// context, so the power segment is computed as a fifth root via Newton's
/// method (see below).
const SRGB_U8_TO_LINEAR: [f32; 256] = {
    let mut table = [0.0f32; 256];
    // u16 counter: a u8 would wrap before the `i < 256` bound is reached.
    let mut i = 0u16;
    while i < 256 {
        let c = i as f64 / 255.0;
        // Use f64 for accuracy during const eval, then truncate to f32.
        // powf is not const, so we use exp(2.4 * ln(x)) via a manual series.
        // For const context, we precompute using the piecewise sRGB TF.
        table[i as usize] = if c <= 0.04045 {
            // Linear segment of the IEC 61966-2-1 transfer function.
            (c / 12.92) as f32
        } else {
            // ((c + 0.055) / 1.055)^2.4
            // = exp(2.4 * ln((c + 0.055) / 1.055))
            // Approximate via repeated squaring: x^2.4 = x^2 * x^0.4
            // x^0.4 = (x^0.5)^0.8 = ((x^0.5)^0.5)^... too complex for const.
            // Instead, use the identity: x^2.4 = (x^12)^(1/5)
            // and compute fifth root via Newton's method in f64.
            let base = (c + 0.055) / 1.055;
            // x^12 = ((x^2)^2)^3
            let x2 = base * base;
            let x4 = x2 * x2;
            let x8 = x4 * x4;
            let x12 = x8 * x4;
            // Fifth root of x^12 = x^(12/5) = x^2.4
            // Newton: y_{n+1} = y_n - (y_n^5 - x12) / (5 * y_n^4)
            //       = (4*y_n + x12/y_n^4) / 5
            let mut y = base * base; // initial guess ~x^2, close to x^2.4
            // 8 iterations of Newton's method for fifth root (converges in ~6 for f64)
            let mut iter = 0;
            while iter < 8 {
                let y2 = y * y;
                let y4 = y2 * y2;
                y = (4.0 * y + x12 / y4) / 5.0;
                iter += 1;
            }
            y as f32
        };
        i += 1;
    }
    table
};
3657
3658/// sRGB u8 → linear f32 via LUT.
3659#[inline]
3660fn srgb_to_linear(c: u8) -> f32 {
3661    SRGB_U8_TO_LINEAR[c as usize]
3662}
3663
3664fn srgb_u8_to_linear_f32(data: &[u8], channels: usize) -> Vec<f32> {
3665    let num_pixels = data.len() / channels;
3666    let mut out = vec![0.0f32; num_pixels * 3];
3667    let lut = &SRGB_U8_TO_LINEAR;
3668    // zip chunks to eliminate output bounds checks; u8 index into [f32; 256] is always in bounds
3669    for (px, rgb) in data.chunks_exact(channels).zip(out.chunks_exact_mut(3)) {
3670        rgb[0] = lut[px[0] as usize];
3671        rgb[1] = lut[px[1] as usize];
3672        rgb[2] = lut[px[2] as usize];
3673    }
3674    out
3675}
3676
3677/// sRGB u16 → linear f32 (IEC 61966-2-1).
3678fn srgb_u16_to_linear_f32(data: &[u8], channels: usize) -> Vec<f32> {
3679    let pixels: &[u16] = bytemuck::cast_slice(data);
3680    pixels
3681        .chunks(channels)
3682        .flat_map(|px| {
3683            [
3684                srgb_to_linear_f(px[0] as f32 / 65535.0),
3685                srgb_to_linear_f(px[1] as f32 / 65535.0),
3686                srgb_to_linear_f(px[2] as f32 / 65535.0),
3687            ]
3688        })
3689        .collect()
3690}
3691
3692/// sRGB transfer function: normalized float [0,1] → linear float.
3693#[inline]
3694fn srgb_to_linear_f(c: f32) -> f32 {
3695    if c <= 0.04045 {
3696        c / 12.92
3697    } else {
3698        jxl_simd::fast_powf((c + 0.055) / 1.055, 2.4)
3699    }
3700}
3701
3702/// Gamma u8 → linear f32 RGB. `linear = (encoded/255)^(1/gamma)`
3703fn gamma_u8_to_linear_f32(data: &[u8], channels: usize, gamma: f32) -> Vec<f32> {
3704    // Build 256-entry LUT for u8 values (avoids per-pixel powf)
3705    let inv_gamma = 1.0 / gamma;
3706    let lut: [f32; 256] =
3707        core::array::from_fn(|i| jxl_simd::fast_powf(i as f32 / 255.0, inv_gamma));
3708    data.chunks(channels)
3709        .flat_map(|px| {
3710            [
3711                lut[px[0] as usize],
3712                lut[px[1] as usize],
3713                lut[px[2] as usize],
3714            ]
3715        })
3716        .collect()
3717}
3718
3719/// Gamma u16 → linear f32 RGB. `linear = (encoded/65535)^(1/gamma)`
3720fn gamma_u16_to_linear_f32(data: &[u8], channels: usize, gamma: f32) -> Vec<f32> {
3721    let inv_gamma = 1.0 / gamma;
3722    let pixels: &[u16] = bytemuck::cast_slice(data);
3723    pixels
3724        .chunks(channels)
3725        .flat_map(|px| {
3726            [
3727                jxl_simd::fast_powf(px[0] as f32 / 65535.0, inv_gamma),
3728                jxl_simd::fast_powf(px[1] as f32 / 65535.0, inv_gamma),
3729                jxl_simd::fast_powf(px[2] as f32 / 65535.0, inv_gamma),
3730            ]
3731        })
3732        .collect()
3733}
3734
3735/// Gamma u8 grayscale → linear f32 RGB (gray→R=G=B). `linear = (encoded/255)^(1/gamma)`
3736fn gamma_gray_u8_to_linear_f32_rgb(data: &[u8], stride: usize, gamma: f32) -> Vec<f32> {
3737    let inv_gamma = 1.0 / gamma;
3738    let lut: [f32; 256] =
3739        core::array::from_fn(|i| jxl_simd::fast_powf(i as f32 / 255.0, inv_gamma));
3740    data.chunks(stride)
3741        .flat_map(|px| {
3742            let v = lut[px[0] as usize];
3743            [v, v, v]
3744        })
3745        .collect()
3746}
3747
3748/// Gamma u16 grayscale → linear f32 RGB (gray→R=G=B). `linear = (encoded/65535)^(1/gamma)`
3749fn gamma_gray_u16_to_linear_f32_rgb(data: &[u8], stride: usize, gamma: f32) -> Vec<f32> {
3750    let inv_gamma = 1.0 / gamma;
3751    let pixels: &[u16] = bytemuck::cast_slice(data);
3752    pixels
3753        .chunks(stride)
3754        .flat_map(|px| {
3755            let v = jxl_simd::fast_powf(px[0] as f32 / 65535.0, inv_gamma);
3756            [v, v, v]
3757        })
3758        .collect()
3759}
3760
/// Extract alpha channel from interleaved 16-bit pixel data as u8
/// (quantized by taking the high byte).
///
/// `stride` and `alpha_offset` are in u16 samples. Reads native-endian
/// u16s byte-wise with `u16::from_ne_bytes` — the previous
/// `bytemuck::cast_slice::<u8, u16>` panics when the caller's `&[u8]`
/// is not 2-byte aligned.
fn extract_alpha_u16(data: &[u8], stride: usize, alpha_offset: usize) -> Vec<u8> {
    data.chunks_exact(stride * 2)
        .map(|px| {
            let a = u16::from_ne_bytes([px[2 * alpha_offset], px[2 * alpha_offset + 1]]);
            (a >> 8) as u8
        })
        .collect()
}
3769
/// Swap B and R channels: BGR(A) → RGB(A), returning a new buffer.
fn bgr_to_rgb(data: &[u8], stride: usize) -> Vec<u8> {
    let mut swapped = data.to_vec();
    // Per pixel, exchange bytes 0 and 2; any alpha byte stays in place.
    swapped.chunks_mut(stride).for_each(|px| px.swap(0, 2));
    swapped
}
3778
/// Extract a single channel from interleaved pixel data.
fn extract_alpha(data: &[u8], stride: usize, alpha_offset: usize) -> Vec<u8> {
    let mut channel = Vec::with_capacity(data.len() / stride);
    for px in data.chunks(stride) {
        channel.push(px[alpha_offset]);
    }
    channel
}
3783
/// Extract alpha from interleaved f32 pixel data, converting to u8 (0..255).
fn extract_alpha_f32(data: &[f32], stride: usize, alpha_offset: usize) -> Vec<u8> {
    let mut quantized = Vec::with_capacity(data.len() / stride);
    for px in data.chunks(stride) {
        // Clamp to [0, 1], scale to [0, 255], round-to-nearest via +0.5.
        let a = px[alpha_offset].clamp(0.0, 1.0);
        quantized.push((a * 255.0 + 0.5) as u8);
    }
    quantized
}
3790
3791/// Expand 8-bit sRGB grayscale to linear f32 RGB (gray→R=G=B).
3792fn gray_u8_to_linear_f32_rgb(data: &[u8], stride: usize) -> Vec<f32> {
3793    data.chunks(stride)
3794        .flat_map(|px| {
3795            let v = srgb_to_linear(px[0]);
3796            [v, v, v]
3797        })
3798        .collect()
3799}
3800
3801/// Expand 16-bit sRGB grayscale to linear f32 RGB (gray→R=G=B).
3802fn gray_u16_to_linear_f32_rgb(data: &[u8], stride: usize) -> Vec<f32> {
3803    let pixels: &[u16] = bytemuck::cast_slice(data);
3804    pixels
3805        .chunks(stride)
3806        .flat_map(|px| {
3807            let v = srgb_to_linear_f(px[0] as f32 / 65535.0);
3808            [v, v, v]
3809        })
3810        .collect()
3811}
3812
/// Expand linear f32 grayscale to linear f32 RGB (gray→R=G=B).
fn gray_f32_to_linear_f32_rgb(data: &[f32], stride: usize) -> Vec<f32> {
    let mut rgb = Vec::with_capacity(data.len() / stride * 3);
    for px in data.chunks(stride) {
        // Already linear — just replicate the gray sample to three channels.
        rgb.extend_from_slice(&[px[0], px[0], px[0]]);
    }
    rgb
}
3822
3823// ── Tests ───────────────────────────────────────────────────────────────────
3824
3825#[cfg(test)]
3826mod tests {
3827    use super::*;
3828
    #[test]
    fn test_lossless_config_builder_and_getters() {
        // Builder setters should round-trip through the matching getters.
        let cfg = LosslessConfig::new()
            .with_effort(5)
            .with_ans(false)
            .with_squeeze(true)
            .with_tree_learning(true);
        assert_eq!(cfg.effort(), 5);
        assert!(!cfg.ans());
        assert!(cfg.squeeze());
        assert!(cfg.tree_learning());
    }
3841
    #[test]
    fn test_lossy_config_builder_and_getters() {
        // Builder setters should round-trip through the matching getters.
        let cfg = LossyConfig::new(2.0)
            .with_effort(3)
            .with_gaborish(false)
            .with_noise(true);
        assert_eq!(cfg.distance(), 2.0);
        assert_eq!(cfg.effort(), 3);
        assert!(!cfg.gaborish());
        assert!(cfg.noise());
    }
3853
    #[test]
    fn test_pixel_layout_helpers() {
        // Exhaustive checks of the PixelLayout predicate/size helpers for
        // every supported layout variant.
        assert_eq!(PixelLayout::Rgb8.bytes_per_pixel(), 3);
        assert_eq!(PixelLayout::Rgba8.bytes_per_pixel(), 4);
        assert_eq!(PixelLayout::Bgr8.bytes_per_pixel(), 3);
        assert_eq!(PixelLayout::Bgra8.bytes_per_pixel(), 4);
        assert_eq!(PixelLayout::Gray8.bytes_per_pixel(), 1);
        assert_eq!(PixelLayout::GrayAlpha8.bytes_per_pixel(), 2);
        assert_eq!(PixelLayout::Rgb16.bytes_per_pixel(), 6);
        assert_eq!(PixelLayout::Rgba16.bytes_per_pixel(), 8);
        assert_eq!(PixelLayout::Gray16.bytes_per_pixel(), 2);
        assert_eq!(PixelLayout::GrayAlpha16.bytes_per_pixel(), 4);
        assert_eq!(PixelLayout::RgbLinearF32.bytes_per_pixel(), 12);
        assert_eq!(PixelLayout::RgbaLinearF32.bytes_per_pixel(), 16);
        assert_eq!(PixelLayout::GrayLinearF32.bytes_per_pixel(), 4);
        assert_eq!(PixelLayout::GrayAlphaLinearF32.bytes_per_pixel(), 8);
        // Linear
        assert!(!PixelLayout::Rgb8.is_linear());
        assert!(PixelLayout::RgbLinearF32.is_linear());
        assert!(PixelLayout::RgbaLinearF32.is_linear());
        assert!(PixelLayout::GrayLinearF32.is_linear());
        assert!(PixelLayout::GrayAlphaLinearF32.is_linear());
        assert!(!PixelLayout::Rgb16.is_linear());
        // Alpha
        assert!(!PixelLayout::Rgb8.has_alpha());
        assert!(PixelLayout::Rgba8.has_alpha());
        assert!(PixelLayout::Bgra8.has_alpha());
        assert!(PixelLayout::GrayAlpha8.has_alpha());
        assert!(PixelLayout::Rgba16.has_alpha());
        assert!(PixelLayout::GrayAlpha16.has_alpha());
        assert!(PixelLayout::RgbaLinearF32.has_alpha());
        assert!(PixelLayout::GrayAlphaLinearF32.has_alpha());
        assert!(!PixelLayout::Rgb16.has_alpha());
        assert!(!PixelLayout::RgbLinearF32.has_alpha());
        // 16-bit
        assert!(PixelLayout::Rgb16.is_16bit());
        assert!(PixelLayout::Rgba16.is_16bit());
        assert!(PixelLayout::Gray16.is_16bit());
        assert!(PixelLayout::GrayAlpha16.is_16bit());
        assert!(!PixelLayout::Rgb8.is_16bit());
        assert!(!PixelLayout::RgbLinearF32.is_16bit());
        // f32
        assert!(PixelLayout::RgbLinearF32.is_f32());
        assert!(PixelLayout::RgbaLinearF32.is_f32());
        assert!(PixelLayout::GrayLinearF32.is_f32());
        assert!(PixelLayout::GrayAlphaLinearF32.is_f32());
        assert!(!PixelLayout::Rgb8.is_f32());
        assert!(!PixelLayout::Rgb16.is_f32());
        // Grayscale
        assert!(PixelLayout::Gray8.is_grayscale());
        assert!(PixelLayout::GrayAlpha8.is_grayscale());
        assert!(PixelLayout::Gray16.is_grayscale());
        assert!(PixelLayout::GrayAlpha16.is_grayscale());
        assert!(PixelLayout::GrayLinearF32.is_grayscale());
        assert!(PixelLayout::GrayAlphaLinearF32.is_grayscale());
        assert!(!PixelLayout::Rgb16.is_grayscale());
        assert!(!PixelLayout::RgbLinearF32.is_grayscale());
    }
3912
    #[test]
    fn test_quality_to_distance() {
        // Explicit distances pass through; negative distances are rejected.
        assert!(Quality::Distance(1.0).to_distance().unwrap() == 1.0);
        assert!(Quality::Distance(-1.0).to_distance().is_err());
        assert!(Quality::Percent(100).to_distance().is_err()); // lossless invalid for lossy
        // Percent(90) maps to distance 1.0.
        assert!(Quality::Percent(90).to_distance().unwrap() == 1.0);
    }
3920
    #[test]
    fn test_pixel_validation() {
        // 2x2 RGB8 needs exactly 2*2*3 = 12 bytes.
        let cfg = LosslessConfig::new();
        let req = cfg.encode_request(2, 2, PixelLayout::Rgb8);
        assert!(req.validate_pixels(&[0u8; 12]).is_ok());
    }
3927
    #[test]
    fn test_pixel_validation_wrong_size() {
        // One byte short of 2*2*3 = 12 must be rejected.
        let cfg = LosslessConfig::new();
        let req = cfg.encode_request(2, 2, PixelLayout::Rgb8);
        assert!(req.validate_pixels(&[0u8; 11]).is_err());
    }
3934
    #[test]
    fn test_limits_check() {
        // Requested width 200 exceeds the configured max width 100.
        let limits = Limits::new().with_max_width(100);
        let cfg = LosslessConfig::new();
        let req = cfg
            .encode_request(200, 100, PixelLayout::Rgb8)
            .with_limits(&limits);
        assert!(req.check_limits().is_err());
    }
3944
    #[test]
    fn test_lossless_encode_rgb8_small() {
        // 4x4 red image
        let pixels = [255u8, 0, 0].repeat(16);
        let result = LosslessConfig::new()
            .encode_request(4, 4, PixelLayout::Rgb8)
            .encode(&pixels);
        assert!(result.is_ok());
        let jxl = result.unwrap();
        // Bare codestream starts with the two JXL signature bytes.
        assert_eq!(&jxl[..2], &[0xFF, 0x0A]); // JXL signature
    }
3956
    #[test]
    fn test_lossy_encode_rgb8_small() {
        // 8x8 gradient
        let mut pixels = Vec::with_capacity(8 * 8 * 3);
        for y in 0..8u8 {
            for x in 0..8u8 {
                pixels.push(x * 32);
                pixels.push(y * 32);
                pixels.push(128);
            }
        }
        let result = LossyConfig::new(2.0)
            .with_gaborish(false)
            .encode_request(8, 8, PixelLayout::Rgb8)
            .encode(&pixels);
        assert!(result.is_ok());
        let jxl = result.unwrap();
        // Output must start with the JXL codestream signature.
        assert_eq!(&jxl[..2], &[0xFF, 0x0A]);
    }
3976
    #[test]
    fn test_fluent_lossless() {
        // One-line convenience path: Config::encode without an explicit request.
        let pixels = vec![128u8; 4 * 4 * 3];
        let result = LosslessConfig::new().encode(&pixels, 4, 4, PixelLayout::Rgb8);
        assert!(result.is_ok());
    }
3983
    #[test]
    fn test_lossy_gray8() {
        // Grayscale input → RGB expansion → VarDCT (XYB)
        let pixels = vec![128u8; 8 * 8];
        let result = LossyConfig::new(2.0)
            .with_gaborish(false)
            .encode_request(8, 8, PixelLayout::Gray8)
            .encode(&pixels);
        assert!(result.is_ok(), "lossy Gray8 should encode: {result:?}");
    }
3994
    #[test]
    fn test_lossy_gray_alpha8() {
        // Interleaved gray+alpha pairs (opaque alpha) through the lossy path.
        let pixels: Vec<u8> = (0..8 * 8).flat_map(|_| [128u8, 255]).collect();
        let result = LossyConfig::new(2.0)
            .with_gaborish(false)
            .encode_request(8, 8, PixelLayout::GrayAlpha8)
            .encode(&pixels);
        assert!(result.is_ok(), "lossy GrayAlpha8 should encode: {result:?}");
    }
4004
    #[test]
    fn test_lossy_gray16() {
        // 16-bit grayscale: build as u16 then view as bytes for the API.
        let pixels_u16: Vec<u16> = (0..8 * 8).map(|_| 32768u16).collect();
        let pixels: &[u8] = bytemuck::cast_slice(&pixels_u16);
        let result = LossyConfig::new(2.0)
            .with_gaborish(false)
            .encode_request(8, 8, PixelLayout::Gray16)
            .encode(pixels);
        assert!(result.is_ok(), "lossy Gray16 should encode: {result:?}");
    }
4015
    #[test]
    fn test_lossy_rgba_linear_f32() {
        // Linear-f32 RGBA input: build as f32 then view as bytes for the API.
        let pixels_f32: Vec<f32> = (0..8 * 8).flat_map(|_| [0.5f32, 0.3, 0.7, 1.0]).collect();
        let pixels: &[u8] = bytemuck::cast_slice(&pixels_f32);
        let result = LossyConfig::new(2.0)
            .with_gaborish(false)
            .encode_request(8, 8, PixelLayout::RgbaLinearF32)
            .encode(pixels);
        assert!(
            result.is_ok(),
            "lossy RgbaLinearF32 should encode: {result:?}"
        );
    }
4029
    #[test]
    fn test_lossy_gray_linear_f32() {
        // Linear-f32 grayscale input through the lossy path.
        let pixels_f32: Vec<f32> = (0..8 * 8).map(|_| 0.5f32).collect();
        let pixels: &[u8] = bytemuck::cast_slice(&pixels_f32);
        let result = LossyConfig::new(2.0)
            .with_gaborish(false)
            .encode_request(8, 8, PixelLayout::GrayLinearF32)
            .encode(pixels);
        assert!(
            result.is_ok(),
            "lossy GrayLinearF32 should encode: {result:?}"
        );
    }
4043
    #[test]
    fn test_lossless_grayalpha8() {
        // Interleaved gray+alpha pairs through the lossless path.
        let pixels: Vec<u8> = (0..8 * 8).flat_map(|_| [200u8, 255]).collect();
        let result = LosslessConfig::new().encode(&pixels, 8, 8, PixelLayout::GrayAlpha8);
        assert!(
            result.is_ok(),
            "lossless GrayAlpha8 should encode: {result:?}"
        );
    }
4053
    #[test]
    fn test_lossless_grayalpha16() {
        // 16-bit gray+alpha pairs, built as u16 and viewed as bytes.
        let pixels_u16: Vec<u16> = (0..8 * 8).flat_map(|_| [32768u16, 65535]).collect();
        let pixels: &[u8] = bytemuck::cast_slice(&pixels_u16);
        let result = LosslessConfig::new().encode(pixels, 8, 8, PixelLayout::GrayAlpha16);
        assert!(
            result.is_ok(),
            "lossless GrayAlpha16 should encode: {result:?}"
        );
    }
4064
    #[test]
    fn test_bgra_lossless() {
        // 4x4 red image in BGRA (B=0, G=0, R=255, A=255)
        let pixels = [0u8, 0, 255, 255].repeat(16);
        let result = LosslessConfig::new().encode(&pixels, 4, 4, PixelLayout::Bgra8);
        assert!(result.is_ok());
        let jxl = result.unwrap();
        // Output must start with the JXL codestream signature.
        assert_eq!(&jxl[..2], &[0xFF, 0x0A]);
    }
4074
    #[test]
    fn test_lossy_alpha_encodes() {
        // Lossy+alpha: VarDCT RGB + modular alpha extra channel
        let pixels = [255u8, 0, 0, 255].repeat(64);
        // Same buffer interpreted as BGRA first…
        let result =
            LossyConfig::new(2.0)
                .with_gaborish(false)
                .encode(&pixels, 8, 8, PixelLayout::Bgra8);
        assert!(
            result.is_ok(),
            "BGRA lossy encode failed: {:?}",
            result.err()
        );

        // …then as RGBA (default config, gaborish enabled).
        let result2 = LossyConfig::new(2.0).encode(&pixels, 8, 8, PixelLayout::Rgba8);
        assert!(
            result2.is_ok(),
            "RGBA lossy encode failed: {:?}",
            result2.err()
        );
    }
4096
    #[test]
    fn test_stop_cancellation() {
        use enough::Unstoppable;
        // Unstoppable should not cancel
        let pixels = vec![128u8; 4 * 4 * 3];
        let cfg = LosslessConfig::new();
        let result = cfg
            .encode_request(4, 4, PixelLayout::Rgb8)
            .with_stop(&Unstoppable)
            .encode(&pixels);
        assert!(result.is_ok());
    }
4109
    #[test]
    fn test_lossy_palette_encode() {
        // End-to-end lossy-palette encode, then a round-trip sanity check
        // with the independent jxl-oxide decoder.
        // 16x16 RGB image with 4 colors + slight noise
        let colors = [[255u8, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0]];
        let mut pixels = Vec::with_capacity(16 * 16 * 3);
        for y in 0..16u8 {
            for x in 0..16u8 {
                // 4x4 color tiles; deterministic pseudo-noise of -2..=2 per pixel.
                let ci = ((y / 4) * 4 + x / 4) as usize % 4;
                let noise = ((x.wrapping_mul(7).wrapping_add(y.wrapping_mul(13))) % 5) as i16 - 2;
                for &channel in &colors[ci][..3] {
                    let v = (channel as i16 + noise).clamp(0, 255) as u8;
                    pixels.push(v);
                }
            }
        }
        let cfg = LosslessConfig::new()
            .with_lossy_palette(true)
            .with_ans(true);
        let result = cfg.encode(&pixels, 16, 16, PixelLayout::Rgb8);
        assert!(
            result.is_ok(),
            "lossy palette encode failed: {:?}",
            result.err()
        );
        let jxl = result.unwrap();
        assert_eq!(&jxl[..2], &[0xFF, 0x0A], "JXL signature");

        // Verify jxl-oxide can parse and decode it
        let cursor = std::io::Cursor::new(&jxl);
        let reader = std::io::BufReader::new(cursor);
        let image = jxl_oxide::JxlImage::builder()
            .read(reader)
            .expect("jxl-oxide parse");
        assert!(
            image.width() > 0,
            "decoded image should have non-zero width"
        );
    }
4148
4149    #[test]
4150    fn test_lossy_palette_multi_group() {
4151        // 300x300 RGB image with ~20 dominant colors + noise (>256x256 = multi-group)
4152        let colors = [
4153            [255u8, 0, 0],
4154            [0, 255, 0],
4155            [0, 0, 255],
4156            [255, 255, 0],
4157            [255, 0, 255],
4158            [0, 255, 255],
4159            [128, 128, 128],
4160            [64, 64, 64],
4161        ];
4162        let mut pixels = Vec::with_capacity(300 * 300 * 3);
4163        for y in 0..300u32 {
4164            for x in 0..300u32 {
4165                let ci = ((y / 40) * 8 + x / 40) as usize % colors.len();
4166                let noise = ((x.wrapping_mul(7).wrapping_add(y.wrapping_mul(13))) % 7) as i16 - 3;
4167                for &channel in &colors[ci][..3] {
4168                    let v = (channel as i16 + noise).clamp(0, 255) as u8;
4169                    pixels.push(v);
4170                }
4171            }
4172        }
4173
4174        // Encode with lossy palette + ANS (multi-group)
4175        let cfg = LosslessConfig::new()
4176            .with_lossy_palette(true)
4177            .with_ans(true);
4178        let jxl = cfg
4179            .encode(&pixels, 300, 300, PixelLayout::Rgb8)
4180            .expect("lossy palette multi-group encode");
4181        assert_eq!(&jxl[..2], &[0xFF, 0x0A], "JXL signature");
4182        assert!(jxl.len() < 300 * 300 * 3, "should compress");
4183
4184        // Save to disk for inspection
4185        let out = crate::test_helpers::output_dir("lossy_palette");
4186        let jxl_out = out.join("lossy_palette_multi.jxl");
4187        let png_out = out.join("lossy_palette_multi.png");
4188        std::fs::write(&jxl_out, &jxl).ok();
4189        eprintln!(
4190            "LOSSY_PALETTE_MULTI test: encoded {} bytes ({}x{})",
4191            jxl.len(),
4192            300,
4193            300
4194        );
4195
4196        // Try djxl decode first for better error messages
4197        let djxl_result = std::process::Command::new("djxl")
4198            .args([jxl_out.to_str().unwrap(), png_out.to_str().unwrap()])
4199            .output();
4200        if let Ok(output) = djxl_result {
4201            eprintln!(
4202                "djxl: status={}, stderr={}",
4203                output.status,
4204                String::from_utf8_lossy(&output.stderr)
4205            );
4206        }
4207
4208        // Verify jxl-rs can decode it
4209        let decoded = crate::test_helpers::decode_with_jxl_rs(&jxl).expect("jxl-rs decode failed");
4210        assert_eq!(decoded.width, 300);
4211        assert_eq!(decoded.height, 300);
4212        assert_eq!(decoded.channels, 3);
4213
4214        // Verify lossy quality: each pixel should be within 50 of original (delta palette error)
4215        // decoded.pixels is f32 in [0.0, 1.0] — convert to u8 for comparison
4216        let mut max_error = 0i32;
4217        let mut error_pos = (0, 0, 0);
4218        for (i, (&orig, &dec)) in pixels.iter().zip(decoded.pixels.iter()).enumerate() {
4219            let dec_u8 = (dec * 255.0).round().clamp(0.0, 255.0) as u8;
4220            let diff = (orig as i32 - dec_u8 as i32).abs();
4221            if diff > max_error {
4222                max_error = diff;
4223                let pixel = i / 3;
4224                error_pos = (pixel % 300, pixel / 300, i % 3);
4225            }
4226        }
4227        let err_idx = error_pos.1 * 300 * 3 + error_pos.0 * 3 + error_pos.2;
4228        let dec_u8 = (decoded.pixels[err_idx] * 255.0).round().clamp(0.0, 255.0) as u8;
4229        eprintln!(
4230            "max_error={} at ({},{}) ch={}, orig={} decoded={}",
4231            max_error, error_pos.0, error_pos.1, error_pos.2, pixels[err_idx], dec_u8,
4232        );
4233        assert!(
4234            max_error <= 80,
4235            "lossy palette max error {} too large (expected <= 80)",
4236            max_error
4237        );
4238    }
4239
4240    #[test]
4241    fn test_palette_256_colors_regression() {
4242        // Regression test for palette+ANS checksum mismatch with many unique colors.
4243        // Root cause was u2S bit width bug in write_palette_transform (fixed Feb 17, 2026):
4244        // nb_colors selectors 1-2 used 11/14 bits instead of 10/12 bits. Triggered when
4245        // nb_colors >= 256 (selector 1). Two test cases:
4246        //
4247        // 1. 32x32 with 256 unique colors via standard API (passes 50% heuristic)
4248        // 2. 16x16 with 256 unique colors via internal API (bypasses heuristic)
4249        use crate::modular::channel::{Channel, ModularImage};
4250        use crate::modular::encode::write_modular_stream_with_palette;
4251
4252        // Test 1: 32x32 through standard API (256 colors, each used 4x)
4253        let mut pixels = Vec::with_capacity(32 * 32 * 3);
4254        for i in 0..1024u32 {
4255            let idx = (i / 4) as u8;
4256            pixels.push(idx);
4257            pixels.push(((idx as u32 * 7 + 13) & 0xFF) as u8);
4258            pixels.push(((idx as u32 * 31 + 97) & 0xFF) as u8);
4259        }
4260        let cfg = LosslessConfig::new().with_ans(true);
4261        let jxl = cfg
4262            .encode(&pixels, 32, 32, PixelLayout::Rgb8)
4263            .expect("palette 256-colors encode");
4264        let decoded = crate::test_helpers::decode_with_jxl_rs(&jxl).expect("jxl-rs decode failed");
4265        for (i, (&orig, &dec)) in pixels.iter().zip(decoded.pixels.iter()).enumerate() {
4266            let dec_u8 = (dec * 255.0).round().clamp(0.0, 255.0) as u8;
4267            assert_eq!(
4268                orig, dec_u8,
4269                "32x32: mismatch at byte {}: orig={} decoded={}",
4270                i, orig, dec_u8
4271            );
4272        }
4273
4274        // Test 2: 16x16 via internal API (bypasses 50% heuristic)
4275        let mut channels = Vec::new();
4276        for c in 0..3 {
4277            let mut ch = Channel::new(16, 16).unwrap();
4278            for y in 0..16 {
4279                for x in 0..16 {
4280                    let idx = y * 16 + x;
4281                    let val = match c {
4282                        0 => idx as i32,
4283                        1 => ((idx * 3 + 17) & 0xFF) as i32,
4284                        2 => (255 - idx) as i32,
4285                        _ => 0,
4286                    };
4287                    ch.set(x, y, val);
4288                }
4289            }
4290            channels.push(ch);
4291        }
4292        let image = ModularImage {
4293            channels,
4294            bit_depth: 8,
4295            is_grayscale: false,
4296            has_alpha: false,
4297        };
4298        let mut writer = crate::bit_writer::BitWriter::new();
4299        write_modular_stream_with_palette(&image, &mut writer, true, 0, 3)
4300            .expect("palette encode with 256 unique colors must not fail");
4301    }
4302
4303    #[test]
4304    fn test_16bit_tree_learning() {
4305        // Test multiple 16-bit scenarios that previously failed
4306        for &(w, h, layout, label) in &[
4307            (32u32, 32u32, PixelLayout::Rgb16, "32x32 RGB16"),
4308            (8, 8, PixelLayout::Rgba16, "8x8 RGBA16"),
4309            (8, 8, PixelLayout::Rgb16, "8x8 RGB16"),
4310            (16, 16, PixelLayout::Gray16, "16x16 Gray16"),
4311        ] {
4312            let nc = layout.bytes_per_pixel()
4313                / if layout.is_16bit() {
4314                    2
4315                } else if layout.is_f32() {
4316                    4
4317                } else {
4318                    1
4319                };
4320            let mut pixels = vec![0u16; (w * h) as usize * nc];
4321            for y in 0..h {
4322                for x in 0..w {
4323                    let idx = ((y * w + x) as usize) * nc;
4324                    pixels[idx] = (x * 2048) as u16;
4325                    if nc >= 2 {
4326                        pixels[idx + 1] = (y * 2048) as u16;
4327                    }
4328                    if nc >= 3 {
4329                        pixels[idx + 2] = ((x + y) * 1024) as u16;
4330                    }
4331                    if nc >= 4 {
4332                        pixels[idx + 3] = 65535; // opaque alpha
4333                    }
4334                }
4335            }
4336            let bytes: Vec<u8> = pixels.iter().flat_map(|v| v.to_ne_bytes()).collect();
4337
4338            let cfg = LosslessConfig::new().with_effort(7).with_ans(true);
4339            let jxl = cfg
4340                .encode(&bytes, w, h, layout)
4341                .unwrap_or_else(|e| panic!("{}: encode failed: {}", label, e));
4342
4343            let decoded = crate::test_helpers::decode_with_jxl_rs(&jxl)
4344                .unwrap_or_else(|e| panic!("{}: jxl-rs decode failed: {}", label, e));
4345            assert_eq!(decoded.width, w as usize, "{}: width", label);
4346            assert_eq!(decoded.height, h as usize, "{}: height", label);
4347
4348            let scale = 65535.0;
4349            let mut mismatches = 0;
4350            for (i, (&orig, &dec_f)) in pixels.iter().zip(decoded.pixels.iter()).enumerate() {
4351                let dec = (dec_f * scale).round().clamp(0.0, scale) as u16;
4352                if orig != dec && mismatches < 3 {
4353                    eprintln!("{}: mismatch[{}]: orig={} dec={}", label, i, orig, dec);
4354                    mismatches += 1;
4355                }
4356            }
4357            assert_eq!(mismatches, 0, "{}: {} mismatches", label, mismatches);
4358            eprintln!("{}: PASS ({} bytes)", label, jxl.len());
4359        }
4360    }
4361
4362    #[test]
4363    fn test_srgb_lut_matches_powf() {
4364        for i in 0u16..256 {
4365            let lut_val = SRGB_U8_TO_LINEAR[i as usize];
4366            let fast_val = srgb_to_linear_f(i as f32 / 255.0);
4367            let diff = (lut_val - fast_val).abs();
4368            // LUT uses f64 exact powf, srgb_to_linear_f uses fast_powf (~3e-5 relative error)
4369            let tol = fast_val.abs() * 5e-5 + 1e-7;
4370            assert!(
4371                diff <= tol,
4372                "sRGB LUT mismatch at {i}: LUT={lut_val}, fast={fast_val}, diff={diff}"
4373            );
4374        }
4375    }
4376
4377    #[test]
4378    fn test_quality_to_distance_f32_mapping() {
4379        // Verify the piecewise mapping at key points.
4380        assert_eq!(quality_to_distance(100.0), 0.0);
4381        assert_eq!(quality_to_distance(90.0), 1.0); // visually lossless
4382        assert_eq!(quality_to_distance(80.0), 1.5);
4383        assert_eq!(quality_to_distance(70.0), 2.0);
4384        assert_eq!(quality_to_distance(50.0), 4.0);
4385        assert_eq!(quality_to_distance(0.0), 9.0);
4386        // Clamped above 100
4387        assert_eq!(quality_to_distance(110.0), 0.0);
4388    }
4389
4390    #[test]
4391    fn test_calibrated_jxl_quality() {
4392        // Boundary: below table minimum clamps to first entry's output.
4393        assert_eq!(calibrated_jxl_quality(0.0), 5.0);
4394        // Boundary: above table maximum clamps to last entry's output.
4395        assert_eq!(calibrated_jxl_quality(100.0), 93.8);
4396        // Exact table entry.
4397        assert_eq!(calibrated_jxl_quality(90.0), 84.2);
4398        // Interpolated mid-point between (50, 48.5) and (55, 51.9).
4399        let mid = calibrated_jxl_quality(52.5);
4400        let expected = 48.5 + 0.5 * (51.9 - 48.5);
4401        assert!(
4402            (mid - expected).abs() < 0.01,
4403            "expected {expected}, got {mid}"
4404        );
4405    }
4406
4407    #[test]
4408    fn test_interp_quality_edge_cases() {
4409        let table = &[(10.0f32, 20.0f32), (20.0, 40.0), (30.0, 60.0)];
4410        // Below table
4411        assert_eq!(interp_quality(table, 5.0), 20.0);
4412        // Above table
4413        assert_eq!(interp_quality(table, 35.0), 60.0);
4414        // Exact match
4415        assert_eq!(interp_quality(table, 20.0), 40.0);
4416        // Midpoint
4417        assert!((interp_quality(table, 15.0) - 30.0).abs() < 0.001);
4418    }
4419}