Skip to main content

jxl_encoder/
api.rs

1// Copyright (c) Imazen LLC and the JPEG XL Project Authors.
2// Algorithms and constants derived from libjxl (BSD-3-Clause).
3// Licensed under AGPL-3.0-or-later. Commercial licenses at https://www.imazen.io/pricing
4
5//! Three-layer public API: Config → Request → Encoder.
6//!
7//! ```rust,no_run
8//! use jxl_encoder::{LosslessConfig, LossyConfig, PixelLayout};
9//!
10//! # let pixels = vec![0u8; 800 * 600 * 3];
11//! // Simple — one line, no request visible
12//! let jxl = LossyConfig::new(1.0)
13//!     .encode(&pixels, 800, 600, PixelLayout::Rgb8)?;
14//!
15//! // Full control — request layer for metadata, limits, cancellation
16//! let jxl = LosslessConfig::new()
17//!     .encode_request(800, 600, PixelLayout::Rgb8)
18//!     .encode(&pixels)?;
19//! # Ok::<_, jxl_encoder::At<jxl_encoder::EncodeError>>(())
20//! ```
21
22pub use crate::entropy_coding::Lz77Method;
23pub use enough::{Stop, Unstoppable};
24pub use whereat::{At, ResultAtExt, at};
25
26// ── Error type ──────────────────────────────────────────────────────────────
27
/// Error type for all encoding operations.
///
/// Marked `#[non_exhaustive]`: new variants may be added in future versions,
/// so downstream `match`es need a catch-all arm.
#[derive(Debug)]
#[non_exhaustive]
pub enum EncodeError {
    /// Input validation failed (wrong buffer size, zero dimensions, etc.).
    InvalidInput { message: String },
    /// Config validation failed (contradictory options, out-of-range values).
    InvalidConfig { message: String },
    /// Pixel layout not supported for this config/mode.
    UnsupportedPixelLayout(PixelLayout),
    /// A configured limit was exceeded.
    LimitExceeded { message: String },
    /// Encoding was cancelled via [`Stop`].
    Cancelled,
    /// Allocation failure; wraps the failed reservation's [`std::collections::TryReserveError`].
    Oom(std::collections::TryReserveError),
    /// I/O error. Only present with the `std` feature enabled.
    #[cfg(feature = "std")]
    Io(std::io::Error),
    /// Internal encoder error (should not happen — file a bug).
    Internal { message: String },
}
50
impl core::fmt::Display for EncodeError {
    /// Human-readable message: a short category prefix followed by the
    /// variant's detail message or the wrapped source error.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::InvalidInput { message } => write!(f, "invalid input: {message}"),
            Self::InvalidConfig { message } => write!(f, "invalid config: {message}"),
            Self::UnsupportedPixelLayout(layout) => {
                write!(f, "unsupported pixel layout: {layout:?}")
            }
            Self::LimitExceeded { message } => write!(f, "limit exceeded: {message}"),
            Self::Cancelled => write!(f, "encoding cancelled"),
            Self::Oom(e) => write!(f, "out of memory: {e}"),
            #[cfg(feature = "std")]
            Self::Io(e) => write!(f, "I/O error: {e}"),
            Self::Internal { message } => write!(f, "internal error: {message}"),
        }
    }
}
68
impl core::error::Error for EncodeError {
    /// Only `Oom` and (with `std`) `Io` wrap an underlying error; every
    /// other variant is a leaf and reports no source.
    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
        match self {
            Self::Oom(e) => Some(e),
            #[cfg(feature = "std")]
            Self::Io(e) => Some(e),
            _ => None,
        }
    }
}
79
// Map the crate-internal error type onto the public API error. Variants with
// an underlying cause keep it; anything not explicitly listed collapses into
// `Internal`, which callers should treat as an encoder bug.
impl From<crate::error::Error> for EncodeError {
    fn from(e: crate::error::Error) -> Self {
        match e {
            crate::error::Error::InvalidImageDimensions(w, h) => Self::InvalidInput {
                message: format!("invalid dimensions: {w}x{h}"),
            },
            crate::error::Error::ImageTooLarge(w, h, mw, mh) => Self::LimitExceeded {
                message: format!("image {w}x{h} exceeds max {mw}x{mh}"),
            },
            crate::error::Error::DimensionOverflow {
                width,
                height,
                channels,
            } => Self::InvalidInput {
                message: format!("dimension overflow: {width}x{height}x{channels} exceeds usize"),
            },
            crate::error::Error::InvalidInput(msg) => Self::InvalidInput { message: msg },
            crate::error::Error::OutOfMemory(e) => Self::Oom(e),
            #[cfg(feature = "std")]
            crate::error::Error::IoError(e) => Self::Io(e),
            crate::error::Error::Cancelled => Self::Cancelled,
            // Catch-all: internal variants without a public counterpart.
            other => Self::Internal {
                message: format!("{other}"),
            },
        }
    }
}
107
// Convenience conversion so `?` works directly on I/O results (std builds only).
#[cfg(feature = "std")]
impl From<std::io::Error> for EncodeError {
    fn from(e: std::io::Error) -> Self {
        Self::Io(e)
    }
}
114
// Every stop reason — whatever its cause — surfaces uniformly as `Cancelled`.
impl From<enough::StopReason> for EncodeError {
    fn from(_: enough::StopReason) -> Self {
        Self::Cancelled
    }
}
120
/// Result type for encoding operations.
///
/// Errors carry location traces via [`whereat::At`] for lightweight
/// production-safe error tracking without debuginfo or backtraces.
///
/// Note: importing this alias shadows the prelude `Result`; code in this
/// file that needs the plain form spells out `core::result::Result`.
pub type Result<T> = core::result::Result<T, At<EncodeError>>;
126
127// ── EncodeResult / EncodeStats ──────────────────────────────────────────────
128
/// Result of an encode operation. Holds encoded data and metrics.
///
/// After `encode()`, `data()` returns the JXL bytes. After `encode_into()`
/// or `encode_to()`, `data()` returns `None` (data already delivered).
/// Use `take_data()` to move the vec out without cloning.
#[derive(Clone, Debug)]
pub struct EncodeResult {
    // Encoded bytes; `None` when output was delivered elsewhere or has
    // already been moved out via `take_data()`.
    data: Option<Vec<u8>>,
    // Metrics gathered during the encode.
    stats: EncodeStats,
}
139
140impl EncodeResult {
141    /// Encoded JXL bytes (borrowing). None if data was written elsewhere.
142    pub fn data(&self) -> Option<&[u8]> {
143        self.data.as_deref()
144    }
145
146    /// Take the owned data vec, leaving None in its place.
147    pub fn take_data(&mut self) -> Option<Vec<u8>> {
148        self.data.take()
149    }
150
151    /// Encode metrics.
152    pub fn stats(&self) -> &EncodeStats {
153        &self.stats
154    }
155}
156
/// Encode metrics collected during encoding.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
pub struct EncodeStats {
    // Codestream size in bytes, before any container wrapping.
    codestream_size: usize,
    // Final output size in bytes (container included, if one was written).
    output_size: usize,
    // Lossy (VarDCT) vs lossless (modular).
    mode: EncodeMode,
    /// Index = raw strategy code (0..19), value = first-block count.
    strategy_counts: [u32; 19],
    // True when gaborish pre-filtering was enabled.
    gaborish: bool,
    // True when ANS entropy coding was used.
    ans: bool,
    // Iterations of the butteraugli quantization loop that were performed.
    butteraugli_iters: u32,
    // True when pixel-domain loss was enabled.
    pixel_domain_loss: bool,
}
171
impl EncodeStats {
    /// Size of the JXL codestream in bytes (before container wrapping).
    pub fn codestream_size(&self) -> usize {
        self.codestream_size
    }

    /// Size of the final output in bytes (after container wrapping, if any).
    pub fn output_size(&self) -> usize {
        self.output_size
    }

    /// Whether the encode was lossy or lossless. See [`EncodeMode`].
    pub fn mode(&self) -> EncodeMode {
        self.mode
    }

    /// Per-strategy first-block counts, indexed by raw strategy code (0..19).
    ///
    /// Useful for diagnosing which block strategies the encoder selected.
    pub fn strategy_counts(&self) -> &[u32; 19] {
        &self.strategy_counts
    }

    /// Whether gaborish pre-filtering was enabled.
    pub fn gaborish(&self) -> bool {
        self.gaborish
    }

    /// Whether ANS entropy coding was used.
    pub fn ans(&self) -> bool {
        self.ans
    }

    /// Number of butteraugli quantization loop iterations performed.
    pub fn butteraugli_iters(&self) -> u32 {
        self.butteraugli_iters
    }

    /// Whether pixel-domain loss was enabled.
    pub fn pixel_domain_loss(&self) -> bool {
        self.pixel_domain_loss
    }
}
213
/// Encoding mode used for a frame (reported via [`EncodeStats::mode`]).
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub enum EncodeMode {
    /// Lossy (VarDCT) encoding.
    #[default]
    Lossy,
    /// Lossless (modular) encoding.
    Lossless,
}
223
224// ── PixelLayout ─────────────────────────────────────────────────────────────
225
/// Describes the pixel format of input data.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum PixelLayout {
    /// 8-bit sRGB, 3 bytes per pixel (R, G, B).
    Rgb8,
    /// 8-bit sRGB + alpha, 4 bytes per pixel (R, G, B, A).
    Rgba8,
    /// 8-bit sRGB in BGR order, 3 bytes per pixel (B, G, R).
    Bgr8,
    /// 8-bit sRGB in BGRA order, 4 bytes per pixel (B, G, R, A).
    Bgra8,
    /// 8-bit grayscale, 1 byte per pixel.
    Gray8,
    /// 8-bit grayscale + alpha, 2 bytes per pixel.
    GrayAlpha8,
    /// 16-bit sRGB, 6 bytes per pixel (R, G, B) — native-endian u16.
    Rgb16,
    /// 16-bit sRGB + alpha, 8 bytes per pixel (R, G, B, A) — native-endian u16.
    Rgba16,
    /// 16-bit grayscale, 2 bytes per pixel — native-endian u16.
    Gray16,
    /// 16-bit grayscale + alpha, 4 bytes per pixel — native-endian u16.
    GrayAlpha16,
    /// Linear f32 RGB, 12 bytes per pixel. Skips sRGB→linear conversion.
    RgbLinearF32,
    /// Linear f32 RGBA, 16 bytes per pixel. Skips sRGB→linear conversion.
    RgbaLinearF32,
    /// Linear f32 grayscale, 4 bytes per pixel.
    GrayLinearF32,
    /// Linear f32 grayscale + alpha, 8 bytes per pixel.
    GrayAlphaLinearF32,
}

impl PixelLayout {
    /// Bytes per pixel for this layout (arms grouped by stride).
    pub const fn bytes_per_pixel(self) -> usize {
        match self {
            Self::Gray8 => 1,
            Self::GrayAlpha8 | Self::Gray16 => 2,
            Self::Rgb8 | Self::Bgr8 => 3,
            Self::Rgba8 | Self::Bgra8 | Self::GrayAlpha16 | Self::GrayLinearF32 => 4,
            Self::Rgb16 => 6,
            Self::Rgba16 | Self::GrayAlphaLinearF32 => 8,
            Self::RgbLinearF32 => 12,
            Self::RgbaLinearF32 => 16,
        }
    }

    /// Whether this layout uses linear (not gamma-encoded) values.
    pub const fn is_linear(self) -> bool {
        // In this enum the f32 layouts are exactly the linear-light ones.
        self.is_f32()
    }

    /// Whether this layout uses 16-bit samples.
    pub const fn is_16bit(self) -> bool {
        matches!(self, Self::Rgb16 | Self::Rgba16 | Self::Gray16 | Self::GrayAlpha16)
    }

    /// Whether this layout uses f32 samples.
    pub const fn is_f32(self) -> bool {
        matches!(
            self,
            Self::RgbLinearF32 | Self::RgbaLinearF32 | Self::GrayLinearF32 | Self::GrayAlphaLinearF32
        )
    }

    /// Whether this layout includes an alpha channel.
    pub const fn has_alpha(self) -> bool {
        // Complement of the alpha-less layouts; keep in sync when adding variants.
        !matches!(
            self,
            Self::Rgb8
                | Self::Bgr8
                | Self::Gray8
                | Self::Rgb16
                | Self::Gray16
                | Self::RgbLinearF32
                | Self::GrayLinearF32
        )
    }

    /// Whether this layout is grayscale (single color channel).
    pub const fn is_grayscale(self) -> bool {
        matches!(
            self,
            Self::Gray8
                | Self::GrayAlpha8
                | Self::Gray16
                | Self::GrayAlpha16
                | Self::GrayLinearF32
                | Self::GrayAlphaLinearF32
        )
    }
}
336
337// ── Quality ─────────────────────────────────────────────────────────────────
338
/// Quality specification for lossy encoding.
///
/// Either an explicit butteraugli distance or a familiar 0–100 percentage;
/// both are converted to a distance internally before encoding.
#[derive(Clone, Copy, Debug)]
#[non_exhaustive]
pub enum Quality {
    /// Butteraugli distance (1.0 = high quality, lower = better).
    Distance(f32),
    /// Percentage scale (0–100, 100 = mathematically lossless, invalid for lossy).
    Percent(u32),
}
348
349impl Quality {
350    /// Convert to butteraugli distance.
351    fn to_distance(self) -> core::result::Result<f32, EncodeError> {
352        match self {
353            Self::Distance(d) => {
354                if d <= 0.0 {
355                    return Err(EncodeError::InvalidConfig {
356                        message: format!("lossy distance must be > 0.0, got {d}"),
357                    });
358                }
359                Ok(d)
360            }
361            Self::Percent(q) => {
362                if q >= 100 {
363                    return Err(EncodeError::InvalidConfig {
364                        message: "quality 100 is lossless; use LosslessConfig instead".into(),
365                    });
366                }
367                Ok(percent_to_distance(q))
368            }
369        }
370    }
371}
372
/// Map an integer quality percentage (0–100+) to butteraugli distance.
///
/// Piecewise-linear: 90–100 → 0.0–1.0, 70–90 → 1.0–2.0, below 70 → 2.0–9.0.
fn percent_to_distance(quality: u32) -> f32 {
    match quality {
        100.. => 0.0,
        90..=99 => (100 - quality) as f32 / 10.0,
        70..=89 => 1.0 + (90 - quality) as f32 / 20.0,
        _ => 2.0 + (70 - quality) as f32 / 10.0,
    }
}
384
/// Convert quality on 0–100 scale to JXL butteraugli distance.
///
/// Matches the jxl-encoder's own `percent_to_distance` piecewise mapping:
/// - 90–100 → distance 0.0–1.0  (perceptually lossless zone)
/// - 70–90  → distance 1.0–2.0  (high quality)
/// - 0–70   → distance 2.0–9.0  (lower quality)
///
/// Out-of-range inputs are clamped to [0, 100] first.
#[must_use]
pub fn quality_to_distance(quality: f32) -> f32 {
    let q = quality.clamp(0.0, 100.0);
    if q >= 100.0 {
        return 0.0;
    }
    if q >= 90.0 {
        return (100.0 - q) / 10.0;
    }
    if q >= 70.0 {
        return 1.0 + (90.0 - q) / 20.0;
    }
    2.0 + (70.0 - q) / 10.0
}
404
/// Map generic quality (libjpeg-turbo scale) to JXL native quality.
///
/// Calibrated on CID22-512 corpus (209 images) to produce the same median
/// SSIMULACRA2 as libjpeg-turbo at each quality level. The native quality
/// is then mapped to Butteraugli distance by [`quality_to_distance`].
#[must_use]
pub fn calibrated_jxl_quality(generic_q: f32) -> f32 {
    let q = generic_q.clamp(0.0, 100.0);
    // (generic quality, calibrated native quality) anchor points.
    const TABLE: &[(f32, f32)] = &[
        (5.0, 5.0),
        (10.0, 5.0),
        (15.0, 5.0),
        (20.0, 5.0),
        (25.0, 9.3),
        (30.0, 22.7),
        (35.0, 33.0),
        (40.0, 38.8),
        (45.0, 43.8),
        (50.0, 48.5),
        (55.0, 51.9),
        (60.0, 55.1),
        (65.0, 58.0),
        (70.0, 61.3),
        (72.0, 63.2),
        (75.0, 65.5),
        (78.0, 67.9),
        (80.0, 69.1),
        (82.0, 71.8),
        (85.0, 76.1),
        (87.0, 79.3),
        (90.0, 84.2),
        (92.0, 86.9),
        (95.0, 91.2),
        (97.0, 92.8),
        (99.0, 93.8),
    ];
    interp_quality(TABLE, q)
}

/// Piecewise linear interpolation with clamping at table bounds.
///
/// `table` must be non-empty and sorted by its first component.
fn interp_quality(table: &[(f32, f32)], x: f32) -> f32 {
    let first = table[0];
    let last = table[table.len() - 1];
    if x <= first.0 {
        return first.1;
    }
    if x >= last.0 {
        return last.1;
    }
    // x lies strictly inside the table: find its segment and interpolate.
    for pair in table.windows(2) {
        let (x0, y0) = pair[0];
        let (x1, y1) = pair[1];
        if x <= x1 {
            return y0 + (x - x0) / (x1 - x0) * (y1 - y0);
        }
    }
    last.1
}
462
463// ── Supporting types ────────────────────────────────────────────────────────
464
/// Image metadata (ICC, EXIF, XMP, tone mapping) to embed in the JXL file.
#[derive(Clone, Debug, Default)]
pub struct ImageMetadata<'a> {
    icc_profile: Option<&'a [u8]>,
    exif: Option<&'a [u8]>,
    xmp: Option<&'a [u8]>,
    /// Peak display luminance in nits (cd/m²). `None` uses the JXL default (255.0 = SDR).
    intensity_target: Option<f32>,
    /// Minimum display luminance in nits. `None` uses the JXL default (0.0).
    min_nits: Option<f32>,
    /// Intrinsic display size `(width, height)`, if different from coded dimensions.
    intrinsic_size: Option<(u32, u32)>,
}

impl<'a> ImageMetadata<'a> {
    /// Create empty metadata (every field unset).
    pub fn new() -> Self {
        Self::default()
    }

    /// Attach an ICC color profile.
    pub fn with_icc_profile(self, data: &'a [u8]) -> Self {
        Self {
            icc_profile: Some(data),
            ..self
        }
    }

    /// Attach EXIF data.
    pub fn with_exif(self, data: &'a [u8]) -> Self {
        Self {
            exif: Some(data),
            ..self
        }
    }

    /// Attach XMP data.
    pub fn with_xmp(self, data: &'a [u8]) -> Self {
        Self {
            xmp: Some(data),
            ..self
        }
    }

    /// Get the ICC color profile, if set.
    pub fn icc_profile(&self) -> Option<&[u8]> {
        self.icc_profile
    }

    /// Get the EXIF data, if set.
    pub fn exif(&self) -> Option<&[u8]> {
        self.exif
    }

    /// Get the XMP data, if set.
    pub fn xmp(&self) -> Option<&[u8]> {
        self.xmp
    }

    /// Set the peak display luminance in nits (cd/m²) for HDR content.
    ///
    /// Written to the JXL codestream `ToneMapping.intensity_target` field.
    /// Default is 255.0 (SDR). Set to e.g. 4000.0 or 10000.0 for HDR.
    pub fn with_intensity_target(self, nits: f32) -> Self {
        Self {
            intensity_target: Some(nits),
            ..self
        }
    }

    /// Set the minimum display luminance in nits.
    ///
    /// Written to the JXL codestream `ToneMapping.min_nits` field.
    /// Default is 0.0.
    pub fn with_min_nits(self, nits: f32) -> Self {
        Self {
            min_nits: Some(nits),
            ..self
        }
    }

    /// Get the intensity target, if set.
    pub fn intensity_target(&self) -> Option<f32> {
        self.intensity_target
    }

    /// Get the min nits, if set.
    pub fn min_nits(&self) -> Option<f32> {
        self.min_nits
    }

    /// Set the intrinsic display size.
    ///
    /// When set, the image should be rendered at this `(width, height)` rather
    /// than the coded dimensions. Written to the JXL codestream `intrinsic_size` field.
    pub fn with_intrinsic_size(self, width: u32, height: u32) -> Self {
        Self {
            intrinsic_size: Some((width, height)),
            ..self
        }
    }

    /// Get the intrinsic size, if set.
    pub fn intrinsic_size(&self) -> Option<(u32, u32)> {
        self.intrinsic_size
    }
}
560
/// Resource limits for encoding.
///
/// Each limit is optional; `None` means "no restriction".
#[derive(Clone, Debug, Default)]
pub struct Limits {
    max_width: Option<u64>,
    max_height: Option<u64>,
    max_pixels: Option<u64>,
    max_memory_bytes: Option<u64>,
}

impl Limits {
    /// Create limits with no restrictions (all `None`).
    pub fn new() -> Self {
        Self::default()
    }

    /// Set maximum image width.
    pub fn with_max_width(self, w: u64) -> Self {
        Self {
            max_width: Some(w),
            ..self
        }
    }

    /// Set maximum image height.
    pub fn with_max_height(self, h: u64) -> Self {
        Self {
            max_height: Some(h),
            ..self
        }
    }

    /// Set maximum total pixels (width × height).
    pub fn with_max_pixels(self, p: u64) -> Self {
        Self {
            max_pixels: Some(p),
            ..self
        }
    }

    /// Set maximum memory bytes the encoder may allocate.
    pub fn with_max_memory_bytes(self, bytes: u64) -> Self {
        Self {
            max_memory_bytes: Some(bytes),
            ..self
        }
    }

    /// Get maximum width, if set.
    pub fn max_width(&self) -> Option<u64> {
        self.max_width
    }

    /// Get maximum height, if set.
    pub fn max_height(&self) -> Option<u64> {
        self.max_height
    }

    /// Get maximum pixels, if set.
    pub fn max_pixels(&self) -> Option<u64> {
        self.max_pixels
    }

    /// Get maximum memory bytes, if set.
    pub fn max_memory_bytes(&self) -> Option<u64> {
        self.max_memory_bytes
    }
}
620
621// ── Animation ──────────────────────────────────────────────────────────────
622
/// Animation timing parameters.
///
/// Frame durations ([`AnimationFrame::duration`]) are expressed in ticks;
/// one tick lasts `tps_denominator / tps_numerator` seconds, so the default
/// 100/1 gives 10 ms per tick.
#[derive(Clone, Debug)]
pub struct AnimationParams {
    /// Ticks per second numerator (default 100 = 10ms precision).
    pub tps_numerator: u32,
    /// Ticks per second denominator (default 1).
    pub tps_denominator: u32,
    /// Number of loops: 0 = infinite (default), >0 = play N times.
    pub num_loops: u32,
}

impl Default for AnimationParams {
    /// 100 ticks per second (10 ms resolution), looping forever.
    fn default() -> Self {
        Self {
            tps_numerator: 100,
            tps_denominator: 1,
            num_loops: 0,
        }
    }
}
643
/// A single frame in an animation sequence.
pub struct AnimationFrame<'a> {
    /// Raw pixel data (must match width/height/layout from the encode call).
    pub pixels: &'a [u8],
    /// Duration of this frame in ticks; one tick lasts
    /// `tps_denominator / tps_numerator` seconds (10 ms with the defaults).
    pub duration: u32,
}
651
652// ── LosslessConfig ──────────────────────────────────────────────────────────
653
/// Lossless (modular) encoding configuration.
///
/// Has a sensible `Default` — lossless has no quality ambiguity.
#[derive(Clone, Debug)]
pub struct LosslessConfig {
    // Effort level 1–10; drives the effort-derived defaults below.
    effort: u8,
    // Reference (libjxl-compatible) vs experimental algorithm choices.
    mode: EncoderMode,
    // ANS entropy coding (vs Huffman).
    use_ans: bool,
    // Squeeze (Haar wavelet) transform.
    squeeze: bool,
    // Content-adaptive tree learning.
    tree_learning: bool,
    // LZ77 backward references.
    lz77: bool,
    // LZ77 search strategy; only effective when `lz77` is enabled.
    lz77_method: Lz77Method,
    // Patch dictionary for repeated-pattern detection.
    patches: bool,
    // Near-lossless quantized delta palette (NOT pixel-exact).
    lossy_palette: bool,
    // 0 = ambient rayon pool, 1 = sequential, N >= 2 = dedicated pool.
    threads: usize,
}
670
impl Default for LosslessConfig {
    /// Effort 7 defaults (see [`LosslessConfig::with_effort`] for what each
    /// effort level enables).
    fn default() -> Self {
        Self::with_effort_level(7)
    }
}
676
677impl LosslessConfig {
678    fn with_effort_level(effort: u8) -> Self {
679        let profile = crate::effort::EffortProfile::lossless(effort, EncoderMode::Reference);
680        Self {
681            effort: profile.effort,
682            mode: EncoderMode::Reference,
683            use_ans: profile.use_ans,
684            tree_learning: profile.tree_learning,
685            squeeze: false, // squeeze hurts even with tree learning (14-62% larger on both photos and screenshots)
686            lz77: profile.lz77,
687            lz77_method: profile.lz77_method,
688            patches: profile.patches,
689            lossy_palette: false,
690            threads: 0,
691        }
692    }
693
694    /// Create a new lossless config with defaults (effort 7).
695    pub fn new() -> Self {
696        Self::default()
697    }
698
699    /// Set effort level (1–10). Higher effort = slower, better compression.
700    ///
701    /// This adjusts all effort-dependent defaults:
702    /// - **e1–3**: Huffman encoding
703    /// - **e4–6**: + ANS entropy coding
704    /// - **e7**: + content-adaptive tree learning, LZ77 RLE
705    /// - **e8**: + LZ77 greedy hash chain
706    /// - **e9–10**: + LZ77 optimal (Viterbi DP)
707    ///
708    /// Individual `with_*()` calls after `with_effort()` override these defaults.
709    pub fn with_effort(self, effort: u8) -> Self {
710        let mut new = Self::with_effort_level(effort);
711        // Preserve settings that aren't effort-derived
712        new.mode = self.mode;
713        new.squeeze = self.squeeze;
714        new
715    }
716
717    /// Set encoder mode (default: [`EncoderMode::Reference`]).
718    ///
719    /// `Reference` matches libjxl's algorithm choices for comparable output.
720    /// `Experimental` enables encoder-specific improvements.
721    pub fn with_mode(mut self, mode: EncoderMode) -> Self {
722        self.mode = mode;
723        self
724    }
725
726    /// Current encoder mode.
727    pub fn mode(&self) -> EncoderMode {
728        self.mode
729    }
730
731    /// Enable/disable patches (dictionary-based repeated pattern detection).
732    /// Default: true at effort >= 5. Huge wins on screenshots, zero cost on photos.
733    pub fn with_patches(mut self, enable: bool) -> Self {
734        self.patches = enable;
735        self
736    }
737
738    /// Enable/disable ANS entropy coding (default: true).
739    pub fn with_ans(mut self, enable: bool) -> Self {
740        self.use_ans = enable;
741        self
742    }
743
744    /// Enable/disable squeeze (Haar wavelet) transform (default: false).
745    ///
746    /// Squeeze is disabled by default because tree learning provides better
747    /// compression on both photos and screenshots. Squeeze can still be
748    /// enabled via `.with_squeeze(true)` for experimentation.
749    pub fn with_squeeze(mut self, enable: bool) -> Self {
750        self.squeeze = enable;
751        self
752    }
753
754    /// Enable/disable content-adaptive tree learning (default: false).
755    pub fn with_tree_learning(mut self, enable: bool) -> Self {
756        self.tree_learning = enable;
757        self
758    }
759
760    /// Enable/disable LZ77 backward references (default: false).
761    pub fn with_lz77(mut self, enable: bool) -> Self {
762        self.lz77 = enable;
763        self
764    }
765
766    /// Set LZ77 method (default: Greedy). Only effective when LZ77 is enabled.
767    pub fn with_lz77_method(mut self, method: Lz77Method) -> Self {
768        self.lz77_method = method;
769        self
770    }
771
772    /// Enable/disable lossy delta palette (default: false).
773    ///
774    /// When enabled, uses quantized palette with delta entries and error diffusion
775    /// for near-lossless encoding. This is NOT pixel-exact — it trades some color
776    /// accuracy for significantly smaller files on images with many colors.
777    /// Matching libjxl's modular lossy palette mode.
778    pub fn with_lossy_palette(mut self, enable: bool) -> Self {
779        self.lossy_palette = enable;
780        self
781    }
782
783    /// Set thread count for parallel encoding.
784    ///
785    /// - `0` (default): use the ambient rayon pool. The caller can control
786    ///   thread count by wrapping the encode call in `pool.install(|| ...)`.
787    /// - `1`: force sequential encoding (no rayon).
788    /// - `N >= 2`: create a dedicated N-thread pool for this encode.
789    ///
790    /// Requires the `parallel` feature. When `parallel` is not enabled,
791    /// this value is ignored and encoding is always sequential.
792    pub fn with_threads(mut self, threads: usize) -> Self {
793        self.threads = threads;
794        self
795    }
796
797    // ── Getters ───────────────────────────────────────────────────────
798
799    /// Current effort level.
800    pub fn effort(&self) -> u8 {
801        self.effort
802    }
803
804    /// Whether ANS entropy coding is enabled.
805    pub fn ans(&self) -> bool {
806        self.use_ans
807    }
808
809    /// Whether squeeze (Haar wavelet) transform is enabled.
810    pub fn squeeze(&self) -> bool {
811        self.squeeze
812    }
813
814    /// Whether content-adaptive tree learning is enabled.
815    pub fn tree_learning(&self) -> bool {
816        self.tree_learning
817    }
818
819    /// Whether LZ77 backward references are enabled.
820    pub fn lz77(&self) -> bool {
821        self.lz77
822    }
823
824    /// Current LZ77 method.
825    pub fn lz77_method(&self) -> Lz77Method {
826        self.lz77_method
827    }
828
829    /// Whether patches (dictionary-based repeated pattern detection) are enabled.
830    pub fn patches(&self) -> bool {
831        self.patches
832    }
833
834    /// Whether lossy delta palette is enabled.
835    pub fn lossy_palette(&self) -> bool {
836        self.lossy_palette
837    }
838
839    /// Thread count (0 = auto, 1 = sequential).
840    pub fn threads(&self) -> usize {
841        self.threads
842    }
843
844    // ── Request / fluent encode ─────────────────────────────────────
845
846    /// Create an encode request for an image with this config.
847    ///
848    /// Use this when you need to attach metadata, limits, or cancellation.
849    pub fn encode_request(
850        &self,
851        width: u32,
852        height: u32,
853        layout: PixelLayout,
854    ) -> EncodeRequest<'_> {
855        EncodeRequest {
856            config: ConfigRef::Lossless(self),
857            width,
858            height,
859            layout,
860            metadata: None,
861            limits: None,
862            stop: None,
863            source_gamma: None,
864            color_encoding: None,
865        }
866    }
867
868    /// Encode pixels directly with this config. Shortcut for simple cases.
869    ///
870    /// ```rust,no_run
871    /// # let pixels = vec![0u8; 100 * 100 * 3];
872    /// let jxl = jxl_encoder::LosslessConfig::new()
873    ///     .encode(&pixels, 100, 100, jxl_encoder::PixelLayout::Rgb8)?;
874    /// # Ok::<_, jxl_encoder::At<jxl_encoder::EncodeError>>(())
875    /// ```
876    #[track_caller]
877    pub fn encode(
878        &self,
879        pixels: &[u8],
880        width: u32,
881        height: u32,
882        layout: PixelLayout,
883    ) -> Result<Vec<u8>> {
884        self.encode_request(width, height, layout).encode(pixels)
885    }
886
887    /// Encode pixels, appending to an existing buffer.
888    #[track_caller]
889    pub fn encode_into(
890        &self,
891        pixels: &[u8],
892        width: u32,
893        height: u32,
894        layout: PixelLayout,
895        out: &mut Vec<u8>,
896    ) -> Result<()> {
897        self.encode_request(width, height, layout)
898            .encode_into(pixels, out)
899            .map(|_| ())
900    }
901
902    /// Encode a multi-frame animation as a lossless JXL.
903    ///
904    /// Each frame must have the same dimensions and pixel layout.
905    /// Returns the complete JXL codestream bytes.
906    #[track_caller]
907    pub fn encode_animation(
908        &self,
909        width: u32,
910        height: u32,
911        layout: PixelLayout,
912        animation: &AnimationParams,
913        frames: &[AnimationFrame<'_>],
914    ) -> Result<Vec<u8>> {
915        encode_animation_lossless(self, width, height, layout, animation, frames).map_err(at)
916    }
917}
918
919// ── EncoderMode ──────────────────────────────────────────────────────────────
920
/// Controls whether the encoder matches libjxl's algorithm choices or uses
/// its own improvements.
///
/// Both modes produce valid JPEG XL bitstreams decodable by any conformant
/// decoder. The difference is in *encoder-side* decisions: strategy selection
/// heuristics, cost models, entropy coding parameters, tree learning, etc.
/// The bitstream format itself never changes — only which bits get written.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum EncoderMode {
    /// Match libjxl's algorithm choices at the configured effort level.
    ///
    /// Output is statistically equivalent to `cjxl` at the same effort and
    /// distance — same RD curve within measurement noise. Use this when
    /// comparing against libjxl or when reproducibility matters.
    #[default]
    Reference,

    /// Use encoder-specific improvements and research features.
    ///
    /// May produce better rate-distortion performance than libjxl at the
    /// same effort level, but output will differ. Use this for production
    /// encoding where quality per byte is the goal.
    Experimental,
}
944
945// ── ProgressiveMode ──────────────────────────────────────────────────────────
946
/// Progressive encoding mode for VarDCT.
///
/// Progressive encoding splits AC coefficients across multiple passes by
/// reducing precision. Decoders can render a coarse preview after early passes,
/// improving user experience for web delivery.
///
/// The shift mechanism works by right-shifting quantized coefficients before
/// encoding in early passes. The decoder left-shifts and accumulates, so the
/// final result is exact (lossless reconstruction of the quantized coefficients)
/// — progressiveness costs passes, not fidelity.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum ProgressiveMode {
    /// Single pass (default). No progressive rendering.
    #[default]
    Single,
    /// 2-pass quantized progressive.
    ///
    /// - Pass 0: All AC coefficients right-shifted by 1 bit (coarse)
    /// - Pass 1: Residual at full precision
    ///
    /// Provides quick 2x-downsampled preview, then full quality refinement.
    QuantizedAcFullAc,
    /// 3-pass progressive (DC/VLF → LF → Full AC).
    ///
    /// - Pass 0: All AC coefficients right-shifted by 2 bits (very coarse, 8x downsample hint)
    /// - Pass 1: Residual right-shifted by 1 bit (medium, 4x downsample hint)
    /// - Pass 2: Final residual at full precision
    ///
    /// Provides staged refinement: blurry preview → sharper → final.
    DcVlfLfAc,
}
977
978// ── LossyConfig ─────────────────────────────────────────────────────────────
979
/// Lossy (VarDCT) encoding configuration.
///
/// No `Default` — distance/quality is a required choice.
#[derive(Clone, Debug)]
pub struct LossyConfig {
    // Butteraugli target distance; lower = higher quality (see `new`).
    distance: f32,
    // Effort level 1–10; the effort-derived fields below default from it.
    effort: u8,
    // Reference (libjxl-matching) vs Experimental algorithm choices.
    mode: EncoderMode,
    // ANS entropy coding (vs Huffman); effort-derived default.
    use_ans: bool,
    // Gaborish inverse pre-filter; effort-derived default.
    gaborish: bool,
    // Noise synthesis; opt-in (default false).
    noise: bool,
    // Wiener denoising pre-filter; opt-in, forces `noise` on when enabled.
    denoise: bool,
    // Error diffusion in AC quantization; effort-derived default.
    error_diffusion: bool,
    // Pixel-domain loss in AC strategy selection; effort-derived default.
    pixel_domain_loss: bool,
    // LZ77 backward references; effort-derived default.
    lz77: bool,
    // LZ77 search method (RLE/greedy/optimal); effort-derived default.
    lz77_method: Lz77Method,
    // Force one AC strategy for every block; `None` = automatic selection.
    force_strategy: Option<u8>,
    // Cap on DCT transform size (8/16/32/64); `None` = unrestricted.
    max_strategy_size: Option<u8>,
    // Patch (repeated-pattern dictionary) detection; effort-derived default.
    patches: bool,
    // Manual spline overlays; opt-in (default `None`).
    splines: Option<Vec<crate::vardct::splines::Spline>>,
    // Multi-pass progressive AC encoding; opt-in (default `Single`).
    progressive: ProgressiveMode,
    // Separate DC frame (libjxl `progressive_dc >= 1`); opt-in (default false).
    lf_frame: bool,
    #[cfg(feature = "butteraugli-loop")]
    butteraugli_iters: u32,
    // Set once `with_butteraugli_iters` is called, so `with_effort` keeps
    // the explicit value instead of the effort default.
    #[cfg(feature = "butteraugli-loop")]
    butteraugli_iters_explicit: bool,
    #[cfg(feature = "ssim2-loop")]
    ssim2_iters: u32,
    #[cfg(feature = "zensim-loop")]
    zensim_iters: u32,
    // 0 = ambient rayon pool, 1 = sequential, N >= 2 = dedicated pool.
    threads: usize,
}
1012
1013impl LossyConfig {
    /// Create with butteraugli distance (1.0 = high quality). Default effort 7.
    pub fn new(distance: f32) -> Self {
        Self::new_with_effort(distance, 7)
    }

    /// Shared constructor: derives every effort-dependent default from the
    /// [`crate::effort::EffortProfile`] for `effort` in `Reference` mode.
    /// Opt-in features (noise, denoise, splines, progressive, LfFrame,
    /// forced strategies, threads) start disabled/unset here.
    fn new_with_effort(distance: f32, effort: u8) -> Self {
        // The profile owns the effort→feature mapping. Note we store
        // `profile.effort` back, so the profile may normalize the
        // requested value — TODO confirm against EffortProfile::lossy.
        let profile = crate::effort::EffortProfile::lossy(effort, EncoderMode::Reference);
        Self {
            distance,
            effort: profile.effort,
            mode: EncoderMode::Reference,
            use_ans: profile.use_ans,
            gaborish: profile.gaborish,
            noise: false,
            denoise: false,
            error_diffusion: profile.error_diffusion,
            pixel_domain_loss: profile.pixel_domain_loss,
            lz77: profile.lz77,
            lz77_method: profile.lz77_method,
            force_strategy: None,
            max_strategy_size: None,
            patches: profile.patches,
            splines: None,
            progressive: ProgressiveMode::Single,
            lf_frame: false,
            #[cfg(feature = "butteraugli-loop")]
            butteraugli_iters: profile.butteraugli_iters,
            #[cfg(feature = "butteraugli-loop")]
            butteraugli_iters_explicit: false,
            #[cfg(feature = "ssim2-loop")]
            ssim2_iters: 0,
            #[cfg(feature = "zensim-loop")]
            zensim_iters: 0,
            threads: 0,
        }
    }
1050
1051    /// Create from a [`Quality`] specification.
1052    pub fn from_quality(quality: Quality) -> core::result::Result<Self, EncodeError> {
1053        let distance = quality.to_distance()?;
1054        Ok(Self::new(distance))
1055    }
1056
1057    /// Set effort level (1–10). Higher effort = slower, better compression.
1058    ///
1059    /// This adjusts all effort-dependent defaults:
1060    /// - **e1–3**: DCT8 only, Huffman, no gaborish/patches/butteraugli
1061    /// - **e4**: + ANS entropy coding, custom coefficient orders
1062    /// - **e5**: + gaborish, pixel-domain loss, AC strategy search, AdjustQuantBlockAC
1063    /// - **e6**: + DCT4x8/AFV strategies, non-aligned eval, EPF dynamic sharpness
1064    /// - **e7**: + patches, error diffusion, CfL two-pass, LZ77 RLE, DCT64 strategies
1065    /// - **e8**: + butteraugli loop (2 iters), LZ77 greedy, WP param search (2 modes)
1066    /// - **e9–10**: + LZ77 optimal (Viterbi DP), 4 butteraugli iters, WP search (5 modes)
1067    ///
1068    /// Individual `with_*()` calls after `with_effort()` override these defaults.
1069    pub fn with_effort(self, effort: u8) -> Self {
1070        let mut new = Self::new_with_effort(self.distance, effort);
1071        // Preserve settings that are never effort-derived (always opt-in)
1072        new.mode = self.mode;
1073        new.noise = self.noise;
1074        new.denoise = self.denoise;
1075        new.force_strategy = self.force_strategy;
1076        new.max_strategy_size = self.max_strategy_size;
1077        new.splines = self.splines;
1078        new.progressive = self.progressive;
1079        // Preserve explicit butteraugli override
1080        #[cfg(feature = "butteraugli-loop")]
1081        if self.butteraugli_iters_explicit {
1082            new.butteraugli_iters = self.butteraugli_iters;
1083            new.butteraugli_iters_explicit = true;
1084        }
1085        #[cfg(feature = "ssim2-loop")]
1086        {
1087            new.ssim2_iters = self.ssim2_iters;
1088        }
1089        #[cfg(feature = "zensim-loop")]
1090        {
1091            new.zensim_iters = self.zensim_iters;
1092        }
1093        new
1094    }
1095
1096    /// Set encoder mode (default: [`EncoderMode::Reference`]).
1097    ///
1098    /// `Reference` matches libjxl's algorithm choices for comparable output.
1099    /// `Experimental` enables encoder-specific improvements.
1100    pub fn with_mode(mut self, mode: EncoderMode) -> Self {
1101        self.mode = mode;
1102        self
1103    }
1104
1105    /// Current encoder mode.
1106    pub fn mode(&self) -> EncoderMode {
1107        self.mode
1108    }
1109
1110    /// Enable/disable ANS entropy coding (default: true).
1111    pub fn with_ans(mut self, enable: bool) -> Self {
1112        self.use_ans = enable;
1113        self
1114    }
1115
1116    /// Enable/disable gaborish inverse pre-filter (default: true).
1117    pub fn with_gaborish(mut self, enable: bool) -> Self {
1118        self.gaborish = enable;
1119        self
1120    }
1121
1122    /// Enable/disable noise synthesis (default: false).
1123    pub fn with_noise(mut self, enable: bool) -> Self {
1124        self.noise = enable;
1125        self
1126    }
1127
1128    /// Enable/disable Wiener denoising pre-filter (default: false). Implies noise.
1129    pub fn with_denoise(mut self, enable: bool) -> Self {
1130        self.denoise = enable;
1131        if enable {
1132            self.noise = true;
1133        }
1134        self
1135    }
1136
1137    /// Enable/disable error diffusion in AC quantization (default: false).
1138    ///
1139    /// Error diffusion propagates 1/4 of the quantization error to the next
1140    /// coefficient in zigzag order. Note: libjxl's `QuantizeBlockAC` accepts
1141    /// this parameter but never references it — the feature is effectively a
1142    /// no-op in the reference encoder. Our implementation actually performs
1143    /// the diffusion, which can hurt quality on certain content (bright features
1144    /// in dark regions), especially when combined with gaborish.
1145    pub fn with_error_diffusion(mut self, enable: bool) -> Self {
1146        self.error_diffusion = enable;
1147        self
1148    }
1149
1150    /// Enable/disable pixel-domain loss in strategy selection (default: true).
1151    pub fn with_pixel_domain_loss(mut self, enable: bool) -> Self {
1152        self.pixel_domain_loss = enable;
1153        self
1154    }
1155
1156    /// Enable/disable LZ77 backward references (default: false).
1157    pub fn with_lz77(mut self, enable: bool) -> Self {
1158        self.lz77 = enable;
1159        self
1160    }
1161
1162    /// Set LZ77 method (default: Greedy).
1163    pub fn with_lz77_method(mut self, method: Lz77Method) -> Self {
1164        self.lz77_method = method;
1165        self
1166    }
1167
1168    /// Force a specific AC strategy for all blocks. `None` for auto-selection.
1169    pub fn with_force_strategy(mut self, strategy: Option<u8>) -> Self {
1170        self.force_strategy = strategy;
1171        self
1172    }
1173
1174    /// Limit the maximum AC strategy transform size.
1175    ///
1176    /// Controls the largest DCT transform the encoder will consider:
1177    /// - `8`: Only 8×8-class transforms (DCT8, DCT4x4, DCT4x8, AFV, IDENTITY, DCT2x2)
1178    /// - `16`: Up to 16×16 (adds DCT16x16, DCT16x8, DCT8x16)
1179    /// - `32`: Up to 32×32 (adds DCT32x32, DCT32x16, DCT16x32)
1180    /// - `64`: No restriction (adds DCT64x64, DCT64x32, DCT32x64) — the default
1181    ///
1182    /// `None` means no restriction (same as `64`). Values are clamped to the
1183    /// nearest valid size.
1184    pub fn with_max_strategy_size(mut self, size: Option<u8>) -> Self {
1185        self.max_strategy_size = size;
1186        self
1187    }
1188
1189    /// Enable/disable patches (dictionary-based repeated pattern detection).
1190    /// Default: true. Huge wins on screenshots, zero cost on photos.
1191    pub fn with_patches(mut self, enable: bool) -> Self {
1192        self.patches = enable;
1193        self
1194    }
1195
1196    /// Set manual splines to overlay on the image.
1197    ///
1198    /// Splines are Gaussian-blurred parametric curves overlaid additively.
1199    /// They encode thin features (power lines, horizons) efficiently.
1200    /// The encoder subtracts splines from XYB before VarDCT; the decoder
1201    /// adds them back after reconstruction. Default: `None`.
1202    pub fn with_splines(mut self, splines: Vec<crate::vardct::splines::Spline>) -> Self {
1203        self.splines = Some(splines);
1204        self
1205    }
1206
1207    /// Set progressive encoding mode (default: Single = no progressive).
1208    ///
1209    /// Progressive encoding splits AC coefficients across multiple passes,
1210    /// allowing decoders to render coarse previews before the full file is received.
1211    pub fn with_progressive(mut self, mode: ProgressiveMode) -> Self {
1212        self.progressive = mode;
1213        self
1214    }
1215
1216    /// Enable LfFrame (separate DC frame).
1217    ///
1218    /// When true, DC coefficients are encoded as a separate modular frame
1219    /// before the main VarDCT frame, matching libjxl's `progressive_dc >= 1`.
1220    pub fn with_lf_frame(mut self, enable: bool) -> Self {
1221        self.lf_frame = enable;
1222        self
1223    }
1224
1225    /// Set butteraugli quantization loop iterations explicitly.
1226    ///
1227    /// Overrides the automatic effort-based default (effort 7: 0, effort 8: 2, effort 9+: 4).
1228    /// Requires the `butteraugli-loop` feature.
1229    #[cfg(feature = "butteraugli-loop")]
1230    pub fn with_butteraugli_iters(mut self, n: u32) -> Self {
1231        self.butteraugli_iters = n;
1232        self.butteraugli_iters_explicit = true;
1233        self
1234    }
1235
1236    /// Set SSIM2 quantization loop iterations.
1237    ///
1238    /// Alternative to butteraugli loop: uses per-block linear RGB RMSE + full-image SSIM2.
1239    /// Requires the `ssim2-loop` feature.
1240    #[cfg(feature = "ssim2-loop")]
1241    pub fn with_ssim2_iters(mut self, n: u32) -> Self {
1242        self.ssim2_iters = n;
1243        self
1244    }
1245
1246    /// Set zensim quantization loop iterations.
1247    ///
1248    /// Alternative to butteraugli loop: uses zensim's psychovisual metric for
1249    /// both global quality tracking and per-pixel spatial error map (diffmap in XYB space).
1250    /// Also refines AC strategy by splitting large transforms with high perceptual error.
1251    /// Can stack with butteraugli loop (butteraugli runs first, then zensim fine-tunes).
1252    /// Requires the `zensim-loop` feature.
1253    #[cfg(feature = "zensim-loop")]
1254    pub fn with_zensim_iters(mut self, n: u32) -> Self {
1255        self.zensim_iters = n;
1256        self
1257    }
1258
1259    /// Set thread count for parallel encoding.
1260    ///
1261    /// - `0` (default): use the ambient rayon pool. The caller can control
1262    ///   thread count by wrapping the encode call in `pool.install(|| ...)`.
1263    /// - `1`: force sequential encoding (no rayon).
1264    /// - `N >= 2`: create a dedicated N-thread pool for this encode.
1265    ///
1266    /// Requires the `parallel` feature. When `parallel` is not enabled,
1267    /// this value is ignored and encoding is always sequential.
1268    pub fn with_threads(mut self, threads: usize) -> Self {
1269        self.threads = threads;
1270        self
1271    }
1272
    // ── Getters ───────────────────────────────────────────────────────

    /// Current butteraugli distance.
    pub fn distance(&self) -> f32 {
        self.distance
    }

    /// Current effort level (1–10, possibly normalized by the effort profile).
    pub fn effort(&self) -> u8 {
        self.effort
    }

    /// Whether ANS entropy coding is enabled (effort-derived default).
    pub fn ans(&self) -> bool {
        self.use_ans
    }

    /// Whether gaborish inverse pre-filter is enabled (effort-derived default).
    pub fn gaborish(&self) -> bool {
        self.gaborish
    }

    /// Whether noise synthesis is enabled (default: false; forced on by denoise).
    pub fn noise(&self) -> bool {
        self.noise
    }

    /// Whether Wiener denoising pre-filter is enabled (default: false).
    pub fn denoise(&self) -> bool {
        self.denoise
    }

    /// Whether error diffusion in AC quantization is enabled (effort-derived default).
    pub fn error_diffusion(&self) -> bool {
        self.error_diffusion
    }

    /// Whether pixel-domain loss is enabled (effort-derived default).
    pub fn pixel_domain_loss(&self) -> bool {
        self.pixel_domain_loss
    }

    /// Whether LZ77 backward references are enabled (effort-derived default).
    pub fn lz77(&self) -> bool {
        self.lz77
    }

    /// Current LZ77 method (effort-derived default).
    pub fn lz77_method(&self) -> Lz77Method {
        self.lz77_method
    }

    /// Forced AC strategy, if any (`None` = automatic selection).
    pub fn force_strategy(&self) -> Option<u8> {
        self.force_strategy
    }

    /// Maximum AC strategy transform size, if set (`None` = unrestricted).
    pub fn max_strategy_size(&self) -> Option<u8> {
        self.max_strategy_size
    }

    /// Current progressive mode (default: [`ProgressiveMode::Single`]).
    pub fn progressive(&self) -> ProgressiveMode {
        self.progressive
    }

    /// Whether LfFrame (separate DC frame) is enabled (default: false).
    pub fn lf_frame(&self) -> bool {
        self.lf_frame
    }

    /// Butteraugli quantization loop iterations (effort-derived unless set
    /// explicitly via [`Self::with_butteraugli_iters`]).
    #[cfg(feature = "butteraugli-loop")]
    pub fn butteraugli_iters(&self) -> u32 {
        self.butteraugli_iters
    }

    /// Thread count (0 = auto, 1 = sequential).
    pub fn threads(&self) -> usize {
        self.threads
    }
1355
1356    // ── Request / fluent encode ─────────────────────────────────────
1357
1358    /// Create an encode request for an image with this config.
1359    ///
1360    /// Use this when you need to attach metadata, limits, or cancellation.
1361    pub fn encode_request(
1362        &self,
1363        width: u32,
1364        height: u32,
1365        layout: PixelLayout,
1366    ) -> EncodeRequest<'_> {
1367        EncodeRequest {
1368            config: ConfigRef::Lossy(self),
1369            width,
1370            height,
1371            layout,
1372            metadata: None,
1373            limits: None,
1374            stop: None,
1375            source_gamma: None,
1376            color_encoding: None,
1377        }
1378    }
1379
1380    /// Encode pixels directly with this config. Shortcut for simple cases.
1381    ///
1382    /// ```rust,no_run
1383    /// # let pixels = vec![0u8; 100 * 100 * 3];
1384    /// let jxl = jxl_encoder::LossyConfig::new(1.0)
1385    ///     .encode(&pixels, 100, 100, jxl_encoder::PixelLayout::Rgb8)?;
1386    /// # Ok::<_, jxl_encoder::At<jxl_encoder::EncodeError>>(())
1387    /// ```
1388    #[track_caller]
1389    pub fn encode(
1390        &self,
1391        pixels: &[u8],
1392        width: u32,
1393        height: u32,
1394        layout: PixelLayout,
1395    ) -> Result<Vec<u8>> {
1396        self.encode_request(width, height, layout).encode(pixels)
1397    }
1398
1399    /// Encode pixels, appending to an existing buffer.
1400    #[track_caller]
1401    pub fn encode_into(
1402        &self,
1403        pixels: &[u8],
1404        width: u32,
1405        height: u32,
1406        layout: PixelLayout,
1407        out: &mut Vec<u8>,
1408    ) -> Result<()> {
1409        self.encode_request(width, height, layout)
1410            .encode_into(pixels, out)
1411            .map(|_| ())
1412    }
1413
1414    /// Encode a multi-frame animation as a lossy JXL.
1415    ///
1416    /// Each frame must have the same dimensions and pixel layout.
1417    /// Returns the complete JXL codestream bytes.
1418    #[track_caller]
1419    pub fn encode_animation(
1420        &self,
1421        width: u32,
1422        height: u32,
1423        layout: PixelLayout,
1424        animation: &AnimationParams,
1425        frames: &[AnimationFrame<'_>],
1426    ) -> Result<Vec<u8>> {
1427        encode_animation_lossy(self, width, height, layout, animation, frames).map_err(at)
1428    }
1429}
1430
1431// ── EncodeRequest ───────────────────────────────────────────────────────────
1432
/// Internal config reference (lossy or lossless).
// `Copy` is cheap: both variants are just shared references.
#[derive(Clone, Copy, Debug)]
enum ConfigRef<'a> {
    /// Borrowed lossless (modular) configuration.
    Lossless(&'a LosslessConfig),
    /// Borrowed lossy (VarDCT) configuration.
    Lossy(&'a LossyConfig),
}
1439
/// An encoding request — binds config + image dimensions + pixel layout.
///
/// Created via [`LosslessConfig::encode_request`] or [`LossyConfig::encode_request`].
pub struct EncodeRequest<'a> {
    // Which config drives the encode (selects modular vs VarDCT path).
    config: ConfigRef<'a>,
    width: u32,
    height: u32,
    layout: PixelLayout,
    // Optional ICC/EXIF/XMP and display metadata; EXIF/XMP trigger container wrapping.
    metadata: Option<&'a ImageMetadata<'a>>,
    // Optional resource limits checked before encoding starts.
    limits: Option<&'a Limits>,
    // Optional cooperative cancellation token.
    stop: Option<&'a dyn Stop>,
    // Custom gamma for linearizing u8/u16 input (see `with_source_gamma`).
    source_gamma: Option<f32>,
    // Explicit header color encoding; overrides gamma/default signaling.
    color_encoding: Option<crate::headers::color_encoding::ColorEncoding>,
}
1454
1455impl<'a> EncodeRequest<'a> {
1456    /// Attach image metadata (ICC, EXIF, XMP).
1457    pub fn with_metadata(mut self, meta: &'a ImageMetadata<'a>) -> Self {
1458        self.metadata = Some(meta);
1459        self
1460    }
1461
1462    /// Attach resource limits.
1463    pub fn with_limits(mut self, limits: &'a Limits) -> Self {
1464        self.limits = Some(limits);
1465        self
1466    }
1467
1468    /// Attach a cooperative cancellation token.
1469    ///
1470    /// The encoder will check this periodically and return
1471    /// [`EncodeError::Cancelled`] if stopped.
1472    pub fn with_stop(mut self, stop: &'a dyn Stop) -> Self {
1473        self.stop = Some(stop);
1474        self
1475    }
1476
1477    /// Specify that source pixels use a custom gamma transfer function.
1478    ///
1479    /// When set, the encoder linearizes u8/u16 pixels with `pixel ^ (1/gamma)`
1480    /// instead of the sRGB transfer function, and writes `have_gamma=true` in
1481    /// the JXL header. This matches cjxl's behavior for PNGs with gAMA chunks.
1482    ///
1483    /// Example: `0.45455` for standard gamma 2.2 encoding (gAMA=45455).
1484    pub fn with_source_gamma(mut self, gamma: f32) -> Self {
1485        self.source_gamma = Some(gamma);
1486        self
1487    }
1488
1489    /// Override the color encoding written to the JXL header.
1490    ///
1491    /// When set, this color encoding is used instead of the default (sRGB for
1492    /// u8/u16, linear sRGB for f32) or any gamma derived from
1493    /// [`with_source_gamma`](Self::with_source_gamma).
1494    ///
1495    /// Use this for HDR content (PQ, HLG) or non-sRGB primaries (BT.2020, Display P3).
1496    ///
1497    /// Note: this only affects the signaled color encoding in the JXL header.
1498    /// Pixel linearization for lossy encoding is still controlled by
1499    /// `with_source_gamma()`. For float input, pixels are assumed already linear.
1500    pub fn with_color_encoding(
1501        mut self,
1502        ce: crate::headers::color_encoding::ColorEncoding,
1503    ) -> Self {
1504        self.color_encoding = Some(ce);
1505        self
1506    }
1507
1508    /// Encode pixels and return the JXL bytes.
1509    #[track_caller]
1510    pub fn encode(self, pixels: &[u8]) -> Result<Vec<u8>> {
1511        self.encode_inner(pixels)
1512            .map(|mut r| r.take_data().unwrap())
1513            .map_err(at)
1514    }
1515
1516    /// Encode pixels and return the JXL bytes together with [`EncodeStats`].
1517    #[track_caller]
1518    pub fn encode_with_stats(self, pixels: &[u8]) -> Result<EncodeResult> {
1519        self.encode_inner(pixels).map_err(at)
1520    }
1521
1522    /// Encode pixels, appending to an existing buffer. Returns metrics.
1523    #[track_caller]
1524    pub fn encode_into(self, pixels: &[u8], out: &mut Vec<u8>) -> Result<EncodeResult> {
1525        let mut result = self.encode_inner(pixels).map_err(at)?;
1526        if let Some(data) = result.data.take() {
1527            out.extend_from_slice(&data);
1528        }
1529        Ok(result)
1530    }
1531
1532    /// Encode pixels, writing to a `std::io::Write` destination. Returns metrics.
1533    #[cfg(feature = "std")]
1534    #[track_caller]
1535    pub fn encode_to(self, pixels: &[u8], mut dest: impl std::io::Write) -> Result<EncodeResult> {
1536        let mut result = self.encode_inner(pixels).map_err(at)?;
1537        if let Some(data) = result.data.take() {
1538            dest.write_all(&data)
1539                .map_err(|e| at(EncodeError::from(e)))?;
1540        }
1541        Ok(result)
1542    }
1543
    /// Shared encode path for both configs: validate → encode → wrap → stats.
    fn encode_inner(&self, pixels: &[u8]) -> core::result::Result<EncodeResult, EncodeError> {
        // Reject bad dimensions/buffer sizes and enforce caller limits up front.
        self.validate_pixels(pixels)?;
        self.check_limits()?;

        let threads = match self.config {
            ConfigRef::Lossless(cfg) => cfg.threads,
            ConfigRef::Lossy(cfg) => cfg.threads,
        };

        // Run the actual encode in the requested thread context
        // (0 = ambient pool, 1 = sequential, N = dedicated pool).
        let (codestream, mut stats) = run_with_threads(threads, || match self.config {
            ConfigRef::Lossless(cfg) => self.encode_lossless(cfg, pixels),
            ConfigRef::Lossy(cfg) => self.encode_lossy(cfg, pixels),
        })?;

        stats.codestream_size = codestream.len();

        // Wrap in container if metadata (EXIF/XMP) is present; otherwise the
        // bare codestream is the output (ICC is written inline by the encode path).
        let output = if let Some(meta) = self.metadata
            && (meta.exif.is_some() || meta.xmp.is_some())
        {
            crate::container::wrap_in_container(&codestream, meta.exif, meta.xmp)
        } else {
            codestream
        };

        stats.output_size = output.len();

        // Invariant relied on by `encode()`: data is always Some here.
        Ok(EncodeResult {
            data: Some(output),
            stats,
        })
    }
1576
1577    fn validate_pixels(&self, pixels: &[u8]) -> core::result::Result<(), EncodeError> {
1578        let w = self.width as usize;
1579        let h = self.height as usize;
1580        if w == 0 || h == 0 {
1581            return Err(EncodeError::InvalidInput {
1582                message: format!("zero dimensions: {w}x{h}"),
1583            });
1584        }
1585        // JXL spec limits each dimension to 2^30.
1586        const MAX_JXL_DIM: u32 = 1 << 30;
1587        if self.width > MAX_JXL_DIM || self.height > MAX_JXL_DIM {
1588            return Err(EncodeError::LimitExceeded {
1589                message: format!(
1590                    "image {}x{} exceeds JXL spec maximum of {MAX_JXL_DIM} per dimension",
1591                    self.width, self.height
1592                ),
1593            });
1594        }
1595        let expected = w
1596            .checked_mul(h)
1597            .and_then(|n| n.checked_mul(self.layout.bytes_per_pixel()));
1598        match expected {
1599            Some(expected) if pixels.len() == expected => Ok(()),
1600            Some(expected) => Err(EncodeError::InvalidInput {
1601                message: format!(
1602                    "pixel buffer size mismatch: expected {expected} bytes for {w}x{h} {:?}, got {}",
1603                    self.layout,
1604                    pixels.len()
1605                ),
1606            }),
1607            None => Err(EncodeError::InvalidInput {
1608                message: "image dimensions overflow".into(),
1609            }),
1610        }
1611    }
1612
1613    fn check_limits(&self) -> core::result::Result<(), EncodeError> {
1614        let Some(limits) = self.limits else {
1615            return Ok(());
1616        };
1617        let w = self.width as u64;
1618        let h = self.height as u64;
1619        if let Some(max_w) = limits.max_width
1620            && w > max_w
1621        {
1622            return Err(EncodeError::LimitExceeded {
1623                message: format!("width {w} > max {max_w}"),
1624            });
1625        }
1626        if let Some(max_h) = limits.max_height
1627            && h > max_h
1628        {
1629            return Err(EncodeError::LimitExceeded {
1630                message: format!("height {h} > max {max_h}"),
1631            });
1632        }
1633        if let Some(max_px) = limits.max_pixels
1634            && w * h > max_px
1635        {
1636            return Err(EncodeError::LimitExceeded {
1637                message: format!("pixels {}x{} = {} > max {max_px}", w, h, w * h),
1638            });
1639        }
1640        if let Some(max_mem) = limits.max_memory_bytes {
1641            // Conservative estimate: ~40 bytes per pixel covers XYB (3×f32=12),
1642            // quantization fields, strategy maps, and entropy coding buffers.
1643            let estimated = w.saturating_mul(h).saturating_mul(40);
1644            if estimated > max_mem {
1645                return Err(EncodeError::LimitExceeded {
1646                    message: format!(
1647                        "estimated memory {estimated} bytes > max {max_mem} bytes \
1648                         (for {w}x{h} image)"
1649                    ),
1650                });
1651            }
1652        }
1653        Ok(())
1654    }
1655
1656    // ── Lossless path ───────────────────────────────────────────────────
1657
1658    fn encode_lossless(
1659        &self,
1660        cfg: &LosslessConfig,
1661        pixels: &[u8],
1662    ) -> core::result::Result<(Vec<u8>, EncodeStats), EncodeError> {
1663        use crate::bit_writer::BitWriter;
1664        use crate::headers::color_encoding::ColorSpace;
1665        use crate::headers::{ColorEncoding, FileHeader};
1666        use crate::modular::channel::ModularImage;
1667        use crate::modular::frame::{FrameEncoder, FrameEncoderOptions};
1668
1669        let w = self.width as usize;
1670        let h = self.height as usize;
1671
1672        // Normalize pixels to RGB8 for detection if needed (BGR swap)
1673        let rgb_pixels;
1674        let detection_pixels: &[u8] = match self.layout {
1675            PixelLayout::Bgr8 => {
1676                rgb_pixels = bgr_to_rgb(pixels, 3);
1677                &rgb_pixels
1678            }
1679            PixelLayout::Bgra8 => {
1680                rgb_pixels = bgr_to_rgb(pixels, 4);
1681                &rgb_pixels
1682            }
1683            _ => {
1684                rgb_pixels = Vec::new();
1685                let _ = &rgb_pixels;
1686                pixels
1687            }
1688        };
1689
1690        // Build ModularImage from pixel layout
1691        let mut image = match self.layout {
1692            PixelLayout::Rgb8 => ModularImage::from_rgb8(pixels, w, h),
1693            PixelLayout::Rgba8 => ModularImage::from_rgba8(pixels, w, h),
1694            PixelLayout::Bgr8 => ModularImage::from_rgb8(&bgr_to_rgb(pixels, 3), w, h),
1695            PixelLayout::Bgra8 => ModularImage::from_rgba8(&bgr_to_rgb(pixels, 4), w, h),
1696            PixelLayout::Gray8 => ModularImage::from_gray8(pixels, w, h),
1697            PixelLayout::GrayAlpha8 => ModularImage::from_grayalpha8(pixels, w, h),
1698            PixelLayout::Rgb16 => ModularImage::from_rgb16_native(pixels, w, h),
1699            PixelLayout::Rgba16 => ModularImage::from_rgba16_native(pixels, w, h),
1700            PixelLayout::Gray16 => ModularImage::from_gray16_native(pixels, w, h),
1701            PixelLayout::GrayAlpha16 => ModularImage::from_grayalpha16_native(pixels, w, h),
1702            other => return Err(EncodeError::UnsupportedPixelLayout(other)),
1703        }
1704        .map_err(EncodeError::from)?;
1705
1706        // Detect patches for lossless mode (RGB 8-bit only, non-grayscale)
1707        let num_channels = self.layout.bytes_per_pixel();
1708        let can_use_patches =
1709            cfg.patches && !image.is_grayscale && image.bit_depth <= 8 && num_channels >= 3;
1710        let patches_data = if can_use_patches {
1711            crate::vardct::patches::find_and_build_lossless(
1712                detection_pixels,
1713                w,
1714                h,
1715                num_channels,
1716                image.bit_depth,
1717            )
1718        } else {
1719            None
1720        };
1721
1722        // Build file header
1723        let mut file_header = if image.is_grayscale {
1724            FileHeader::new_gray(self.width, self.height)
1725        } else if image.has_alpha {
1726            FileHeader::new_rgba(self.width, self.height)
1727        } else {
1728            FileHeader::new_rgb(self.width, self.height)
1729        };
1730        if image.bit_depth == 16 {
1731            file_header.metadata.bit_depth = crate::headers::file_header::BitDepth::uint16();
1732            for ec in &mut file_header.metadata.extra_channels {
1733                ec.bit_depth = crate::headers::file_header::BitDepth::uint16();
1734            }
1735        }
1736        if let Some(meta) = self.metadata {
1737            if meta.icc_profile.is_some() {
1738                file_header.metadata.color_encoding.want_icc = true;
1739            }
1740            if let Some(it) = meta.intensity_target {
1741                file_header.metadata.intensity_target = it;
1742            }
1743            if let Some(mn) = meta.min_nits {
1744                file_header.metadata.min_nits = mn;
1745            }
1746            if let Some((w, h)) = meta.intrinsic_size {
1747                file_header.metadata.have_intrinsic_size = true;
1748                file_header.metadata.intrinsic_width = w;
1749                file_header.metadata.intrinsic_height = h;
1750            }
1751        }
1752
1753        // Write codestream
1754        let mut writer = BitWriter::new();
1755        file_header.write(&mut writer).map_err(EncodeError::from)?;
1756        if let Some(meta) = self.metadata
1757            && let Some(icc) = meta.icc_profile
1758        {
1759            crate::icc::write_icc(icc, &mut writer).map_err(EncodeError::from)?;
1760        }
1761        writer.zero_pad_to_byte();
1762
1763        // Write reference frame and subtract patches from image if detected
1764        if let Some(ref pd) = patches_data {
1765            let lossless_profile = crate::effort::EffortProfile::lossless(cfg.effort, cfg.mode);
1766            crate::vardct::patches::encode_reference_frame_rgb(
1767                pd,
1768                image.bit_depth,
1769                cfg.use_ans,
1770                lossless_profile.patch_ref_tree_learning,
1771                &mut writer,
1772            )
1773            .map_err(EncodeError::from)?;
1774            writer.zero_pad_to_byte();
1775            let bd = image.bit_depth;
1776            crate::vardct::patches::subtract_patches_modular(&mut image, pd, bd);
1777        }
1778
1779        // Encode frame
1780        let use_tree_learning = cfg.tree_learning;
1781        let frame_encoder = FrameEncoder::new(
1782            w,
1783            h,
1784            FrameEncoderOptions {
1785                use_modular: true,
1786                effort: cfg.effort,
1787                use_ans: cfg.use_ans,
1788                use_tree_learning,
1789                use_squeeze: cfg.squeeze,
1790                enable_lz77: cfg.lz77,
1791                lz77_method: cfg.lz77_method,
1792                lossy_palette: cfg.lossy_palette,
1793                encoder_mode: cfg.mode,
1794                profile: crate::effort::EffortProfile::lossless(cfg.effort, cfg.mode),
1795                have_animation: false,
1796                duration: 0,
1797                is_last: true,
1798                crop: None,
1799                skip_rct: false,
1800            },
1801        );
1802        let color_encoding = if let Some(ce) = self.color_encoding.clone() {
1803            // Explicit color encoding overrides source_gamma and defaults.
1804            // Adjust for grayscale if needed.
1805            if image.is_grayscale && ce.color_space != ColorSpace::Gray {
1806                ColorEncoding {
1807                    color_space: ColorSpace::Gray,
1808                    ..ce
1809                }
1810            } else {
1811                ce
1812            }
1813        } else if let Some(gamma) = self.source_gamma {
1814            if image.is_grayscale {
1815                ColorEncoding::gray_with_gamma(gamma)
1816            } else {
1817                ColorEncoding::with_gamma(gamma)
1818            }
1819        } else if image.is_grayscale {
1820            ColorEncoding::gray()
1821        } else {
1822            ColorEncoding::srgb()
1823        };
1824        frame_encoder
1825            .encode_modular_with_patches(
1826                &image,
1827                &color_encoding,
1828                &mut writer,
1829                patches_data.as_ref(),
1830            )
1831            .map_err(EncodeError::from)?;
1832
1833        let stats = EncodeStats {
1834            mode: EncodeMode::Lossless,
1835            ans: cfg.use_ans,
1836            ..Default::default()
1837        };
1838        Ok((writer.finish_with_padding(), stats))
1839    }
1840
1841    // ── Lossy path ──────────────────────────────────────────────────────
1842
1843    fn encode_lossy(
1844        &self,
1845        cfg: &LossyConfig,
1846        pixels: &[u8],
1847    ) -> core::result::Result<(Vec<u8>, EncodeStats), EncodeError> {
1848        let w = self.width as usize;
1849        let h = self.height as usize;
1850
1851        // Build linear f32 RGB and extract alpha from input layout.
1852        // Grayscale layouts are expanded to RGB (R=G=B) for VarDCT encoding.
1853        // When source_gamma is set, use gamma linearization instead of sRGB TF.
1854        let gamma = self.source_gamma;
1855        let (linear_rgb, alpha, bit_depth_16) = match self.layout {
1856            PixelLayout::Rgb8 => {
1857                let linear = if let Some(g) = gamma {
1858                    gamma_u8_to_linear_f32(pixels, 3, g)
1859                } else {
1860                    srgb_u8_to_linear_f32(pixels, 3)
1861                };
1862                (linear, None, false)
1863            }
1864            PixelLayout::Bgr8 => {
1865                let rgb = bgr_to_rgb(pixels, 3);
1866                let linear = if let Some(g) = gamma {
1867                    gamma_u8_to_linear_f32(&rgb, 3, g)
1868                } else {
1869                    srgb_u8_to_linear_f32(&rgb, 3)
1870                };
1871                (linear, None, false)
1872            }
1873            PixelLayout::Rgba8 => {
1874                let rgb = if let Some(g) = gamma {
1875                    gamma_u8_to_linear_f32(pixels, 4, g)
1876                } else {
1877                    srgb_u8_to_linear_f32(pixels, 4)
1878                };
1879                let alpha = extract_alpha(pixels, 4, 3);
1880                (rgb, Some(alpha), false)
1881            }
1882            PixelLayout::Bgra8 => {
1883                let swapped = bgr_to_rgb(pixels, 4);
1884                let rgb = if let Some(g) = gamma {
1885                    gamma_u8_to_linear_f32(&swapped, 4, g)
1886                } else {
1887                    srgb_u8_to_linear_f32(&swapped, 4)
1888                };
1889                let alpha = extract_alpha(pixels, 4, 3);
1890                (rgb, Some(alpha), false)
1891            }
1892            PixelLayout::Gray8 => {
1893                let rgb = if let Some(g) = gamma {
1894                    gamma_gray_u8_to_linear_f32_rgb(pixels, 1, g)
1895                } else {
1896                    gray_u8_to_linear_f32_rgb(pixels, 1)
1897                };
1898                (rgb, None, false)
1899            }
1900            PixelLayout::GrayAlpha8 => {
1901                let rgb = if let Some(g) = gamma {
1902                    gamma_gray_u8_to_linear_f32_rgb(pixels, 2, g)
1903                } else {
1904                    gray_u8_to_linear_f32_rgb(pixels, 2)
1905                };
1906                let alpha = extract_alpha(pixels, 2, 1);
1907                (rgb, Some(alpha), false)
1908            }
1909            PixelLayout::Rgb16 => {
1910                let linear = if let Some(g) = gamma {
1911                    gamma_u16_to_linear_f32(pixels, 3, g)
1912                } else {
1913                    srgb_u16_to_linear_f32(pixels, 3)
1914                };
1915                (linear, None, true)
1916            }
1917            PixelLayout::Rgba16 => {
1918                let rgb = if let Some(g) = gamma {
1919                    gamma_u16_to_linear_f32(pixels, 4, g)
1920                } else {
1921                    srgb_u16_to_linear_f32(pixels, 4)
1922                };
1923                let alpha = extract_alpha_u16(pixels, 4, 3);
1924                (rgb, Some(alpha), true)
1925            }
1926            PixelLayout::Gray16 => {
1927                let rgb = if let Some(g) = gamma {
1928                    gamma_gray_u16_to_linear_f32_rgb(pixels, 1, g)
1929                } else {
1930                    gray_u16_to_linear_f32_rgb(pixels, 1)
1931                };
1932                (rgb, None, true)
1933            }
1934            PixelLayout::GrayAlpha16 => {
1935                let rgb = if let Some(g) = gamma {
1936                    gamma_gray_u16_to_linear_f32_rgb(pixels, 2, g)
1937                } else {
1938                    gray_u16_to_linear_f32_rgb(pixels, 2)
1939                };
1940                let alpha = extract_alpha_u16(pixels, 2, 1);
1941                (rgb, Some(alpha), true)
1942            }
1943            PixelLayout::RgbLinearF32 => {
1944                let floats: &[f32] = bytemuck::cast_slice(pixels);
1945                (floats.to_vec(), None, false)
1946            }
1947            PixelLayout::RgbaLinearF32 => {
1948                let floats: &[f32] = bytemuck::cast_slice(pixels);
1949                let rgb: Vec<f32> = floats
1950                    .chunks(4)
1951                    .flat_map(|px| [px[0], px[1], px[2]])
1952                    .collect();
1953                let alpha = extract_alpha_f32(floats, 4, 3);
1954                (rgb, Some(alpha), false)
1955            }
1956            PixelLayout::GrayLinearF32 => {
1957                let floats: &[f32] = bytemuck::cast_slice(pixels);
1958                (gray_f32_to_linear_f32_rgb(floats, 1), None, false)
1959            }
1960            PixelLayout::GrayAlphaLinearF32 => {
1961                let floats: &[f32] = bytemuck::cast_slice(pixels);
1962                let rgb = gray_f32_to_linear_f32_rgb(floats, 2);
1963                let alpha = extract_alpha_f32(floats, 2, 1);
1964                (rgb, Some(alpha), false)
1965            }
1966        };
1967
1968        let mut profile = crate::effort::EffortProfile::lossy(cfg.effort, cfg.mode);
1969
1970        // Apply max_strategy_size to profile flags
1971        if let Some(max_size) = cfg.max_strategy_size {
1972            if max_size < 16 {
1973                profile.try_dct16 = false;
1974            }
1975            if max_size < 32 {
1976                profile.try_dct32 = false;
1977            }
1978            if max_size < 64 {
1979                profile.try_dct64 = false;
1980            }
1981        }
1982
1983        let mut enc = crate::vardct::VarDctEncoder::new(cfg.distance);
1984        enc.effort = cfg.effort;
1985        enc.profile = profile;
1986        enc.use_ans = cfg.use_ans;
1987        enc.optimize_codes = enc.profile.optimize_codes;
1988        enc.custom_orders = enc.profile.custom_orders;
1989        enc.ac_strategy_enabled = enc.profile.ac_strategy_enabled;
1990        enc.enable_noise = cfg.noise;
1991        enc.enable_denoise = cfg.denoise;
1992        // libjxl gates gaborish at distance > 0.5 (enc_frame.cc:281)
1993        enc.enable_gaborish = cfg.gaborish && cfg.distance > 0.5;
1994        enc.error_diffusion = cfg.error_diffusion;
1995        enc.pixel_domain_loss = cfg.pixel_domain_loss;
1996        enc.enable_lz77 = cfg.lz77;
1997        enc.lz77_method = cfg.lz77_method;
1998        enc.force_strategy = cfg.force_strategy;
1999        enc.enable_patches = cfg.patches;
2000        enc.encoder_mode = cfg.mode;
2001        enc.splines = cfg.splines.clone();
2002        enc.is_grayscale = self.layout.is_grayscale();
2003        enc.progressive = cfg.progressive;
2004        enc.use_lf_frame = cfg.lf_frame;
2005        #[cfg(feature = "butteraugli-loop")]
2006        {
2007            enc.butteraugli_iters = cfg.butteraugli_iters;
2008        }
2009        #[cfg(feature = "ssim2-loop")]
2010        {
2011            enc.ssim2_iters = cfg.ssim2_iters;
2012        }
2013        #[cfg(feature = "zensim-loop")]
2014        {
2015            enc.zensim_iters = cfg.zensim_iters;
2016        }
2017
2018        enc.bit_depth_16 = bit_depth_16;
2019        enc.source_gamma = self.source_gamma;
2020        enc.color_encoding = self.color_encoding.clone();
2021
2022        // Tone mapping and intrinsic size from metadata
2023        if let Some(meta) = self.metadata {
2024            if let Some(it) = meta.intensity_target {
2025                enc.intensity_target = it;
2026            }
2027            if let Some(mn) = meta.min_nits {
2028                enc.min_nits = mn;
2029            }
2030            if meta.intrinsic_size.is_some() {
2031                enc.intrinsic_size = meta.intrinsic_size;
2032            }
2033        }
2034
2035        // ICC profile from metadata
2036        if let Some(meta) = self.metadata
2037            && let Some(icc) = meta.icc_profile
2038        {
2039            enc.icc_profile = Some(icc.to_vec());
2040        }
2041
2042        let output = enc
2043            .encode(w, h, &linear_rgb, alpha.as_deref())
2044            .map_err(EncodeError::from)?;
2045
2046        #[cfg(feature = "butteraugli-loop")]
2047        let butteraugli_iters_actual = cfg.butteraugli_iters;
2048        #[cfg(not(feature = "butteraugli-loop"))]
2049        let butteraugli_iters_actual = 0u32;
2050
2051        let stats = EncodeStats {
2052            mode: EncodeMode::Lossy,
2053            strategy_counts: output.strategy_counts,
2054            gaborish: cfg.gaborish,
2055            ans: cfg.use_ans,
2056            butteraugli_iters: butteraugli_iters_actual,
2057            pixel_domain_loss: cfg.pixel_domain_loss,
2058            ..Default::default()
2059        };
2060        Ok((output.data, stats))
2061    }
2062}
2063
2064// ── Streaming Encoders ──────────────────────────────────────────────────────
2065
2066/// Streaming lossy (VarDCT) encoder.
2067///
2068/// Accepts pixel rows incrementally via [`push_rows`](Self::push_rows), then
2069/// encodes on [`finish`](Self::finish). This allows callers to free source pixel
2070/// buffers as rows are pushed, rather than materializing the entire image in
2071/// memory before encoding.
2072///
2073/// ```rust,no_run
2074/// use jxl_encoder::{LossyConfig, PixelLayout};
2075///
2076/// let mut enc = LossyConfig::new(1.0)
2077///     .encoder(800, 600, PixelLayout::Rgb8)?;
2078///
2079/// // Push rows from a streaming source (e.g. PNG decoder)
2080/// # let row_bytes = 800 * 3;
2081/// # let source_rows = vec![0u8; row_bytes * 600];
2082/// for chunk in source_rows.chunks(row_bytes * 100) {
2083///     enc.push_rows(chunk, 100)?;
2084/// }
2085///
2086/// let jxl_bytes = enc.finish()?;
2087/// # Ok::<_, jxl_encoder::At<jxl_encoder::EncodeError>>(())
2088/// ```
pub struct LossyEncoder {
    /// Settings cloned from the originating [`LossyConfig`].
    cfg: LossyConfig,
    /// Image width in pixels.
    width: u32,
    /// Image height in pixels — the total number of rows expected.
    height: u32,
    /// Layout of the bytes passed to `push_rows`.
    layout: PixelLayout,
    /// Rows accepted so far; must equal `height` before `finish`.
    rows_pushed: u32,
    /// Accumulated interleaved linear-light RGB samples (3 floats per pixel;
    /// grayscale input is expanded to R=G=B on push).
    linear_rgb: Vec<f32>,
    /// Accumulated alpha plane; present only for layouts with alpha.
    alpha: Option<Vec<u8>>,
    /// True when the source layout is 16-bit per channel.
    bit_depth_16: bool,
    /// Optional ICC profile to embed.
    icc_profile: Option<Vec<u8>>,
    /// Optional EXIF payload (forces container output on finish).
    exif: Option<Vec<u8>>,
    /// Optional XMP payload (forces container output on finish).
    xmp: Option<Vec<u8>>,
    /// Custom transfer-function gamma of the source, if not sRGB.
    source_gamma: Option<f32>,
    /// Explicit color-encoding override written to the JXL header.
    color_encoding: Option<crate::headers::color_encoding::ColorEncoding>,
    /// Peak display luminance in nits.
    intensity_target: f32,
    /// Minimum display luminance in nits.
    min_nits: f32,
    /// Intrinsic display size override as (width, height).
    intrinsic_size: Option<(u32, u32)>,
}
2107
2108impl LossyEncoder {
2109    /// Attach an ICC color profile.
2110    pub fn with_icc_profile(mut self, data: &[u8]) -> Self {
2111        self.icc_profile = Some(data.to_vec());
2112        self
2113    }
2114
2115    /// Attach EXIF data.
2116    pub fn with_exif(mut self, data: &[u8]) -> Self {
2117        self.exif = Some(data.to_vec());
2118        self
2119    }
2120
2121    /// Attach XMP data.
2122    pub fn with_xmp(mut self, data: &[u8]) -> Self {
2123        self.xmp = Some(data.to_vec());
2124        self
2125    }
2126
2127    /// Specify that source pixels use a custom gamma transfer function.
2128    pub fn with_source_gamma(mut self, gamma: f32) -> Self {
2129        self.source_gamma = Some(gamma);
2130        self
2131    }
2132
2133    /// Override the color encoding written to the JXL header.
2134    pub fn with_color_encoding(
2135        mut self,
2136        ce: crate::headers::color_encoding::ColorEncoding,
2137    ) -> Self {
2138        self.color_encoding = Some(ce);
2139        self
2140    }
2141
2142    /// Set the peak display luminance in nits for HDR content.
2143    pub fn with_intensity_target(mut self, nits: f32) -> Self {
2144        self.intensity_target = nits;
2145        self
2146    }
2147
2148    /// Set the minimum display luminance in nits.
2149    pub fn with_min_nits(mut self, nits: f32) -> Self {
2150        self.min_nits = nits;
2151        self
2152    }
2153
2154    /// Set the intrinsic display size.
2155    pub fn with_intrinsic_size(mut self, width: u32, height: u32) -> Self {
2156        self.intrinsic_size = Some((width, height));
2157        self
2158    }
2159
    /// Number of rows pushed so far; reaches [`height`](Self::height) when
    /// the image is complete and `finish` may be called.
    pub fn rows_pushed(&self) -> u32 {
        self.rows_pushed
    }
2164
    /// Total expected height in rows; `finish` errors unless exactly this
    /// many rows have been pushed.
    pub fn height(&self) -> u32 {
        self.height
    }
2169
    /// Push pixel rows into the encoder.
    ///
    /// `pixels` must contain exactly `width * num_rows * bytes_per_pixel` bytes.
    /// Rows are converted to the internal linear f32 format immediately, so the
    /// caller can free the source buffer after this call returns.
    ///
    /// # Errors
    ///
    /// Returns [`EncodeError::InvalidInput`] when the buffer length does not
    /// match the declared layout, or the rows would exceed the image height.
    #[track_caller]
    pub fn push_rows(&mut self, pixels: &[u8], num_rows: u32) -> Result<()> {
        // Delegate to the fallible body; `at` attaches caller-location
        // context (via `whereat` and `#[track_caller]`).
        self.push_rows_inner(pixels, num_rows).map_err(at)
    }
2179
2180    fn push_rows_inner(
2181        &mut self,
2182        pixels: &[u8],
2183        num_rows: u32,
2184    ) -> core::result::Result<(), EncodeError> {
2185        if num_rows == 0 {
2186            return Ok(());
2187        }
2188        let remaining = self.height - self.rows_pushed;
2189        if num_rows > remaining {
2190            return Err(EncodeError::InvalidInput {
2191                message: format!(
2192                    "push_rows: {num_rows} rows would exceed image height \
2193                     ({} pushed + {num_rows} > {})",
2194                    self.rows_pushed, self.height
2195                ),
2196            });
2197        }
2198        let w = self.width as usize;
2199        let n = num_rows as usize;
2200        let expected = w
2201            .checked_mul(n)
2202            .and_then(|wn| wn.checked_mul(self.layout.bytes_per_pixel()));
2203        match expected {
2204            Some(expected) if pixels.len() == expected => {}
2205            Some(expected) => {
2206                return Err(EncodeError::InvalidInput {
2207                    message: format!(
2208                        "push_rows: expected {expected} bytes for {w}x{n} {:?}, got {}",
2209                        self.layout,
2210                        pixels.len()
2211                    ),
2212                });
2213            }
2214            None => {
2215                return Err(EncodeError::InvalidInput {
2216                    message: "push_rows: row dimensions overflow".into(),
2217                });
2218            }
2219        }
2220
2221        let gamma = self.source_gamma;
2222
2223        // Convert and append linear RGB
2224        let new_linear: Vec<f32> = match self.layout {
2225            PixelLayout::Rgb8 => {
2226                if let Some(g) = gamma {
2227                    gamma_u8_to_linear_f32(pixels, 3, g)
2228                } else {
2229                    srgb_u8_to_linear_f32(pixels, 3)
2230                }
2231            }
2232            PixelLayout::Bgr8 => {
2233                let rgb = bgr_to_rgb(pixels, 3);
2234                if let Some(g) = gamma {
2235                    gamma_u8_to_linear_f32(&rgb, 3, g)
2236                } else {
2237                    srgb_u8_to_linear_f32(&rgb, 3)
2238                }
2239            }
2240            PixelLayout::Rgba8 => {
2241                if let Some(g) = gamma {
2242                    gamma_u8_to_linear_f32(pixels, 4, g)
2243                } else {
2244                    srgb_u8_to_linear_f32(pixels, 4)
2245                }
2246            }
2247            PixelLayout::Bgra8 => {
2248                let swapped = bgr_to_rgb(pixels, 4);
2249                if let Some(g) = gamma {
2250                    gamma_u8_to_linear_f32(&swapped, 4, g)
2251                } else {
2252                    srgb_u8_to_linear_f32(&swapped, 4)
2253                }
2254            }
2255            PixelLayout::Gray8 => {
2256                if let Some(g) = gamma {
2257                    gamma_gray_u8_to_linear_f32_rgb(pixels, 1, g)
2258                } else {
2259                    gray_u8_to_linear_f32_rgb(pixels, 1)
2260                }
2261            }
2262            PixelLayout::GrayAlpha8 => {
2263                if let Some(g) = gamma {
2264                    gamma_gray_u8_to_linear_f32_rgb(pixels, 2, g)
2265                } else {
2266                    gray_u8_to_linear_f32_rgb(pixels, 2)
2267                }
2268            }
2269            PixelLayout::Rgb16 => {
2270                if let Some(g) = gamma {
2271                    gamma_u16_to_linear_f32(pixels, 3, g)
2272                } else {
2273                    srgb_u16_to_linear_f32(pixels, 3)
2274                }
2275            }
2276            PixelLayout::Rgba16 => {
2277                if let Some(g) = gamma {
2278                    gamma_u16_to_linear_f32(pixels, 4, g)
2279                } else {
2280                    srgb_u16_to_linear_f32(pixels, 4)
2281                }
2282            }
2283            PixelLayout::Gray16 => {
2284                if let Some(g) = gamma {
2285                    gamma_gray_u16_to_linear_f32_rgb(pixels, 1, g)
2286                } else {
2287                    gray_u16_to_linear_f32_rgb(pixels, 1)
2288                }
2289            }
2290            PixelLayout::GrayAlpha16 => {
2291                if let Some(g) = gamma {
2292                    gamma_gray_u16_to_linear_f32_rgb(pixels, 2, g)
2293                } else {
2294                    gray_u16_to_linear_f32_rgb(pixels, 2)
2295                }
2296            }
2297            PixelLayout::RgbLinearF32 => {
2298                let floats: &[f32] = bytemuck::cast_slice(pixels);
2299                floats.to_vec()
2300            }
2301            PixelLayout::RgbaLinearF32 => {
2302                let floats: &[f32] = bytemuck::cast_slice(pixels);
2303                floats
2304                    .chunks(4)
2305                    .flat_map(|px| [px[0], px[1], px[2]])
2306                    .collect()
2307            }
2308            PixelLayout::GrayLinearF32 => {
2309                let floats: &[f32] = bytemuck::cast_slice(pixels);
2310                gray_f32_to_linear_f32_rgb(floats, 1)
2311            }
2312            PixelLayout::GrayAlphaLinearF32 => {
2313                let floats: &[f32] = bytemuck::cast_slice(pixels);
2314                gray_f32_to_linear_f32_rgb(floats, 2)
2315            }
2316        };
2317        self.linear_rgb.extend_from_slice(&new_linear);
2318
2319        // Extract and append alpha
2320        match self.layout {
2321            PixelLayout::Rgba8 | PixelLayout::Bgra8 => {
2322                let new_alpha = extract_alpha(pixels, 4, 3);
2323                self.alpha
2324                    .get_or_insert_with(Vec::new)
2325                    .extend_from_slice(&new_alpha);
2326            }
2327            PixelLayout::GrayAlpha8 => {
2328                let new_alpha = extract_alpha(pixels, 2, 1);
2329                self.alpha
2330                    .get_or_insert_with(Vec::new)
2331                    .extend_from_slice(&new_alpha);
2332            }
2333            PixelLayout::Rgba16 => {
2334                let new_alpha = extract_alpha_u16(pixels, 4, 3);
2335                self.alpha
2336                    .get_or_insert_with(Vec::new)
2337                    .extend_from_slice(&new_alpha);
2338            }
2339            PixelLayout::GrayAlpha16 => {
2340                let new_alpha = extract_alpha_u16(pixels, 2, 1);
2341                self.alpha
2342                    .get_or_insert_with(Vec::new)
2343                    .extend_from_slice(&new_alpha);
2344            }
2345            PixelLayout::RgbaLinearF32 => {
2346                let floats: &[f32] = bytemuck::cast_slice(pixels);
2347                let new_alpha = extract_alpha_f32(floats, 4, 3);
2348                self.alpha
2349                    .get_or_insert_with(Vec::new)
2350                    .extend_from_slice(&new_alpha);
2351            }
2352            PixelLayout::GrayAlphaLinearF32 => {
2353                let floats: &[f32] = bytemuck::cast_slice(pixels);
2354                let new_alpha = extract_alpha_f32(floats, 2, 1);
2355                self.alpha
2356                    .get_or_insert_with(Vec::new)
2357                    .extend_from_slice(&new_alpha);
2358            }
2359            _ => {}
2360        }
2361
2362        self.rows_pushed += num_rows;
2363        Ok(())
2364    }
2365
    /// Encode the accumulated pixels and return the JXL bytes.
    ///
    /// All rows must have been pushed via [`push_rows`](Self::push_rows) before
    /// calling this. Returns an error if the image is incomplete.
    #[track_caller]
    pub fn finish(self) -> Result<Vec<u8>> {
        // finish_inner always returns a result with `data` populated
        // (it constructs `EncodeResult { data: Some(..), .. }`), so
        // take_data() cannot yield None here.
        self.finish_inner()
            .map(|mut r| r.take_data().unwrap())
            .map_err(at)
    }
2376
    /// Encode and return JXL bytes together with [`EncodeStats`].
    ///
    /// Unlike [`finish`](Self::finish), the returned result keeps both the
    /// encoded bytes and the collected statistics.
    #[track_caller]
    pub fn finish_with_stats(self) -> Result<EncodeResult> {
        self.finish_inner().map_err(at)
    }
2382
    /// Encode, appending to an existing buffer.
    ///
    /// On success the JXL bytes are appended to `out`; the returned
    /// [`EncodeResult`] has its data taken and carries only statistics.
    #[track_caller]
    pub fn finish_into(self, out: &mut Vec<u8>) -> Result<EncodeResult> {
        let mut result = self.finish_inner().map_err(at)?;
        if let Some(data) = result.data.take() {
            out.extend_from_slice(&data);
        }
        Ok(result)
    }
2392
    /// Encode, writing to a `std::io::Write` destination.
    ///
    /// On success the JXL bytes are written to `dest`; the returned
    /// [`EncodeResult`] has its data taken and carries only statistics.
    /// Write failures are surfaced as an [`EncodeError`] (via `From`).
    #[cfg(feature = "std")]
    #[track_caller]
    pub fn finish_to(self, mut dest: impl std::io::Write) -> Result<EncodeResult> {
        let mut result = self.finish_inner().map_err(at)?;
        if let Some(data) = result.data.take() {
            dest.write_all(&data)
                .map_err(|e| at(EncodeError::from(e)))?;
        }
        Ok(result)
    }
2404
2405    fn finish_inner(self) -> core::result::Result<EncodeResult, EncodeError> {
2406        if self.rows_pushed != self.height {
2407            return Err(EncodeError::InvalidInput {
2408                message: format!(
2409                    "incomplete image: {} of {} rows pushed",
2410                    self.rows_pushed, self.height
2411                ),
2412            });
2413        }
2414
2415        let cfg = &self.cfg;
2416        let w = self.width as usize;
2417        let h = self.height as usize;
2418        let linear_rgb = self.linear_rgb;
2419        let alpha = self.alpha;
2420
2421        let (codestream, mut stats) = run_with_threads(cfg.threads, || {
2422            let mut profile = crate::effort::EffortProfile::lossy(cfg.effort, cfg.mode);
2423            if let Some(max_size) = cfg.max_strategy_size {
2424                if max_size < 16 {
2425                    profile.try_dct16 = false;
2426                }
2427                if max_size < 32 {
2428                    profile.try_dct32 = false;
2429                }
2430                if max_size < 64 {
2431                    profile.try_dct64 = false;
2432                }
2433            }
2434
2435            let mut enc = crate::vardct::VarDctEncoder::new(cfg.distance);
2436            enc.effort = cfg.effort;
2437            enc.profile = profile;
2438            enc.use_ans = cfg.use_ans;
2439            enc.optimize_codes = enc.profile.optimize_codes;
2440            enc.custom_orders = enc.profile.custom_orders;
2441            enc.ac_strategy_enabled = enc.profile.ac_strategy_enabled;
2442            enc.enable_noise = cfg.noise;
2443            enc.enable_denoise = cfg.denoise;
2444            enc.enable_gaborish = cfg.gaborish && cfg.distance > 0.5;
2445            enc.error_diffusion = cfg.error_diffusion;
2446            enc.pixel_domain_loss = cfg.pixel_domain_loss;
2447            enc.enable_lz77 = cfg.lz77;
2448            enc.lz77_method = cfg.lz77_method;
2449            enc.force_strategy = cfg.force_strategy;
2450            enc.enable_patches = cfg.patches;
2451            enc.encoder_mode = cfg.mode;
2452            enc.splines = cfg.splines.clone();
2453            enc.is_grayscale = self.layout.is_grayscale();
2454            enc.progressive = cfg.progressive;
2455            enc.use_lf_frame = cfg.lf_frame;
2456            #[cfg(feature = "butteraugli-loop")]
2457            {
2458                enc.butteraugli_iters = cfg.butteraugli_iters;
2459            }
2460            enc.bit_depth_16 = self.bit_depth_16;
2461            enc.source_gamma = self.source_gamma;
2462            enc.color_encoding = self.color_encoding.clone();
2463            enc.intensity_target = self.intensity_target;
2464            enc.min_nits = self.min_nits;
2465            enc.intrinsic_size = self.intrinsic_size;
2466            if let Some(ref icc) = self.icc_profile {
2467                enc.icc_profile = Some(icc.clone());
2468            }
2469
2470            let output = enc
2471                .encode(w, h, &linear_rgb, alpha.as_deref())
2472                .map_err(EncodeError::from)?;
2473
2474            #[cfg(feature = "butteraugli-loop")]
2475            let butteraugli_iters_actual = cfg.butteraugli_iters;
2476            #[cfg(not(feature = "butteraugli-loop"))]
2477            let butteraugli_iters_actual = 0u32;
2478
2479            let stats = EncodeStats {
2480                mode: EncodeMode::Lossy,
2481                strategy_counts: output.strategy_counts,
2482                gaborish: cfg.gaborish,
2483                ans: cfg.use_ans,
2484                butteraugli_iters: butteraugli_iters_actual,
2485                pixel_domain_loss: cfg.pixel_domain_loss,
2486                ..Default::default()
2487            };
2488            Ok::<_, EncodeError>((output.data, stats))
2489        })?;
2490
2491        stats.codestream_size = codestream.len();
2492
2493        let output = if self.exif.is_some() || self.xmp.is_some() {
2494            crate::container::wrap_in_container(
2495                &codestream,
2496                self.exif.as_deref(),
2497                self.xmp.as_deref(),
2498            )
2499        } else {
2500            codestream
2501        };
2502
2503        stats.output_size = output.len();
2504        Ok(EncodeResult {
2505            data: Some(output),
2506            stats,
2507        })
2508    }
2509}
2510
2511impl LossyConfig {
2512    /// Create a streaming encoder for incremental row input.
2513    ///
2514    /// Pixels are converted to the internal format as rows are pushed via
2515    /// [`LossyEncoder::push_rows`], allowing callers to free source buffers
2516    /// incrementally rather than materializing the entire image.
2517    #[track_caller]
2518    pub fn encoder(&self, width: u32, height: u32, layout: PixelLayout) -> Result<LossyEncoder> {
2519        if width == 0 || height == 0 {
2520            return Err(at(EncodeError::InvalidInput {
2521                message: format!("zero dimensions: {width}x{height}"),
2522            }));
2523        }
2524        let w = width as usize;
2525        let h = height as usize;
2526        let rgb_capacity = w.checked_mul(h).and_then(|n| n.checked_mul(3));
2527        let Some(rgb_capacity) = rgb_capacity else {
2528            return Err(at(EncodeError::InvalidInput {
2529                message: "image dimensions overflow".into(),
2530            }));
2531        };
2532
2533        let bit_depth_16 = layout.is_16bit();
2534        let has_alpha = layout.has_alpha();
2535        let alpha = if has_alpha {
2536            let mut v = Vec::new();
2537            v.try_reserve(w * h)
2538                .map_err(|e| at(EncodeError::from(crate::error::Error::from(e))))?;
2539            Some(v)
2540        } else {
2541            None
2542        };
2543
2544        let mut linear_rgb = Vec::new();
2545        linear_rgb
2546            .try_reserve(rgb_capacity)
2547            .map_err(|e| at(EncodeError::from(crate::error::Error::from(e))))?;
2548
2549        Ok(LossyEncoder {
2550            cfg: self.clone(),
2551            width,
2552            height,
2553            layout,
2554            rows_pushed: 0,
2555            linear_rgb,
2556            alpha,
2557            bit_depth_16,
2558            icc_profile: None,
2559            exif: None,
2560            xmp: None,
2561            source_gamma: None,
2562            color_encoding: None,
2563            intensity_target: 255.0,
2564            min_nits: 0.0,
2565            intrinsic_size: None,
2566        })
2567    }
2568}
2569
/// Streaming lossless (modular) encoder.
///
/// Accepts pixel rows incrementally via [`push_rows`](Self::push_rows), then
/// encodes on [`finish`](Self::finish). This allows callers to free source pixel
/// buffers as rows are pushed, rather than materializing the entire image in
/// memory before encoding.
///
/// ```rust,no_run
/// use jxl_encoder::{LosslessConfig, PixelLayout};
///
/// let mut enc = LosslessConfig::new()
///     .encoder(800, 600, PixelLayout::Rgb8)?;
///
/// # let row_bytes = 800 * 3;
/// # let source_rows = vec![0u8; row_bytes * 600];
/// for chunk in source_rows.chunks(row_bytes * 100) {
///     enc.push_rows(chunk, 100)?;
/// }
///
/// let jxl_bytes = enc.finish()?;
/// # Ok::<_, jxl_encoder::At<jxl_encoder::EncodeError>>(())
/// ```
pub struct LosslessEncoder {
    // Config captured at construction; consulted again in `finish`.
    cfg: LosslessConfig,
    width: u32,
    height: u32,
    // Source interleaving/depth; fixed for the encoder's lifetime.
    layout: PixelLayout,
    // Rows received so far; invariant: rows_pushed <= height.
    rows_pushed: u32,
    // Pre-allocated per-channel planes filled by `push_rows`.
    channels: Vec<crate::modular::channel::Channel>,
    // Channel count implied by `layout` (1, 2, 3, or 4).
    num_source_channels: usize,
    // 8 or 16, derived from `layout` at construction.
    bit_depth: u32,
    is_grayscale: bool,
    has_alpha: bool,
    // Optional metadata written into the header/container at `finish` time.
    icc_profile: Option<Vec<u8>>,
    exif: Option<Vec<u8>>,
    xmp: Option<Vec<u8>>,
    source_gamma: Option<f32>,
    color_encoding: Option<crate::headers::color_encoding::ColorEncoding>,
    intensity_target: f32,
    min_nits: f32,
    intrinsic_size: Option<(u32, u32)>,
}
2612
2613impl LosslessEncoder {
2614    /// Attach an ICC color profile.
2615    pub fn with_icc_profile(mut self, data: &[u8]) -> Self {
2616        self.icc_profile = Some(data.to_vec());
2617        self
2618    }
2619
2620    /// Attach EXIF data.
2621    pub fn with_exif(mut self, data: &[u8]) -> Self {
2622        self.exif = Some(data.to_vec());
2623        self
2624    }
2625
2626    /// Attach XMP data.
2627    pub fn with_xmp(mut self, data: &[u8]) -> Self {
2628        self.xmp = Some(data.to_vec());
2629        self
2630    }
2631
2632    /// Specify that source pixels use a custom gamma transfer function.
2633    pub fn with_source_gamma(mut self, gamma: f32) -> Self {
2634        self.source_gamma = Some(gamma);
2635        self
2636    }
2637
2638    /// Override the color encoding written to the JXL header.
2639    pub fn with_color_encoding(
2640        mut self,
2641        ce: crate::headers::color_encoding::ColorEncoding,
2642    ) -> Self {
2643        self.color_encoding = Some(ce);
2644        self
2645    }
2646
2647    /// Set the peak display luminance in nits for HDR content.
2648    pub fn with_intensity_target(mut self, nits: f32) -> Self {
2649        self.intensity_target = nits;
2650        self
2651    }
2652
2653    /// Set the minimum display luminance in nits.
2654    pub fn with_min_nits(mut self, nits: f32) -> Self {
2655        self.min_nits = nits;
2656        self
2657    }
2658
2659    /// Set the intrinsic display size.
2660    pub fn with_intrinsic_size(mut self, width: u32, height: u32) -> Self {
2661        self.intrinsic_size = Some((width, height));
2662        self
2663    }
2664
2665    /// Number of rows pushed so far.
2666    pub fn rows_pushed(&self) -> u32 {
2667        self.rows_pushed
2668    }
2669
2670    /// Total expected height.
2671    pub fn height(&self) -> u32 {
2672        self.height
2673    }
2674
2675    /// Push pixel rows into the encoder.
2676    ///
2677    /// `pixels` must contain exactly `width * num_rows * bytes_per_pixel` bytes.
2678    /// Rows are deinterleaved into per-channel planes immediately, so the caller
2679    /// can free the source buffer after this call returns.
2680    #[track_caller]
2681    pub fn push_rows(&mut self, pixels: &[u8], num_rows: u32) -> Result<()> {
2682        self.push_rows_inner(pixels, num_rows).map_err(at)
2683    }
2684
2685    fn push_rows_inner(
2686        &mut self,
2687        pixels: &[u8],
2688        num_rows: u32,
2689    ) -> core::result::Result<(), EncodeError> {
2690        if num_rows == 0 {
2691            return Ok(());
2692        }
2693        let remaining = self.height - self.rows_pushed;
2694        if num_rows > remaining {
2695            return Err(EncodeError::InvalidInput {
2696                message: format!(
2697                    "push_rows: {num_rows} rows would exceed image height \
2698                     ({} pushed + {num_rows} > {})",
2699                    self.rows_pushed, self.height
2700                ),
2701            });
2702        }
2703        let w = self.width as usize;
2704        let n = num_rows as usize;
2705        let bpp = self.layout.bytes_per_pixel();
2706        let expected = w.checked_mul(n).and_then(|wn| wn.checked_mul(bpp));
2707        match expected {
2708            Some(expected) if pixels.len() == expected => {}
2709            Some(expected) => {
2710                return Err(EncodeError::InvalidInput {
2711                    message: format!(
2712                        "push_rows: expected {expected} bytes for {w}x{n} {:?}, got {}",
2713                        self.layout,
2714                        pixels.len()
2715                    ),
2716                });
2717            }
2718            None => {
2719                return Err(EncodeError::InvalidInput {
2720                    message: "push_rows: row dimensions overflow".into(),
2721                });
2722            }
2723        }
2724
2725        let y_start = self.rows_pushed as usize;
2726        let nc = self.num_source_channels;
2727
2728        match self.layout {
2729            PixelLayout::Rgb8 | PixelLayout::Bgr8 => {
2730                let is_bgr = matches!(self.layout, PixelLayout::Bgr8);
2731                for y in 0..n {
2732                    let row_offset = y * w * 3;
2733                    let dst_y = y_start + y;
2734                    for x in 0..w {
2735                        let src = row_offset + x * 3;
2736                        let (r, g, b) = if is_bgr {
2737                            (pixels[src + 2], pixels[src + 1], pixels[src])
2738                        } else {
2739                            (pixels[src], pixels[src + 1], pixels[src + 2])
2740                        };
2741                        self.channels[0].set(x, dst_y, r as i32);
2742                        self.channels[1].set(x, dst_y, g as i32);
2743                        self.channels[2].set(x, dst_y, b as i32);
2744                    }
2745                }
2746            }
2747            PixelLayout::Rgba8 | PixelLayout::Bgra8 => {
2748                let is_bgr = matches!(self.layout, PixelLayout::Bgra8);
2749                for y in 0..n {
2750                    let row_offset = y * w * 4;
2751                    let dst_y = y_start + y;
2752                    for x in 0..w {
2753                        let src = row_offset + x * 4;
2754                        let (r, g, b) = if is_bgr {
2755                            (pixels[src + 2], pixels[src + 1], pixels[src])
2756                        } else {
2757                            (pixels[src], pixels[src + 1], pixels[src + 2])
2758                        };
2759                        self.channels[0].set(x, dst_y, r as i32);
2760                        self.channels[1].set(x, dst_y, g as i32);
2761                        self.channels[2].set(x, dst_y, b as i32);
2762                        self.channels[3].set(x, dst_y, pixels[src + 3] as i32);
2763                    }
2764                }
2765            }
2766            PixelLayout::Gray8 => {
2767                for y in 0..n {
2768                    let row_offset = y * w;
2769                    let dst_y = y_start + y;
2770                    for x in 0..w {
2771                        self.channels[0].set(x, dst_y, pixels[row_offset + x] as i32);
2772                    }
2773                }
2774            }
2775            PixelLayout::GrayAlpha8 => {
2776                for y in 0..n {
2777                    let row_offset = y * w * 2;
2778                    let dst_y = y_start + y;
2779                    for x in 0..w {
2780                        let src = row_offset + x * 2;
2781                        self.channels[0].set(x, dst_y, pixels[src] as i32);
2782                        self.channels[1].set(x, dst_y, pixels[src + 1] as i32);
2783                    }
2784                }
2785            }
2786            PixelLayout::Rgb16
2787            | PixelLayout::Rgba16
2788            | PixelLayout::Gray16
2789            | PixelLayout::GrayAlpha16 => {
2790                let pixels_u16: &[u16] = bytemuck::cast_slice(pixels);
2791                for y in 0..n {
2792                    let row_offset = y * w * nc;
2793                    let dst_y = y_start + y;
2794                    for x in 0..w {
2795                        let src = row_offset + x * nc;
2796                        for c in 0..nc {
2797                            self.channels[c].set(x, dst_y, pixels_u16[src + c] as i32);
2798                        }
2799                    }
2800                }
2801            }
2802            _ => {
2803                return Err(EncodeError::UnsupportedPixelLayout(self.layout));
2804            }
2805        }
2806
2807        self.rows_pushed += num_rows;
2808        Ok(())
2809    }
2810
2811    /// Encode the accumulated pixels and return the JXL bytes.
2812    ///
2813    /// All rows must have been pushed via [`push_rows`](Self::push_rows) before
2814    /// calling this. Returns an error if the image is incomplete.
2815    #[track_caller]
2816    pub fn finish(self) -> Result<Vec<u8>> {
2817        self.finish_inner()
2818            .map(|mut r| r.take_data().unwrap())
2819            .map_err(at)
2820    }
2821
2822    /// Encode and return JXL bytes together with [`EncodeStats`].
2823    #[track_caller]
2824    pub fn finish_with_stats(self) -> Result<EncodeResult> {
2825        self.finish_inner().map_err(at)
2826    }
2827
2828    /// Encode, appending to an existing buffer.
2829    #[track_caller]
2830    pub fn finish_into(self, out: &mut Vec<u8>) -> Result<EncodeResult> {
2831        let mut result = self.finish_inner().map_err(at)?;
2832        if let Some(data) = result.data.take() {
2833            out.extend_from_slice(&data);
2834        }
2835        Ok(result)
2836    }
2837
2838    /// Encode, writing to a `std::io::Write` destination.
2839    #[cfg(feature = "std")]
2840    #[track_caller]
2841    pub fn finish_to(self, mut dest: impl std::io::Write) -> Result<EncodeResult> {
2842        let mut result = self.finish_inner().map_err(at)?;
2843        if let Some(data) = result.data.take() {
2844            dest.write_all(&data)
2845                .map_err(|e| at(EncodeError::from(e)))?;
2846        }
2847        Ok(result)
2848    }
2849
2850    fn finish_inner(self) -> core::result::Result<EncodeResult, EncodeError> {
2851        use crate::bit_writer::BitWriter;
2852        use crate::headers::color_encoding::ColorSpace;
2853        use crate::headers::{ColorEncoding, FileHeader};
2854        use crate::modular::channel::ModularImage;
2855        use crate::modular::frame::{FrameEncoder, FrameEncoderOptions};
2856
2857        if self.rows_pushed != self.height {
2858            return Err(EncodeError::InvalidInput {
2859                message: format!(
2860                    "incomplete image: {} of {} rows pushed",
2861                    self.rows_pushed, self.height
2862                ),
2863            });
2864        }
2865
2866        let cfg = &self.cfg;
2867        let w = self.width as usize;
2868        let h = self.height as usize;
2869
2870        let mut image = ModularImage {
2871            channels: self.channels,
2872            bit_depth: self.bit_depth,
2873            is_grayscale: self.is_grayscale,
2874            has_alpha: self.has_alpha,
2875        };
2876
2877        let (codestream, mut stats) = run_with_threads(cfg.threads, || {
2878            // Reconstruct interleaved pixels for patch detection (8-bit RGB only)
2879            let num_channels = self.layout.bytes_per_pixel();
2880            let can_use_patches =
2881                cfg.patches && !image.is_grayscale && image.bit_depth <= 8 && num_channels >= 3;
2882            let patches_data = if can_use_patches {
2883                let mut detection_pixels = vec![0u8; w * h * num_channels];
2884                let nc = core::cmp::min(num_channels, image.channels.len());
2885                for y in 0..h {
2886                    for x in 0..w {
2887                        for c in 0..nc {
2888                            detection_pixels[(y * w + x) * num_channels + c] =
2889                                image.channels[c].get(x, y) as u8;
2890                        }
2891                        // Fill remaining channels (alpha) from the image
2892                        for c in nc..num_channels {
2893                            if c < image.channels.len() {
2894                                detection_pixels[(y * w + x) * num_channels + c] =
2895                                    image.channels[c].get(x, y) as u8;
2896                            }
2897                        }
2898                    }
2899                }
2900                crate::vardct::patches::find_and_build_lossless(
2901                    &detection_pixels,
2902                    w,
2903                    h,
2904                    num_channels,
2905                    image.bit_depth,
2906                )
2907            } else {
2908                None
2909            };
2910
2911            // Build file header
2912            let mut file_header = if image.is_grayscale {
2913                FileHeader::new_gray(self.width, self.height)
2914            } else if image.has_alpha {
2915                FileHeader::new_rgba(self.width, self.height)
2916            } else {
2917                FileHeader::new_rgb(self.width, self.height)
2918            };
2919            if image.bit_depth == 16 {
2920                file_header.metadata.bit_depth = crate::headers::file_header::BitDepth::uint16();
2921                for ec in &mut file_header.metadata.extra_channels {
2922                    ec.bit_depth = crate::headers::file_header::BitDepth::uint16();
2923                }
2924            }
2925            if self.icc_profile.is_some() {
2926                file_header.metadata.color_encoding.want_icc = true;
2927            }
2928            file_header.metadata.intensity_target = self.intensity_target;
2929            file_header.metadata.min_nits = self.min_nits;
2930            if let Some((w, h)) = self.intrinsic_size {
2931                file_header.metadata.have_intrinsic_size = true;
2932                file_header.metadata.intrinsic_width = w;
2933                file_header.metadata.intrinsic_height = h;
2934            }
2935
2936            let mut writer = BitWriter::new();
2937            file_header.write(&mut writer).map_err(EncodeError::from)?;
2938            if let Some(ref icc) = self.icc_profile {
2939                crate::icc::write_icc(icc, &mut writer).map_err(EncodeError::from)?;
2940            }
2941            writer.zero_pad_to_byte();
2942
2943            // Write reference frame and subtract patches
2944            if let Some(ref pd) = patches_data {
2945                let lossless_profile = crate::effort::EffortProfile::lossless(cfg.effort, cfg.mode);
2946                crate::vardct::patches::encode_reference_frame_rgb(
2947                    pd,
2948                    image.bit_depth,
2949                    cfg.use_ans,
2950                    lossless_profile.patch_ref_tree_learning,
2951                    &mut writer,
2952                )
2953                .map_err(EncodeError::from)?;
2954                writer.zero_pad_to_byte();
2955                let bd = image.bit_depth;
2956                crate::vardct::patches::subtract_patches_modular(&mut image, pd, bd);
2957            }
2958
2959            // Encode frame
2960            let frame_encoder = FrameEncoder::new(
2961                w,
2962                h,
2963                FrameEncoderOptions {
2964                    use_modular: true,
2965                    effort: cfg.effort,
2966                    use_ans: cfg.use_ans,
2967                    use_tree_learning: cfg.tree_learning,
2968                    use_squeeze: cfg.squeeze,
2969                    enable_lz77: cfg.lz77,
2970                    lz77_method: cfg.lz77_method,
2971                    lossy_palette: cfg.lossy_palette,
2972                    encoder_mode: cfg.mode,
2973                    profile: crate::effort::EffortProfile::lossless(cfg.effort, cfg.mode),
2974                    have_animation: false,
2975                    duration: 0,
2976                    is_last: true,
2977                    crop: None,
2978                    skip_rct: false,
2979                },
2980            );
2981            let color_encoding = if let Some(ce) = self.color_encoding.clone() {
2982                if image.is_grayscale && ce.color_space != ColorSpace::Gray {
2983                    ColorEncoding {
2984                        color_space: ColorSpace::Gray,
2985                        ..ce
2986                    }
2987                } else {
2988                    ce
2989                }
2990            } else if let Some(gamma) = self.source_gamma {
2991                if image.is_grayscale {
2992                    ColorEncoding::gray_with_gamma(gamma)
2993                } else {
2994                    ColorEncoding::with_gamma(gamma)
2995                }
2996            } else if image.is_grayscale {
2997                ColorEncoding::gray()
2998            } else {
2999                ColorEncoding::srgb()
3000            };
3001            frame_encoder
3002                .encode_modular_with_patches(
3003                    &image,
3004                    &color_encoding,
3005                    &mut writer,
3006                    patches_data.as_ref(),
3007                )
3008                .map_err(EncodeError::from)?;
3009
3010            let stats = EncodeStats {
3011                mode: EncodeMode::Lossless,
3012                ans: cfg.use_ans,
3013                ..Default::default()
3014            };
3015            Ok::<_, EncodeError>((writer.finish_with_padding(), stats))
3016        })?;
3017
3018        stats.codestream_size = codestream.len();
3019
3020        let output = if self.exif.is_some() || self.xmp.is_some() {
3021            crate::container::wrap_in_container(
3022                &codestream,
3023                self.exif.as_deref(),
3024                self.xmp.as_deref(),
3025            )
3026        } else {
3027            codestream
3028        };
3029
3030        stats.output_size = output.len();
3031        Ok(EncodeResult {
3032            data: Some(output),
3033            stats,
3034        })
3035    }
3036}
3037
3038impl LosslessConfig {
3039    /// Create a streaming encoder for incremental row input.
3040    ///
3041    /// Per-channel planes are pre-allocated and filled as rows are pushed via
3042    /// [`LosslessEncoder::push_rows`], allowing callers to free source buffers
3043    /// incrementally rather than materializing the entire image.
3044    #[track_caller]
3045    pub fn encoder(&self, width: u32, height: u32, layout: PixelLayout) -> Result<LosslessEncoder> {
3046        use crate::modular::channel::Channel;
3047
3048        if width == 0 || height == 0 {
3049            return Err(at(EncodeError::InvalidInput {
3050                message: format!("zero dimensions: {width}x{height}"),
3051            }));
3052        }
3053
3054        let w = width as usize;
3055        let h = height as usize;
3056
3057        let (num_channels, bit_depth, is_grayscale, has_alpha) = match layout {
3058            PixelLayout::Rgb8 | PixelLayout::Bgr8 => (3, 8u32, false, false),
3059            PixelLayout::Rgba8 | PixelLayout::Bgra8 => (4, 8, false, true),
3060            PixelLayout::Gray8 => (1, 8, true, false),
3061            PixelLayout::GrayAlpha8 => (2, 8, true, true),
3062            PixelLayout::Rgb16 => (3, 16, false, false),
3063            PixelLayout::Rgba16 => (4, 16, false, true),
3064            PixelLayout::Gray16 => (1, 16, true, false),
3065            PixelLayout::GrayAlpha16 => (2, 16, true, true),
3066            other => return Err(at(EncodeError::UnsupportedPixelLayout(other))),
3067        };
3068
3069        let mut channels = Vec::with_capacity(num_channels);
3070        for _ in 0..num_channels {
3071            channels.push(Channel::new(w, h).map_err(|e| at(EncodeError::from(e)))?);
3072        }
3073
3074        Ok(LosslessEncoder {
3075            cfg: self.clone(),
3076            width,
3077            height,
3078            layout,
3079            rows_pushed: 0,
3080            channels,
3081            num_source_channels: num_channels,
3082            bit_depth,
3083            is_grayscale,
3084            has_alpha,
3085            icc_profile: None,
3086            exif: None,
3087            xmp: None,
3088            source_gamma: None,
3089            color_encoding: None,
3090            intensity_target: 255.0,
3091            min_nits: 0.0,
3092            intrinsic_size: None,
3093        })
3094    }
3095}
3096
3097// ── Thread pool helper ──────────────────────────────────────────────────────
3098
/// Run a closure inside a rayon thread pool when the `parallel` feature
/// is enabled and `threads > 1`. Otherwise, just call the closure directly.
///
/// - `threads == 0`: use the ambient rayon pool (caller controls via
///   `pool.install()` or the global default).
/// - `threads == 1`: sequential — call `f()` on the current thread.
/// - `threads >= 2`: create a dedicated pool with that many threads.
#[cfg(feature = "parallel")]
fn run_with_threads<T>(threads: usize, f: impl FnOnce() -> T + Send) -> T
where
    T: Send,
{
    if threads > 1 {
        // A dedicated pool was requested; fall back to a direct call only if
        // rayon cannot build one.
        if let Ok(pool) = rayon::ThreadPoolBuilder::new().num_threads(threads).build() {
            return pool.install(f);
        }
    }
    f()
}
3119
/// Sequential fallback: without the `parallel` feature the thread-count hint
/// is ignored and the closure runs synchronously on the caller's thread.
#[cfg(not(feature = "parallel"))]
fn run_with_threads<T>(_threads: usize, f: impl FnOnce() -> T) -> T {
    f()
}
3124
3125// ── Animation encode implementations ────────────────────────────────────────
3126
3127fn validate_animation_input(
3128    width: u32,
3129    height: u32,
3130    layout: PixelLayout,
3131    frames: &[AnimationFrame<'_>],
3132) -> core::result::Result<(), EncodeError> {
3133    if width == 0 || height == 0 {
3134        return Err(EncodeError::InvalidInput {
3135            message: format!("zero dimensions: {width}x{height}"),
3136        });
3137    }
3138    if frames.is_empty() {
3139        return Err(EncodeError::InvalidInput {
3140            message: "animation requires at least one frame".into(),
3141        });
3142    }
3143    let expected_size = (width as usize)
3144        .checked_mul(height as usize)
3145        .and_then(|n| n.checked_mul(layout.bytes_per_pixel()))
3146        .ok_or_else(|| EncodeError::InvalidInput {
3147            message: "image dimensions overflow".into(),
3148        })?;
3149    for (i, frame) in frames.iter().enumerate() {
3150        if frame.pixels.len() != expected_size {
3151            return Err(EncodeError::InvalidInput {
3152                message: format!(
3153                    "frame {} pixel buffer size mismatch: expected {expected_size}, got {}",
3154                    i,
3155                    frame.pixels.len()
3156                ),
3157            });
3158        }
3159    }
3160    Ok(())
3161}
3162
/// Encode an animation losslessly (modular mode): one file header with an
/// `AnimationHeader`, then one modular frame per input frame, using inter-frame
/// crop detection to shrink frames that differ from their predecessor only in a
/// sub-rectangle.
fn encode_animation_lossless(
    cfg: &LosslessConfig,
    width: u32,
    height: u32,
    layout: PixelLayout,
    animation: &AnimationParams,
    frames: &[AnimationFrame<'_>],
) -> core::result::Result<Vec<u8>, EncodeError> {
    use crate::bit_writer::BitWriter;
    use crate::headers::file_header::AnimationHeader;
    use crate::headers::{ColorEncoding, FileHeader};
    use crate::modular::channel::ModularImage;
    use crate::modular::frame::{FrameEncoder, FrameEncoderOptions};

    validate_animation_input(width, height, layout, frames)?;

    let w = width as usize;
    let h = height as usize;
    let num_frames = frames.len();

    // Build file header with animation.
    // Frame 0 is converted once just to learn grayscale/alpha/bit-depth for the
    // header; it is converted again inside the per-frame loop below.
    let sample_image = match layout {
        PixelLayout::Rgb8 => ModularImage::from_rgb8(frames[0].pixels, w, h),
        PixelLayout::Rgba8 => ModularImage::from_rgba8(frames[0].pixels, w, h),
        PixelLayout::Bgr8 => ModularImage::from_rgb8(&bgr_to_rgb(frames[0].pixels, 3), w, h),
        PixelLayout::Bgra8 => ModularImage::from_rgba8(&bgr_to_rgb(frames[0].pixels, 4), w, h),
        PixelLayout::Gray8 => ModularImage::from_gray8(frames[0].pixels, w, h),
        PixelLayout::GrayAlpha8 => ModularImage::from_grayalpha8(frames[0].pixels, w, h),
        PixelLayout::Rgb16 => ModularImage::from_rgb16_native(frames[0].pixels, w, h),
        PixelLayout::Rgba16 => ModularImage::from_rgba16_native(frames[0].pixels, w, h),
        PixelLayout::Gray16 => ModularImage::from_gray16_native(frames[0].pixels, w, h),
        PixelLayout::GrayAlpha16 => ModularImage::from_grayalpha16_native(frames[0].pixels, w, h),
        other => return Err(EncodeError::UnsupportedPixelLayout(other)),
    }
    .map_err(EncodeError::from)?;

    let mut file_header = if sample_image.is_grayscale {
        FileHeader::new_gray(width, height)
    } else if sample_image.has_alpha {
        FileHeader::new_rgba(width, height)
    } else {
        FileHeader::new_rgb(width, height)
    };
    if sample_image.bit_depth == 16 {
        file_header.metadata.bit_depth = crate::headers::file_header::BitDepth::uint16();
        for ec in &mut file_header.metadata.extra_channels {
            ec.bit_depth = crate::headers::file_header::BitDepth::uint16();
        }
    }
    file_header.metadata.animation = Some(AnimationHeader {
        tps_numerator: animation.tps_numerator,
        tps_denominator: animation.tps_denominator,
        num_loops: animation.num_loops,
        have_timecodes: false,
    });

    // Write file header
    let mut writer = BitWriter::new();
    file_header.write(&mut writer).map_err(EncodeError::from)?;
    writer.zero_pad_to_byte();

    // Encode each frame with crop detection
    let color_encoding = ColorEncoding::srgb();
    let bpp = layout.bytes_per_pixel();
    let mut prev_pixels: Option<&[u8]> = None;

    for (i, frame) in frames.iter().enumerate() {
        // Detect crop: compare current frame against previous.
        // Only use crop when it's smaller than the full frame.
        let crop = if let Some(prev) = prev_pixels {
            match detect_frame_crop(prev, frame.pixels, w, h, bpp, false) {
                Some(crop) if (crop.width as usize) < w || (crop.height as usize) < h => Some(crop),
                Some(_) => None, // Crop covers full frame — no benefit
                None => {
                    // Frames are identical — emit a minimal 1x1 crop to preserve canvas
                    Some(FrameCrop {
                        x0: 0,
                        y0: 0,
                        width: 1,
                        height: 1,
                    })
                }
            }
        } else {
            None // Frame 0: always full frame
        };

        // Build ModularImage from the appropriate pixel region.
        // Deferred initialization: `frame_pixels_owned` must outlive
        // `frame_pixels` so the cropped-copy borrow stays valid.
        let (frame_w, frame_h, frame_pixels_owned);
        let frame_pixels: &[u8] = if let Some(ref crop) = crop {
            frame_w = crop.width as usize;
            frame_h = crop.height as usize;
            frame_pixels_owned = extract_pixel_crop(frame.pixels, w, crop, bpp);
            &frame_pixels_owned
        } else {
            frame_w = w;
            frame_h = h;
            frame_pixels_owned = Vec::new();
            let _ = &frame_pixels_owned; // suppress unused warning
            frame.pixels
        };

        let image = match layout {
            PixelLayout::Rgb8 => ModularImage::from_rgb8(frame_pixels, frame_w, frame_h),
            PixelLayout::Rgba8 => ModularImage::from_rgba8(frame_pixels, frame_w, frame_h),
            PixelLayout::Bgr8 => {
                ModularImage::from_rgb8(&bgr_to_rgb(frame_pixels, 3), frame_w, frame_h)
            }
            PixelLayout::Bgra8 => {
                ModularImage::from_rgba8(&bgr_to_rgb(frame_pixels, 4), frame_w, frame_h)
            }
            PixelLayout::Gray8 => ModularImage::from_gray8(frame_pixels, frame_w, frame_h),
            PixelLayout::GrayAlpha8 => {
                ModularImage::from_grayalpha8(frame_pixels, frame_w, frame_h)
            }
            PixelLayout::Rgb16 => ModularImage::from_rgb16_native(frame_pixels, frame_w, frame_h),
            PixelLayout::Rgba16 => ModularImage::from_rgba16_native(frame_pixels, frame_w, frame_h),
            PixelLayout::Gray16 => ModularImage::from_gray16_native(frame_pixels, frame_w, frame_h),
            PixelLayout::GrayAlpha16 => {
                ModularImage::from_grayalpha16_native(frame_pixels, frame_w, frame_h)
            }
            other => return Err(EncodeError::UnsupportedPixelLayout(other)),
        }
        .map_err(EncodeError::from)?;

        let use_tree_learning = cfg.tree_learning;
        let frame_encoder = FrameEncoder::new(
            frame_w,
            frame_h,
            FrameEncoderOptions {
                use_modular: true,
                effort: cfg.effort,
                use_ans: cfg.use_ans,
                use_tree_learning,
                use_squeeze: cfg.squeeze,
                enable_lz77: cfg.lz77,
                lz77_method: cfg.lz77_method,
                lossy_palette: cfg.lossy_palette,
                encoder_mode: cfg.mode,
                profile: crate::effort::EffortProfile::lossless(cfg.effort, cfg.mode),
                have_animation: true,
                duration: frame.duration,
                is_last: i == num_frames - 1,
                crop,
                skip_rct: false,
            },
        );
        frame_encoder
            .encode_modular(&image, &color_encoding, &mut writer)
            .map_err(EncodeError::from)?;

        // Crop detection always compares against the FULL previous frame.
        prev_pixels = Some(frame.pixels);
    }

    Ok(writer.finish_with_padding())
}
3319
/// Encodes an animated frame sequence as a lossy (VarDCT) JPEG XL codestream.
///
/// Writes a single file header carrying the animation metadata, an optional
/// ICC profile, then one VarDCT frame per input frame. For frames after the
/// first, [`detect_frame_crop`] is used to encode only the region that changed
/// relative to the previous frame whenever that region is smaller than the
/// full canvas.
///
/// Returns the complete codestream bytes, or an [`EncodeError`] from input
/// validation, header/ICC writing, or per-frame encoding.
fn encode_animation_lossy(
    cfg: &LossyConfig,
    width: u32,
    height: u32,
    layout: PixelLayout,
    animation: &AnimationParams,
    frames: &[AnimationFrame<'_>],
) -> core::result::Result<Vec<u8>, EncodeError> {
    use crate::bit_writer::BitWriter;
    use crate::headers::file_header::AnimationHeader;
    use crate::headers::frame_header::FrameOptions;

    validate_animation_input(width, height, layout, frames)?;

    let w = width as usize;
    let h = height as usize;
    let num_frames = frames.len();

    // Set up VarDCT encoder: start from the effort/mode profile, then apply
    // per-config overrides below.
    let mut profile = crate::effort::EffortProfile::lossy(cfg.effort, cfg.mode);

    // Apply max_strategy_size to profile flags: disable every DCT size larger
    // than the configured cap.
    if let Some(max_size) = cfg.max_strategy_size {
        if max_size < 16 {
            profile.try_dct16 = false;
        }
        if max_size < 32 {
            profile.try_dct32 = false;
        }
        if max_size < 64 {
            profile.try_dct64 = false;
        }
    }

    // Copy the user-facing config onto the encoder's knobs. The three
    // profile-derived fields mirror the profile so later code can read them
    // directly off the encoder.
    let mut enc = crate::vardct::VarDctEncoder::new(cfg.distance);
    enc.effort = cfg.effort;
    enc.profile = profile;
    enc.use_ans = cfg.use_ans;
    enc.optimize_codes = enc.profile.optimize_codes;
    enc.custom_orders = enc.profile.custom_orders;
    enc.ac_strategy_enabled = enc.profile.ac_strategy_enabled;
    enc.enable_noise = cfg.noise;
    enc.enable_denoise = cfg.denoise;
    // libjxl gates gaborish at distance > 0.5 (enc_frame.cc:281)
    enc.enable_gaborish = cfg.gaborish && cfg.distance > 0.5;
    enc.error_diffusion = cfg.error_diffusion;
    enc.pixel_domain_loss = cfg.pixel_domain_loss;
    enc.enable_lz77 = cfg.lz77;
    enc.lz77_method = cfg.lz77_method;
    enc.force_strategy = cfg.force_strategy;
    enc.progressive = cfg.progressive;
    enc.use_lf_frame = cfg.lf_frame;
    // Quality-loop iteration counts only exist behind their feature gates.
    #[cfg(feature = "butteraugli-loop")]
    {
        enc.butteraugli_iters = cfg.butteraugli_iters;
    }
    #[cfg(feature = "ssim2-loop")]
    {
        enc.ssim2_iters = cfg.ssim2_iters;
    }
    #[cfg(feature = "zensim-loop")]
    {
        enc.zensim_iters = cfg.zensim_iters;
    }

    // Detect alpha and 16-bit from layout. Note only the integer 16-bit RGB
    // layouts set bit_depth_16 here; f32 layouts are handled by conversion.
    let has_alpha = layout.has_alpha();
    let bit_depth_16 = matches!(layout, PixelLayout::Rgb16 | PixelLayout::Rgba16);
    enc.bit_depth_16 = bit_depth_16;

    // Build file header from VarDCT encoder (sets xyb_encoded, rendering_intent, etc.)
    // then add animation metadata
    let mut file_header = enc.build_file_header(w, h, has_alpha);
    file_header.metadata.animation = Some(AnimationHeader {
        tps_numerator: animation.tps_numerator,
        tps_denominator: animation.tps_denominator,
        num_loops: animation.num_loops,
        have_timecodes: false,
    });

    // Capacity is a heuristic (4 bytes/pixel of canvas), not a limit.
    let mut writer = BitWriter::with_capacity(w * h * 4);
    file_header.write(&mut writer).map_err(EncodeError::from)?;
    if let Some(ref icc) = enc.icc_profile {
        crate::icc::write_icc(icc, &mut writer).map_err(EncodeError::from)?;
    }
    writer.zero_pad_to_byte();

    // Encode each frame with crop detection
    let bpp = layout.bytes_per_pixel();
    let mut prev_pixels: Option<&[u8]> = None;

    for (i, frame) in frames.iter().enumerate() {
        // Detect crop on raw input pixels (before linear conversion).
        // Only use crop when it's smaller than the full frame.
        let crop = if let Some(prev) = prev_pixels {
            match detect_frame_crop(prev, frame.pixels, w, h, bpp, true) {
                Some(crop) if (crop.width as usize) < w || (crop.height as usize) < h => Some(crop),
                Some(_) => None, // Crop covers full frame — no benefit
                None => {
                    // Frames identical — emit minimal 8x8 crop (VarDCT minimum)
                    Some(FrameCrop {
                        x0: 0,
                        y0: 0,
                        width: 8.min(width),
                        height: 8.min(height),
                    })
                }
            }
        } else {
            None // Frame 0: always full frame
        };

        // Extract crop region from raw pixels, then convert to linear
        let (frame_w, frame_h) = if let Some(ref crop) = crop {
            (crop.width as usize, crop.height as usize)
        } else {
            (w, h)
        };

        // `crop_pixels_owned` keeps the cropped copy alive for the borrow in
        // `src_pixels`; in the no-crop branch it is an empty Vec assigned only
        // so the binding is initialized on every path (the `let _ = &…` is a
        // no-op that silences the unused-value lint).
        let crop_pixels_owned;
        let src_pixels: &[u8] = if let Some(ref crop) = crop {
            crop_pixels_owned = extract_pixel_crop(frame.pixels, w, crop, bpp);
            &crop_pixels_owned
        } else {
            crop_pixels_owned = Vec::new();
            let _ = &crop_pixels_owned;
            frame.pixels
        };

        // Convert the (possibly cropped) raw pixels to interleaved linear f32
        // RGB plus an optional 8-bit alpha plane, per input layout.
        let (linear_rgb, alpha) = match layout {
            PixelLayout::Rgb8 => (srgb_u8_to_linear_f32(src_pixels, 3), None),
            PixelLayout::Bgr8 => (srgb_u8_to_linear_f32(&bgr_to_rgb(src_pixels, 3), 3), None),
            PixelLayout::Rgba8 => {
                let rgb = srgb_u8_to_linear_f32(src_pixels, 4);
                let alpha = extract_alpha(src_pixels, 4, 3);
                (rgb, Some(alpha))
            }
            PixelLayout::Bgra8 => {
                let swapped = bgr_to_rgb(src_pixels, 4);
                let rgb = srgb_u8_to_linear_f32(&swapped, 4);
                // Alpha sits at index 3 in both BGRA and the swapped RGBA.
                let alpha = extract_alpha(src_pixels, 4, 3);
                (rgb, Some(alpha))
            }
            PixelLayout::Gray8 => (gray_u8_to_linear_f32_rgb(src_pixels, 1), None),
            PixelLayout::GrayAlpha8 => {
                let rgb = gray_u8_to_linear_f32_rgb(src_pixels, 2);
                let alpha = extract_alpha(src_pixels, 2, 1);
                (rgb, Some(alpha))
            }
            PixelLayout::Rgb16 => (srgb_u16_to_linear_f32(src_pixels, 3), None),
            PixelLayout::Rgba16 => {
                let rgb = srgb_u16_to_linear_f32(src_pixels, 4);
                let alpha = extract_alpha_u16(src_pixels, 4, 3);
                (rgb, Some(alpha))
            }
            PixelLayout::Gray16 => (gray_u16_to_linear_f32_rgb(src_pixels, 1), None),
            PixelLayout::GrayAlpha16 => {
                let rgb = gray_u16_to_linear_f32_rgb(src_pixels, 2);
                let alpha = extract_alpha_u16(src_pixels, 2, 1);
                (rgb, Some(alpha))
            }
            PixelLayout::RgbLinearF32 => {
                let floats: &[f32] = bytemuck::cast_slice(src_pixels);
                (floats.to_vec(), None)
            }
            PixelLayout::RgbaLinearF32 => {
                let floats: &[f32] = bytemuck::cast_slice(src_pixels);
                let rgb: Vec<f32> = floats
                    .chunks(4)
                    .flat_map(|px| [px[0], px[1], px[2]])
                    .collect();
                let alpha = extract_alpha_f32(floats, 4, 3);
                (rgb, Some(alpha))
            }
            PixelLayout::GrayLinearF32 => {
                let floats: &[f32] = bytemuck::cast_slice(src_pixels);
                (gray_f32_to_linear_f32_rgb(floats, 1), None)
            }
            PixelLayout::GrayAlphaLinearF32 => {
                let floats: &[f32] = bytemuck::cast_slice(src_pixels);
                let rgb = gray_f32_to_linear_f32_rgb(floats, 2);
                let alpha = extract_alpha_f32(floats, 2, 1);
                (rgb, Some(alpha))
            }
        };

        let frame_options = FrameOptions {
            have_animation: true,
            have_timecodes: false,
            duration: frame.duration,
            is_last: i == num_frames - 1,
            crop,
        };

        enc.encode_frame_to_writer(
            frame_w,
            frame_h,
            &linear_rgb,
            alpha.as_deref(),
            &frame_options,
            &mut writer,
        )
        .map_err(EncodeError::from)?;

        // Crop detection always compares against the *uncropped* previous frame.
        prev_pixels = Some(frame.pixels);
    }

    Ok(writer.finish_with_padding())
}
3529
3530// ── Animation frame crop detection ──────────────────────────────────────────
3531
3532use crate::headers::frame_header::FrameCrop;
3533
3534/// Detects the minimal bounding rectangle that differs between two frames.
3535///
3536/// Compares `prev` and `curr` byte-by-byte. Returns `Some(FrameCrop)` with the
3537/// tight bounding box of changed pixels, or `None` if the frames are identical.
3538///
3539/// When `align_to_8x8` is true (for VarDCT), the crop is expanded outward to
3540/// 8x8 block boundaries for better compression.
3541fn detect_frame_crop(
3542    prev: &[u8],
3543    curr: &[u8],
3544    width: usize,
3545    height: usize,
3546    bytes_per_pixel: usize,
3547    align_to_8x8: bool,
3548) -> Option<FrameCrop> {
3549    let stride = width * bytes_per_pixel;
3550    debug_assert_eq!(prev.len(), height * stride);
3551    debug_assert_eq!(curr.len(), height * stride);
3552
3553    // Find top (first row with a difference)
3554    let mut top = height;
3555    let mut bottom = 0;
3556    let mut left = width;
3557    let mut right = 0;
3558
3559    for y in 0..height {
3560        let row_start = y * stride;
3561        let prev_row = &prev[row_start..row_start + stride];
3562        let curr_row = &curr[row_start..row_start + stride];
3563
3564        // Fast row comparison via u64 chunks — lets the compiler auto-vectorize
3565        let (prev_prefix, prev_u64, prev_suffix) = bytemuck::pod_align_to::<u8, u64>(prev_row);
3566        let (curr_prefix, curr_u64, curr_suffix) = bytemuck::pod_align_to::<u8, u64>(curr_row);
3567        if prev_prefix == curr_prefix && prev_u64 == curr_u64 && prev_suffix == curr_suffix {
3568            continue;
3569        }
3570
3571        // This row has differences — find leftmost and rightmost changed pixel
3572        if top == height {
3573            top = y;
3574        }
3575        bottom = y;
3576
3577        // Scan from left to find first differing pixel
3578        for x in 0..width {
3579            let px_start = x * bytes_per_pixel;
3580            if prev_row[px_start..px_start + bytes_per_pixel]
3581                != curr_row[px_start..px_start + bytes_per_pixel]
3582            {
3583                left = left.min(x);
3584                break;
3585            }
3586        }
3587        // Scan from right to find last differing pixel
3588        for x in (0..width).rev() {
3589            let px_start = x * bytes_per_pixel;
3590            if prev_row[px_start..px_start + bytes_per_pixel]
3591                != curr_row[px_start..px_start + bytes_per_pixel]
3592            {
3593                right = right.max(x);
3594                break;
3595            }
3596        }
3597    }
3598
3599    if top == height {
3600        // Frames are identical
3601        return None;
3602    }
3603
3604    // Convert to crop rectangle (inclusive → exclusive for width/height)
3605    let mut crop_x = left as i32;
3606    let mut crop_y = top as i32;
3607    let mut crop_w = (right - left + 1) as u32;
3608    let mut crop_h = (bottom - top + 1) as u32;
3609
3610    if align_to_8x8 {
3611        // Expand to 8x8 block boundaries
3612        let aligned_x = (crop_x / 8) * 8;
3613        let aligned_y = (crop_y / 8) * 8;
3614        let end_x = (crop_x as u32 + crop_w).div_ceil(8) * 8;
3615        let end_y = (crop_y as u32 + crop_h).div_ceil(8) * 8;
3616        crop_x = aligned_x;
3617        crop_y = aligned_y;
3618        crop_w = end_x.min(width as u32) - aligned_x as u32;
3619        crop_h = end_y.min(height as u32) - aligned_y as u32;
3620    }
3621
3622    Some(FrameCrop {
3623        x0: crop_x,
3624        y0: crop_y,
3625        width: crop_w,
3626        height: crop_h,
3627    })
3628}
3629
3630/// Extracts a rectangular crop region from a pixel buffer.
3631///
3632/// `bytes_per_pixel` is the number of bytes per pixel (e.g., 3 for RGB, 4 for RGBA).
3633fn extract_pixel_crop(
3634    pixels: &[u8],
3635    full_width: usize,
3636    crop: &FrameCrop,
3637    bytes_per_pixel: usize,
3638) -> Vec<u8> {
3639    let cx = crop.x0 as usize;
3640    let cy = crop.y0 as usize;
3641    let cw = crop.width as usize;
3642    let ch = crop.height as usize;
3643    let stride = full_width * bytes_per_pixel;
3644
3645    let mut out = Vec::with_capacity(cw * ch * bytes_per_pixel);
3646    for y in cy..cy + ch {
3647        let row_start = y * stride + cx * bytes_per_pixel;
3648        out.extend_from_slice(&pixels[row_start..row_start + cw * bytes_per_pixel]);
3649    }
3650    out
3651}
3652
3653// ── Pixel conversion helpers ────────────────────────────────────────────────
3654
/// Pre-computed sRGB u8 → linear f32 lookup table (256 entries).
///
/// Eliminates per-pixel `powf(2.4)` calls for the common 8-bit path. Built at
/// compile time; since `f64::powf` is not `const`, the gamma segment is
/// evaluated as x^2.4 = (x^12)^(1/5), with the fifth root computed by
/// Newton's method in f64.
const SRGB_U8_TO_LINEAR: [f32; 256] = {
    let mut table = [0.0f32; 256];
    // u16 counter so `i < 256` can actually terminate (a u8 would wrap at 255)
    let mut i = 0u16;
    while i < 256 {
        let c = i as f64 / 255.0;
        // Piecewise IEC 61966-2-1 sRGB transfer function, computed in f64 for
        // accuracy during const eval, then truncated to f32 for the table.
        table[i as usize] = if c <= 0.04045 {
            // Linear toe segment
            (c / 12.92) as f32
        } else {
            // Gamma segment: ((c + 0.055) / 1.055)^2.4.
            // powf is not const, so use the identity x^2.4 = (x^12)^(1/5):
            // x^12 needs only multiplications, and the fifth root is
            // obtainable by Newton's method — both const-friendly.
            let base = (c + 0.055) / 1.055;
            // x^12 via repeated squaring: x^2 → x^4 → x^8, then x^8 * x^4
            let x2 = base * base;
            let x4 = x2 * x2;
            let x8 = x4 * x4;
            let x12 = x8 * x4;
            // Newton iteration for y = x12^(1/5):
            //   y_{n+1} = y_n - (y_n^5 - x12) / (5 * y_n^4)
            //           = (4*y_n + x12/y_n^4) / 5
            let mut y = base * base; // initial guess x^2, close to x^2.4 on [0,1]
            // 8 iterations of Newton's method for fifth root (converges in ~6 for f64)
            let mut iter = 0;
            while iter < 8 {
                let y2 = y * y;
                let y4 = y2 * y2;
                y = (4.0 * y + x12 / y4) / 5.0;
                iter += 1;
            }
            y as f32
        };
        i += 1;
    }
    table
};
3698
3699/// sRGB u8 → linear f32 via LUT.
3700#[inline]
3701fn srgb_to_linear(c: u8) -> f32 {
3702    SRGB_U8_TO_LINEAR[c as usize]
3703}
3704
3705fn srgb_u8_to_linear_f32(data: &[u8], channels: usize) -> Vec<f32> {
3706    let num_pixels = data.len() / channels;
3707    let mut out = vec![0.0f32; num_pixels * 3];
3708    let lut = &SRGB_U8_TO_LINEAR;
3709    // zip chunks to eliminate output bounds checks; u8 index into [f32; 256] is always in bounds
3710    for (px, rgb) in data.chunks_exact(channels).zip(out.chunks_exact_mut(3)) {
3711        rgb[0] = lut[px[0] as usize];
3712        rgb[1] = lut[px[1] as usize];
3713        rgb[2] = lut[px[2] as usize];
3714    }
3715    out
3716}
3717
3718/// sRGB u16 → linear f32 (IEC 61966-2-1).
3719fn srgb_u16_to_linear_f32(data: &[u8], channels: usize) -> Vec<f32> {
3720    let pixels: &[u16] = bytemuck::cast_slice(data);
3721    pixels
3722        .chunks(channels)
3723        .flat_map(|px| {
3724            [
3725                srgb_to_linear_f(px[0] as f32 / 65535.0),
3726                srgb_to_linear_f(px[1] as f32 / 65535.0),
3727                srgb_to_linear_f(px[2] as f32 / 65535.0),
3728            ]
3729        })
3730        .collect()
3731}
3732
3733/// sRGB transfer function: normalized float [0,1] → linear float.
3734#[inline]
3735fn srgb_to_linear_f(c: f32) -> f32 {
3736    if c <= 0.04045 {
3737        c / 12.92
3738    } else {
3739        jxl_simd::fast_powf((c + 0.055) / 1.055, 2.4)
3740    }
3741}
3742
3743/// Gamma u8 → linear f32 RGB. `linear = (encoded/255)^(1/gamma)`
3744fn gamma_u8_to_linear_f32(data: &[u8], channels: usize, gamma: f32) -> Vec<f32> {
3745    // Build 256-entry LUT for u8 values (avoids per-pixel powf)
3746    let inv_gamma = 1.0 / gamma;
3747    let lut: [f32; 256] =
3748        core::array::from_fn(|i| jxl_simd::fast_powf(i as f32 / 255.0, inv_gamma));
3749    data.chunks(channels)
3750        .flat_map(|px| {
3751            [
3752                lut[px[0] as usize],
3753                lut[px[1] as usize],
3754                lut[px[2] as usize],
3755            ]
3756        })
3757        .collect()
3758}
3759
3760/// Gamma u16 → linear f32 RGB. `linear = (encoded/65535)^(1/gamma)`
3761fn gamma_u16_to_linear_f32(data: &[u8], channels: usize, gamma: f32) -> Vec<f32> {
3762    let inv_gamma = 1.0 / gamma;
3763    let pixels: &[u16] = bytemuck::cast_slice(data);
3764    pixels
3765        .chunks(channels)
3766        .flat_map(|px| {
3767            [
3768                jxl_simd::fast_powf(px[0] as f32 / 65535.0, inv_gamma),
3769                jxl_simd::fast_powf(px[1] as f32 / 65535.0, inv_gamma),
3770                jxl_simd::fast_powf(px[2] as f32 / 65535.0, inv_gamma),
3771            ]
3772        })
3773        .collect()
3774}
3775
3776/// Gamma u8 grayscale → linear f32 RGB (gray→R=G=B). `linear = (encoded/255)^(1/gamma)`
3777fn gamma_gray_u8_to_linear_f32_rgb(data: &[u8], stride: usize, gamma: f32) -> Vec<f32> {
3778    let inv_gamma = 1.0 / gamma;
3779    let lut: [f32; 256] =
3780        core::array::from_fn(|i| jxl_simd::fast_powf(i as f32 / 255.0, inv_gamma));
3781    data.chunks(stride)
3782        .flat_map(|px| {
3783            let v = lut[px[0] as usize];
3784            [v, v, v]
3785        })
3786        .collect()
3787}
3788
3789/// Gamma u16 grayscale → linear f32 RGB (gray→R=G=B). `linear = (encoded/65535)^(1/gamma)`
3790fn gamma_gray_u16_to_linear_f32_rgb(data: &[u8], stride: usize, gamma: f32) -> Vec<f32> {
3791    let inv_gamma = 1.0 / gamma;
3792    let pixels: &[u16] = bytemuck::cast_slice(data);
3793    pixels
3794        .chunks(stride)
3795        .flat_map(|px| {
3796            let v = jxl_simd::fast_powf(px[0] as f32 / 65535.0, inv_gamma);
3797            [v, v, v]
3798        })
3799        .collect()
3800}
3801
/// Extract alpha channel from interleaved 16-bit pixel data as u8 (quantized
/// by dropping the low byte of each sample).
///
/// `stride` is the number of u16 samples per pixel (e.g. 4 for RGBA16) and
/// `alpha_offset` the alpha channel's index within a pixel. Samples are read
/// in native byte order, matching the previous
/// `bytemuck::cast_slice::<u8, u16>` behavior — but without its panic when the
/// input slice is not 2-byte aligned (frame pixel buffers arrive as arbitrary
/// `&[u8]`, so alignment is not guaranteed).
fn extract_alpha_u16(data: &[u8], stride: usize, alpha_offset: usize) -> Vec<u8> {
    let byte_off = alpha_offset * 2;
    data.chunks(stride * 2)
        .map(|px| {
            // Native-endian load of the alpha sample; keep only the high byte.
            let a = u16::from_ne_bytes([px[byte_off], px[byte_off + 1]]);
            (a >> 8) as u8
        })
        .collect()
}
3810
/// Swap B and R channels: BGR(A) → RGB(A), returning a new buffer.
fn bgr_to_rgb(data: &[u8], stride: usize) -> Vec<u8> {
    let mut rgb = data.to_vec();
    // Exchanging channels 0 and 2 in each pixel converts BGR(A) ↔ RGB(A);
    // any alpha channel at a higher index is untouched.
    rgb.chunks_mut(stride).for_each(|px| px.swap(0, 2));
    rgb
}
3819
/// Extract a single channel from interleaved pixel data.
fn extract_alpha(data: &[u8], stride: usize, alpha_offset: usize) -> Vec<u8> {
    let mut alpha = Vec::with_capacity(data.len() / stride);
    for px in data.chunks(stride) {
        alpha.push(px[alpha_offset]);
    }
    alpha
}
3824
/// Extract alpha from interleaved f32 pixel data, converting to u8 (0..255).
fn extract_alpha_f32(data: &[f32], stride: usize, alpha_offset: usize) -> Vec<u8> {
    let mut alpha = Vec::with_capacity(data.len() / stride);
    for px in data.chunks(stride) {
        // Clamp to [0,1], scale to 0..255, round to nearest.
        let a = px[alpha_offset].clamp(0.0, 1.0);
        alpha.push((a * 255.0 + 0.5) as u8);
    }
    alpha
}
3831
3832/// Expand 8-bit sRGB grayscale to linear f32 RGB (gray→R=G=B).
3833fn gray_u8_to_linear_f32_rgb(data: &[u8], stride: usize) -> Vec<f32> {
3834    data.chunks(stride)
3835        .flat_map(|px| {
3836            let v = srgb_to_linear(px[0]);
3837            [v, v, v]
3838        })
3839        .collect()
3840}
3841
3842/// Expand 16-bit sRGB grayscale to linear f32 RGB (gray→R=G=B).
3843fn gray_u16_to_linear_f32_rgb(data: &[u8], stride: usize) -> Vec<f32> {
3844    let pixels: &[u16] = bytemuck::cast_slice(data);
3845    pixels
3846        .chunks(stride)
3847        .flat_map(|px| {
3848            let v = srgb_to_linear_f(px[0] as f32 / 65535.0);
3849            [v, v, v]
3850        })
3851        .collect()
3852}
3853
/// Expand linear f32 grayscale to linear f32 RGB (gray→R=G=B).
fn gray_f32_to_linear_f32_rgb(data: &[f32], stride: usize) -> Vec<f32> {
    let mut rgb = Vec::with_capacity(data.len() / stride * 3);
    for px in data.chunks(stride) {
        // Replicate the gray sample into all three channels; extra channels
        // (e.g. alpha at stride 2) are skipped here.
        let v = px[0];
        rgb.extend_from_slice(&[v, v, v]);
    }
    rgb
}
3863
3864// ── Tests ───────────────────────────────────────────────────────────────────
3865
3866#[cfg(test)]
3867mod tests {
3868    use super::*;
3869
3870    #[test]
3871    fn test_lossless_config_builder_and_getters() {
3872        let cfg = LosslessConfig::new()
3873            .with_effort(5)
3874            .with_ans(false)
3875            .with_squeeze(true)
3876            .with_tree_learning(true);
3877        assert_eq!(cfg.effort(), 5);
3878        assert!(!cfg.ans());
3879        assert!(cfg.squeeze());
3880        assert!(cfg.tree_learning());
3881    }
3882
3883    #[test]
3884    fn test_lossy_config_builder_and_getters() {
3885        let cfg = LossyConfig::new(2.0)
3886            .with_effort(3)
3887            .with_gaborish(false)
3888            .with_noise(true);
3889        assert_eq!(cfg.distance(), 2.0);
3890        assert_eq!(cfg.effort(), 3);
3891        assert!(!cfg.gaborish());
3892        assert!(cfg.noise());
3893    }
3894
3895    #[test]
3896    fn test_pixel_layout_helpers() {
3897        assert_eq!(PixelLayout::Rgb8.bytes_per_pixel(), 3);
3898        assert_eq!(PixelLayout::Rgba8.bytes_per_pixel(), 4);
3899        assert_eq!(PixelLayout::Bgr8.bytes_per_pixel(), 3);
3900        assert_eq!(PixelLayout::Bgra8.bytes_per_pixel(), 4);
3901        assert_eq!(PixelLayout::Gray8.bytes_per_pixel(), 1);
3902        assert_eq!(PixelLayout::GrayAlpha8.bytes_per_pixel(), 2);
3903        assert_eq!(PixelLayout::Rgb16.bytes_per_pixel(), 6);
3904        assert_eq!(PixelLayout::Rgba16.bytes_per_pixel(), 8);
3905        assert_eq!(PixelLayout::Gray16.bytes_per_pixel(), 2);
3906        assert_eq!(PixelLayout::GrayAlpha16.bytes_per_pixel(), 4);
3907        assert_eq!(PixelLayout::RgbLinearF32.bytes_per_pixel(), 12);
3908        assert_eq!(PixelLayout::RgbaLinearF32.bytes_per_pixel(), 16);
3909        assert_eq!(PixelLayout::GrayLinearF32.bytes_per_pixel(), 4);
3910        assert_eq!(PixelLayout::GrayAlphaLinearF32.bytes_per_pixel(), 8);
3911        // Linear
3912        assert!(!PixelLayout::Rgb8.is_linear());
3913        assert!(PixelLayout::RgbLinearF32.is_linear());
3914        assert!(PixelLayout::RgbaLinearF32.is_linear());
3915        assert!(PixelLayout::GrayLinearF32.is_linear());
3916        assert!(PixelLayout::GrayAlphaLinearF32.is_linear());
3917        assert!(!PixelLayout::Rgb16.is_linear());
3918        // Alpha
3919        assert!(!PixelLayout::Rgb8.has_alpha());
3920        assert!(PixelLayout::Rgba8.has_alpha());
3921        assert!(PixelLayout::Bgra8.has_alpha());
3922        assert!(PixelLayout::GrayAlpha8.has_alpha());
3923        assert!(PixelLayout::Rgba16.has_alpha());
3924        assert!(PixelLayout::GrayAlpha16.has_alpha());
3925        assert!(PixelLayout::RgbaLinearF32.has_alpha());
3926        assert!(PixelLayout::GrayAlphaLinearF32.has_alpha());
3927        assert!(!PixelLayout::Rgb16.has_alpha());
3928        assert!(!PixelLayout::RgbLinearF32.has_alpha());
3929        // 16-bit
3930        assert!(PixelLayout::Rgb16.is_16bit());
3931        assert!(PixelLayout::Rgba16.is_16bit());
3932        assert!(PixelLayout::Gray16.is_16bit());
3933        assert!(PixelLayout::GrayAlpha16.is_16bit());
3934        assert!(!PixelLayout::Rgb8.is_16bit());
3935        assert!(!PixelLayout::RgbLinearF32.is_16bit());
3936        // f32
3937        assert!(PixelLayout::RgbLinearF32.is_f32());
3938        assert!(PixelLayout::RgbaLinearF32.is_f32());
3939        assert!(PixelLayout::GrayLinearF32.is_f32());
3940        assert!(PixelLayout::GrayAlphaLinearF32.is_f32());
3941        assert!(!PixelLayout::Rgb8.is_f32());
3942        assert!(!PixelLayout::Rgb16.is_f32());
3943        // Grayscale
3944        assert!(PixelLayout::Gray8.is_grayscale());
3945        assert!(PixelLayout::GrayAlpha8.is_grayscale());
3946        assert!(PixelLayout::Gray16.is_grayscale());
3947        assert!(PixelLayout::GrayAlpha16.is_grayscale());
3948        assert!(PixelLayout::GrayLinearF32.is_grayscale());
3949        assert!(PixelLayout::GrayAlphaLinearF32.is_grayscale());
3950        assert!(!PixelLayout::Rgb16.is_grayscale());
3951        assert!(!PixelLayout::RgbLinearF32.is_grayscale());
3952    }
3953
3954    #[test]
3955    fn test_quality_to_distance() {
3956        assert!(Quality::Distance(1.0).to_distance().unwrap() == 1.0);
3957        assert!(Quality::Distance(-1.0).to_distance().is_err());
3958        assert!(Quality::Percent(100).to_distance().is_err()); // lossless invalid for lossy
3959        assert!(Quality::Percent(90).to_distance().unwrap() == 1.0);
3960    }
3961
3962    #[test]
3963    fn test_pixel_validation() {
3964        let cfg = LosslessConfig::new();
3965        let req = cfg.encode_request(2, 2, PixelLayout::Rgb8);
3966        assert!(req.validate_pixels(&[0u8; 12]).is_ok());
3967    }
3968
3969    #[test]
3970    fn test_pixel_validation_wrong_size() {
3971        let cfg = LosslessConfig::new();
3972        let req = cfg.encode_request(2, 2, PixelLayout::Rgb8);
3973        assert!(req.validate_pixels(&[0u8; 11]).is_err());
3974    }
3975
3976    #[test]
3977    fn test_limits_check() {
3978        let limits = Limits::new().with_max_width(100);
3979        let cfg = LosslessConfig::new();
3980        let req = cfg
3981            .encode_request(200, 100, PixelLayout::Rgb8)
3982            .with_limits(&limits);
3983        assert!(req.check_limits().is_err());
3984    }
3985
3986    #[test]
3987    fn test_lossless_encode_rgb8_small() {
3988        // 4x4 red image
3989        let pixels = [255u8, 0, 0].repeat(16);
3990        let result = LosslessConfig::new()
3991            .encode_request(4, 4, PixelLayout::Rgb8)
3992            .encode(&pixels);
3993        assert!(result.is_ok());
3994        let jxl = result.unwrap();
3995        assert_eq!(&jxl[..2], &[0xFF, 0x0A]); // JXL signature
3996    }
3997
3998    #[test]
3999    fn test_lossy_encode_rgb8_small() {
4000        // 8x8 gradient
4001        let mut pixels = Vec::with_capacity(8 * 8 * 3);
4002        for y in 0..8u8 {
4003            for x in 0..8u8 {
4004                pixels.push(x * 32);
4005                pixels.push(y * 32);
4006                pixels.push(128);
4007            }
4008        }
4009        let result = LossyConfig::new(2.0)
4010            .with_gaborish(false)
4011            .encode_request(8, 8, PixelLayout::Rgb8)
4012            .encode(&pixels);
4013        assert!(result.is_ok());
4014        let jxl = result.unwrap();
4015        assert_eq!(&jxl[..2], &[0xFF, 0x0A]);
4016    }
4017
4018    #[test]
4019    fn test_fluent_lossless() {
4020        let pixels = vec![128u8; 4 * 4 * 3];
4021        let result = LosslessConfig::new().encode(&pixels, 4, 4, PixelLayout::Rgb8);
4022        assert!(result.is_ok());
4023    }
4024
4025    #[test]
4026    fn test_lossy_gray8() {
4027        // Grayscale input → RGB expansion → VarDCT (XYB)
4028        let pixels = vec![128u8; 8 * 8];
4029        let result = LossyConfig::new(2.0)
4030            .with_gaborish(false)
4031            .encode_request(8, 8, PixelLayout::Gray8)
4032            .encode(&pixels);
4033        assert!(result.is_ok(), "lossy Gray8 should encode: {result:?}");
4034    }
4035
4036    #[test]
4037    fn test_lossy_gray_alpha8() {
4038        let pixels: Vec<u8> = (0..8 * 8).flat_map(|_| [128u8, 255]).collect();
4039        let result = LossyConfig::new(2.0)
4040            .with_gaborish(false)
4041            .encode_request(8, 8, PixelLayout::GrayAlpha8)
4042            .encode(&pixels);
4043        assert!(result.is_ok(), "lossy GrayAlpha8 should encode: {result:?}");
4044    }
4045
4046    #[test]
4047    fn test_lossy_gray16() {
4048        let pixels_u16: Vec<u16> = (0..8 * 8).map(|_| 32768u16).collect();
4049        let pixels: &[u8] = bytemuck::cast_slice(&pixels_u16);
4050        let result = LossyConfig::new(2.0)
4051            .with_gaborish(false)
4052            .encode_request(8, 8, PixelLayout::Gray16)
4053            .encode(pixels);
4054        assert!(result.is_ok(), "lossy Gray16 should encode: {result:?}");
4055    }
4056
4057    #[test]
4058    fn test_lossy_rgba_linear_f32() {
4059        let pixels_f32: Vec<f32> = (0..8 * 8).flat_map(|_| [0.5f32, 0.3, 0.7, 1.0]).collect();
4060        let pixels: &[u8] = bytemuck::cast_slice(&pixels_f32);
4061        let result = LossyConfig::new(2.0)
4062            .with_gaborish(false)
4063            .encode_request(8, 8, PixelLayout::RgbaLinearF32)
4064            .encode(pixels);
4065        assert!(
4066            result.is_ok(),
4067            "lossy RgbaLinearF32 should encode: {result:?}"
4068        );
4069    }
4070
4071    #[test]
4072    fn test_lossy_gray_linear_f32() {
4073        let pixels_f32: Vec<f32> = (0..8 * 8).map(|_| 0.5f32).collect();
4074        let pixels: &[u8] = bytemuck::cast_slice(&pixels_f32);
4075        let result = LossyConfig::new(2.0)
4076            .with_gaborish(false)
4077            .encode_request(8, 8, PixelLayout::GrayLinearF32)
4078            .encode(pixels);
4079        assert!(
4080            result.is_ok(),
4081            "lossy GrayLinearF32 should encode: {result:?}"
4082        );
4083    }
4084
4085    #[test]
4086    fn test_lossless_grayalpha8() {
4087        let pixels: Vec<u8> = (0..8 * 8).flat_map(|_| [200u8, 255]).collect();
4088        let result = LosslessConfig::new().encode(&pixels, 8, 8, PixelLayout::GrayAlpha8);
4089        assert!(
4090            result.is_ok(),
4091            "lossless GrayAlpha8 should encode: {result:?}"
4092        );
4093    }
4094
4095    #[test]
4096    fn test_lossless_grayalpha16() {
4097        let pixels_u16: Vec<u16> = (0..8 * 8).flat_map(|_| [32768u16, 65535]).collect();
4098        let pixels: &[u8] = bytemuck::cast_slice(&pixels_u16);
4099        let result = LosslessConfig::new().encode(pixels, 8, 8, PixelLayout::GrayAlpha16);
4100        assert!(
4101            result.is_ok(),
4102            "lossless GrayAlpha16 should encode: {result:?}"
4103        );
4104    }
4105
4106    #[test]
4107    fn test_bgra_lossless() {
4108        // 4x4 red image in BGRA (B=0, G=0, R=255, A=255)
4109        let pixels = [0u8, 0, 255, 255].repeat(16);
4110        let result = LosslessConfig::new().encode(&pixels, 4, 4, PixelLayout::Bgra8);
4111        assert!(result.is_ok());
4112        let jxl = result.unwrap();
4113        assert_eq!(&jxl[..2], &[0xFF, 0x0A]);
4114    }
4115
4116    #[test]
4117    fn test_lossy_alpha_encodes() {
4118        // Lossy+alpha: VarDCT RGB + modular alpha extra channel
4119        let pixels = [255u8, 0, 0, 255].repeat(64);
4120        let result =
4121            LossyConfig::new(2.0)
4122                .with_gaborish(false)
4123                .encode(&pixels, 8, 8, PixelLayout::Bgra8);
4124        assert!(
4125            result.is_ok(),
4126            "BGRA lossy encode failed: {:?}",
4127            result.err()
4128        );
4129
4130        let result2 = LossyConfig::new(2.0).encode(&pixels, 8, 8, PixelLayout::Rgba8);
4131        assert!(
4132            result2.is_ok(),
4133            "RGBA lossy encode failed: {:?}",
4134            result2.err()
4135        );
4136    }
4137
4138    #[test]
4139    fn test_stop_cancellation() {
4140        use enough::Unstoppable;
4141        // Unstoppable should not cancel
4142        let pixels = vec![128u8; 4 * 4 * 3];
4143        let cfg = LosslessConfig::new();
4144        let result = cfg
4145            .encode_request(4, 4, PixelLayout::Rgb8)
4146            .with_stop(&Unstoppable)
4147            .encode(&pixels);
4148        assert!(result.is_ok());
4149    }
4150
4151    #[test]
4152    fn test_lossy_palette_encode() {
4153        // 16x16 RGB image with 4 colors + slight noise
4154        let colors = [[255u8, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0]];
4155        let mut pixels = Vec::with_capacity(16 * 16 * 3);
4156        for y in 0..16u8 {
4157            for x in 0..16u8 {
4158                let ci = ((y / 4) * 4 + x / 4) as usize % 4;
4159                let noise = ((x.wrapping_mul(7).wrapping_add(y.wrapping_mul(13))) % 5) as i16 - 2;
4160                for &channel in &colors[ci][..3] {
4161                    let v = (channel as i16 + noise).clamp(0, 255) as u8;
4162                    pixels.push(v);
4163                }
4164            }
4165        }
4166        let cfg = LosslessConfig::new()
4167            .with_lossy_palette(true)
4168            .with_ans(true);
4169        let result = cfg.encode(&pixels, 16, 16, PixelLayout::Rgb8);
4170        assert!(
4171            result.is_ok(),
4172            "lossy palette encode failed: {:?}",
4173            result.err()
4174        );
4175        let jxl = result.unwrap();
4176        assert_eq!(&jxl[..2], &[0xFF, 0x0A], "JXL signature");
4177
4178        // Verify jxl-oxide can parse and decode it
4179        let cursor = std::io::Cursor::new(&jxl);
4180        let reader = std::io::BufReader::new(cursor);
4181        let image = jxl_oxide::JxlImage::builder()
4182            .read(reader)
4183            .expect("jxl-oxide parse");
4184        assert!(
4185            image.width() > 0,
4186            "decoded image should have non-zero width"
4187        );
4188    }
4189
    #[test]
    fn test_lossy_palette_multi_group() {
        // 300x300 RGB image with 8 dominant colors + noise. Being larger than
        // 256x256 forces multiple encoding groups, exercising the multi-group
        // lossy-palette path.
        let colors = [
            [255u8, 0, 0],
            [0, 255, 0],
            [0, 0, 255],
            [255, 255, 0],
            [255, 0, 255],
            [0, 255, 255],
            [128, 128, 128],
            [64, 64, 64],
        ];
        let mut pixels = Vec::with_capacity(300 * 300 * 3);
        for y in 0..300u32 {
            for x in 0..300u32 {
                // 40x40 tiles cycle through the base colors.
                let ci = ((y / 40) * 8 + x / 40) as usize % colors.len();
                // Deterministic per-pixel jitter in -3..=3 so the image is
                // near-paletted rather than exactly paletted.
                let noise = ((x.wrapping_mul(7).wrapping_add(y.wrapping_mul(13))) % 7) as i16 - 3;
                for &channel in &colors[ci][..3] {
                    let v = (channel as i16 + noise).clamp(0, 255) as u8;
                    pixels.push(v);
                }
            }
        }

        // Encode with lossy palette + ANS (multi-group)
        let cfg = LosslessConfig::new()
            .with_lossy_palette(true)
            .with_ans(true);
        let jxl = cfg
            .encode(&pixels, 300, 300, PixelLayout::Rgb8)
            .expect("lossy palette multi-group encode");
        assert_eq!(&jxl[..2], &[0xFF, 0x0A], "JXL signature");
        assert!(jxl.len() < 300 * 300 * 3, "should compress");

        // Save to disk for inspection
        let out = crate::test_helpers::output_dir("lossy_palette");
        let jxl_out = out.join("lossy_palette_multi.jxl");
        let png_out = out.join("lossy_palette_multi.png");
        std::fs::write(&jxl_out, &jxl).ok();
        eprintln!(
            "LOSSY_PALETTE_MULTI test: encoded {} bytes ({}x{})",
            jxl.len(),
            300,
            300
        );

        // Try djxl decode first for better error messages. Best-effort: if the
        // djxl binary is not installed, the Err is simply ignored.
        let djxl_result = std::process::Command::new("djxl")
            .args([jxl_out.to_str().unwrap(), png_out.to_str().unwrap()])
            .output();
        if let Ok(output) = djxl_result {
            eprintln!(
                "djxl: status={}, stderr={}",
                output.status,
                String::from_utf8_lossy(&output.stderr)
            );
        }

        // Verify jxl-rs can decode it
        let decoded = crate::test_helpers::decode_with_jxl_rs(&jxl).expect("jxl-rs decode failed");
        assert_eq!(decoded.width, 300);
        assert_eq!(decoded.height, 300);
        assert_eq!(decoded.channels, 3);

        // Verify lossy quality: each sample should be within 80 of the original
        // (delta-palette error; see the assertion below).
        // decoded.pixels is f32 in [0.0, 1.0] — convert to u8 for comparison
        let mut max_error = 0i32;
        // (x, y, channel) of the worst-deviating sample, kept for diagnostics.
        let mut error_pos = (0, 0, 0);
        for (i, (&orig, &dec)) in pixels.iter().zip(decoded.pixels.iter()).enumerate() {
            let dec_u8 = (dec * 255.0).round().clamp(0.0, 255.0) as u8;
            let diff = (orig as i32 - dec_u8 as i32).abs();
            if diff > max_error {
                max_error = diff;
                let pixel = i / 3;
                error_pos = (pixel % 300, pixel / 300, i % 3);
            }
        }
        // Recompute the flat byte index of the worst sample for the log line.
        let err_idx = error_pos.1 * 300 * 3 + error_pos.0 * 3 + error_pos.2;
        let dec_u8 = (decoded.pixels[err_idx] * 255.0).round().clamp(0.0, 255.0) as u8;
        eprintln!(
            "max_error={} at ({},{}) ch={}, orig={} decoded={}",
            max_error, error_pos.0, error_pos.1, error_pos.2, pixels[err_idx], dec_u8,
        );
        assert!(
            max_error <= 80,
            "lossy palette max error {} too large (expected <= 80)",
            max_error
        );
    }
4280
    #[test]
    fn test_palette_256_colors_regression() {
        // Regression test for palette+ANS checksum mismatch with many unique colors.
        // Root cause was u2S bit width bug in write_palette_transform (fixed Feb 17, 2026):
        // nb_colors selectors 1-2 used 11/14 bits instead of 10/12 bits. Triggered when
        // nb_colors >= 256 (selector 1). Two test cases:
        //
        // 1. 32x32 with 256 unique colors via standard API (passes 50% heuristic)
        // 2. 16x16 with 256 unique colors via internal API (bypasses heuristic)
        use crate::modular::channel::{Channel, ModularImage};
        use crate::modular::encode::write_modular_stream_with_palette;

        // Test 1: 32x32 through standard API (256 colors, each used 4x)
        // G and B are derived from R via multiply-add so every triple is unique.
        let mut pixels = Vec::with_capacity(32 * 32 * 3);
        for i in 0..1024u32 {
            let idx = (i / 4) as u8;
            pixels.push(idx);
            pixels.push(((idx as u32 * 7 + 13) & 0xFF) as u8);
            pixels.push(((idx as u32 * 31 + 97) & 0xFF) as u8);
        }
        let cfg = LosslessConfig::new().with_ans(true);
        let jxl = cfg
            .encode(&pixels, 32, 32, PixelLayout::Rgb8)
            .expect("palette 256-colors encode");
        // Round-trip must be bit-exact: compare every decoded byte against input.
        let decoded = crate::test_helpers::decode_with_jxl_rs(&jxl).expect("jxl-rs decode failed");
        for (i, (&orig, &dec)) in pixels.iter().zip(decoded.pixels.iter()).enumerate() {
            let dec_u8 = (dec * 255.0).round().clamp(0.0, 255.0) as u8;
            assert_eq!(
                orig, dec_u8,
                "32x32: mismatch at byte {}: orig={} decoded={}",
                i, orig, dec_u8
            );
        }

        // Test 2: 16x16 via internal API (bypasses 50% heuristic)
        // Each of the 3 channels gets a distinct deterministic pattern so all
        // 256 pixel values are unique.
        let mut channels = Vec::new();
        for c in 0..3 {
            let mut ch = Channel::new(16, 16).unwrap();
            for y in 0..16 {
                for x in 0..16 {
                    let idx = y * 16 + x;
                    let val = match c {
                        0 => idx as i32,
                        1 => ((idx * 3 + 17) & 0xFF) as i32,
                        2 => (255 - idx) as i32,
                        _ => 0,
                    };
                    ch.set(x, y, val);
                }
            }
            channels.push(ch);
        }
        let image = ModularImage {
            channels,
            bit_depth: 8,
            is_grayscale: false,
            has_alpha: false,
        };
        // Only checks that encoding succeeds — the u2S bug made this path fail
        // outright, so a clean return is the regression signal here.
        let mut writer = crate::bit_writer::BitWriter::new();
        write_modular_stream_with_palette(&image, &mut writer, true, 0, 3)
            .expect("palette encode with 256 unique colors must not fail");
    }
4343
4344    #[test]
4345    fn test_16bit_tree_learning() {
4346        // Test multiple 16-bit scenarios that previously failed
4347        for &(w, h, layout, label) in &[
4348            (32u32, 32u32, PixelLayout::Rgb16, "32x32 RGB16"),
4349            (8, 8, PixelLayout::Rgba16, "8x8 RGBA16"),
4350            (8, 8, PixelLayout::Rgb16, "8x8 RGB16"),
4351            (16, 16, PixelLayout::Gray16, "16x16 Gray16"),
4352        ] {
4353            let nc = layout.bytes_per_pixel()
4354                / if layout.is_16bit() {
4355                    2
4356                } else if layout.is_f32() {
4357                    4
4358                } else {
4359                    1
4360                };
4361            let mut pixels = vec![0u16; (w * h) as usize * nc];
4362            for y in 0..h {
4363                for x in 0..w {
4364                    let idx = ((y * w + x) as usize) * nc;
4365                    pixels[idx] = (x * 2048) as u16;
4366                    if nc >= 2 {
4367                        pixels[idx + 1] = (y * 2048) as u16;
4368                    }
4369                    if nc >= 3 {
4370                        pixels[idx + 2] = ((x + y) * 1024) as u16;
4371                    }
4372                    if nc >= 4 {
4373                        pixels[idx + 3] = 65535; // opaque alpha
4374                    }
4375                }
4376            }
4377            let bytes: Vec<u8> = pixels.iter().flat_map(|v| v.to_ne_bytes()).collect();
4378
4379            let cfg = LosslessConfig::new().with_effort(7).with_ans(true);
4380            let jxl = cfg
4381                .encode(&bytes, w, h, layout)
4382                .unwrap_or_else(|e| panic!("{}: encode failed: {}", label, e));
4383
4384            let decoded = crate::test_helpers::decode_with_jxl_rs(&jxl)
4385                .unwrap_or_else(|e| panic!("{}: jxl-rs decode failed: {}", label, e));
4386            assert_eq!(decoded.width, w as usize, "{}: width", label);
4387            assert_eq!(decoded.height, h as usize, "{}: height", label);
4388
4389            let scale = 65535.0;
4390            let mut mismatches = 0;
4391            for (i, (&orig, &dec_f)) in pixels.iter().zip(decoded.pixels.iter()).enumerate() {
4392                let dec = (dec_f * scale).round().clamp(0.0, scale) as u16;
4393                if orig != dec && mismatches < 3 {
4394                    eprintln!("{}: mismatch[{}]: orig={} dec={}", label, i, orig, dec);
4395                    mismatches += 1;
4396                }
4397            }
4398            assert_eq!(mismatches, 0, "{}: {} mismatches", label, mismatches);
4399            eprintln!("{}: PASS ({} bytes)", label, jxl.len());
4400        }
4401    }
4402
4403    #[test]
4404    fn test_srgb_lut_matches_powf() {
4405        for i in 0u16..256 {
4406            let lut_val = SRGB_U8_TO_LINEAR[i as usize];
4407            let fast_val = srgb_to_linear_f(i as f32 / 255.0);
4408            let diff = (lut_val - fast_val).abs();
4409            // LUT uses f64 exact powf, srgb_to_linear_f uses fast_powf (~3e-5 relative error)
4410            let tol = fast_val.abs() * 5e-5 + 1e-7;
4411            assert!(
4412                diff <= tol,
4413                "sRGB LUT mismatch at {i}: LUT={lut_val}, fast={fast_val}, diff={diff}"
4414            );
4415        }
4416    }
4417
4418    #[test]
4419    fn test_quality_to_distance_f32_mapping() {
4420        // Verify the piecewise mapping at key points.
4421        assert_eq!(quality_to_distance(100.0), 0.0);
4422        assert_eq!(quality_to_distance(90.0), 1.0); // visually lossless
4423        assert_eq!(quality_to_distance(80.0), 1.5);
4424        assert_eq!(quality_to_distance(70.0), 2.0);
4425        assert_eq!(quality_to_distance(50.0), 4.0);
4426        assert_eq!(quality_to_distance(0.0), 9.0);
4427        // Clamped above 100
4428        assert_eq!(quality_to_distance(110.0), 0.0);
4429    }
4430
4431    #[test]
4432    fn test_calibrated_jxl_quality() {
4433        // Boundary: below table minimum clamps to first entry's output.
4434        assert_eq!(calibrated_jxl_quality(0.0), 5.0);
4435        // Boundary: above table maximum clamps to last entry's output.
4436        assert_eq!(calibrated_jxl_quality(100.0), 93.8);
4437        // Exact table entry.
4438        assert_eq!(calibrated_jxl_quality(90.0), 84.2);
4439        // Interpolated mid-point between (50, 48.5) and (55, 51.9).
4440        let mid = calibrated_jxl_quality(52.5);
4441        let expected = 48.5 + 0.5 * (51.9 - 48.5);
4442        assert!(
4443            (mid - expected).abs() < 0.01,
4444            "expected {expected}, got {mid}"
4445        );
4446    }
4447
4448    #[test]
4449    fn test_interp_quality_edge_cases() {
4450        let table = &[(10.0f32, 20.0f32), (20.0, 40.0), (30.0, 60.0)];
4451        // Below table
4452        assert_eq!(interp_quality(table, 5.0), 20.0);
4453        // Above table
4454        assert_eq!(interp_quality(table, 35.0), 60.0);
4455        // Exact match
4456        assert_eq!(interp_quality(table, 20.0), 40.0);
4457        // Midpoint
4458        assert!((interp_quality(table, 15.0) - 30.0).abs() < 0.001);
4459    }
4460}