// zenwebp/decoder/api.rs

use alloc::string::String;
use thiserror::Error;
use whereat::at;

/// Errors that can occur when attempting to decode a WebP image
#[derive(Debug, Error)]
#[non_exhaustive]
pub enum DecodeError {
    /// An IO error occurred while reading the file
    #[cfg(feature = "std")]
    #[error("IO Error: {0}")]
    IoError(#[from] std::io::Error),

    /// RIFF's "RIFF" signature not found or invalid
    #[error("Invalid RIFF signature: {0:x?}")]
    RiffSignatureInvalid([u8; 4]),

    /// WebP's "WEBP" signature not found or invalid
    #[error("Invalid WebP signature: {0:x?}")]
    WebpSignatureInvalid([u8; 4]),

    /// An expected chunk was missing
    #[error("An expected chunk was missing")]
    ChunkMissing,

    /// Chunk header was incorrect or invalid in its usage
    #[error("Invalid Chunk header: {0:x?}")]
    ChunkHeaderInvalid([u8; 4]),

    /// The ALPH chunk preprocessing info flag was invalid
    #[error("Alpha chunk preprocessing flag invalid")]
    InvalidAlphaPreprocessing,

    /// Invalid compression method
    #[error("Invalid compression method")]
    InvalidCompressionMethod,

    /// Alpha chunk doesn't match the frame's size
    #[error("Alpha chunk size mismatch")]
    AlphaChunkSizeMismatch,

    /// Image is too large, either for the platform's pointer size or generally
    #[error("Image too large")]
    ImageTooLarge,

    /// Frame would go out of the canvas
    #[error("Frame outside image")]
    FrameOutsideImage,

    /// Signature of 0x2f not found
    #[error("Invalid lossless signature: {0:x?}")]
    LosslessSignatureInvalid(u8),

    /// Version number was not zero
    #[error("Invalid lossless version number: {0}")]
    VersionNumberInvalid(u8),

    /// Invalid color cache bits
    #[error("Invalid color cache bits: {0}")]
    InvalidColorCacheBits(u8),

    /// An invalid Huffman code was encountered
    #[error("Invalid Huffman code")]
    HuffmanError,

    /// The bitstream was somehow corrupt
    #[error("Corrupt bitstream")]
    BitStreamError,

    /// The transforms specified were invalid
    #[error("Invalid transform")]
    TransformError,

    /// VP8's `[0x9D, 0x01, 0x2A]` magic not found or invalid
    #[error("Invalid VP8 magic: {0:x?}")]
    Vp8MagicInvalid([u8; 3]),

    /// VP8 decoder initialisation wasn't provided with enough data
    #[error("Not enough VP8 init data")]
    NotEnoughInitData,

    /// At time of writing, only the YUV colour-space encoded as `0` is specified
    #[error("Invalid VP8 color space: {0}")]
    ColorSpaceInvalid(u8),

    /// Luma prediction mode was not recognised
    #[error("Invalid VP8 luma prediction mode: {0}")]
    LumaPredictionModeInvalid(i8),

    /// Intra-prediction mode was not recognised
    #[error("Invalid VP8 intra prediction mode: {0}")]
    IntraPredictionModeInvalid(i8),

    /// Chroma prediction mode was not recognised
    #[error("Invalid VP8 chroma prediction mode: {0}")]
    ChromaPredictionModeInvalid(i8),

    /// Inconsistent image sizes
    #[error("Inconsistent image sizes")]
    InconsistentImageSizes,

    /// The file may be valid, but this crate doesn't support decoding it.
    #[error("Unsupported feature: {0}")]
    UnsupportedFeature(String),

    /// Invalid function call or parameter
    #[error("Invalid parameter: {0}")]
    InvalidParameter(String),

    /// Memory limit exceeded
    #[error("Memory limit exceeded")]
    MemoryLimitExceeded,

    /// Invalid chunk size
    #[error("Invalid chunk size")]
    InvalidChunkSize,

    /// No more frames in image
    #[error("No more frames")]
    NoMoreFrames,

    /// Decoding was cancelled via an [`enough::Stop`] token.
    #[error("Decoding cancelled: {0}")]
    Cancelled(enough::StopReason),

    /// Unsupported codec operation.
    #[cfg(feature = "zencodec")]
    #[error(transparent)]
    UnsupportedOperation(#[from] zencodec::UnsupportedOperation),
}

/// Result type alias using `At<DecodeError>` for automatic location tracking.
///
/// Errors wrapped in `At<>` automatically capture file and line information,
/// making debugging easier in production environments.
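///
/// A handling sketch; it assumes the `At::error()` accessor (the one this module
/// itself uses further down) is available to callers for inspecting the wrapped error:
///
/// ```rust,no_run
/// use zenwebp::{DecodeConfig, DecodeError, DecodeRequest};
///
/// let config = DecodeConfig::default();
/// let webp_data: &[u8] = &[]; // your WebP data
/// if let Err(e) = DecodeRequest::new(&config, webp_data).decode_rgba() {
///     match e.error() {
///         DecodeError::MemoryLimitExceeded => { /* retry with a higher limit */ }
///         _ => { /* report the failure */ }
///     }
/// }
/// ```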
pub type DecodeResult<T> = core::result::Result<T, whereat::At<DecodeError>>;

impl From<enough::StopReason> for DecodeError {
    fn from(reason: enough::StopReason) -> Self {
        Self::Cancelled(reason)
    }
}

impl From<whereat::At<DecodeError>> for DecodeError {
    fn from(at: whereat::At<DecodeError>) -> Self {
        at.decompose().0
    }
}

// Core decoder implementation using SliceReader for no_std compatibility
use alloc::format;
use alloc::vec;
use alloc::vec::Vec;
use core::num::NonZeroU16;
use core::ops::Range;

use hashbrown::HashMap;

use super::extended::{self, WebPExtendedInfo, get_alpha_predictor, read_alpha_chunk};
use super::lossless::LosslessDecoder;
use super::vp8v2::DecoderContext;
use crate::slice_reader::SliceReader;

/// All possible RIFF chunks in a WebP image file
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone, Copy, PartialEq, Hash, Eq)]
pub(crate) enum WebPRiffChunk {
    RIFF,
    WEBP,
    VP8,
    VP8L,
    VP8X,
    ANIM,
    ANMF,
    ALPH,
    ICCP,
    EXIF,
    XMP,
    Unknown([u8; 4]),
}

impl WebPRiffChunk {
    pub(crate) const fn from_fourcc(chunk_fourcc: [u8; 4]) -> Self {
        match &chunk_fourcc {
            b"RIFF" => Self::RIFF,
            b"WEBP" => Self::WEBP,
            b"VP8 " => Self::VP8,
            b"VP8L" => Self::VP8L,
            b"VP8X" => Self::VP8X,
            b"ANIM" => Self::ANIM,
            b"ANMF" => Self::ANMF,
            b"ALPH" => Self::ALPH,
            b"ICCP" => Self::ICCP,
            b"EXIF" => Self::EXIF,
            b"XMP " => Self::XMP,
            _ => Self::Unknown(chunk_fourcc),
        }
    }

    pub(crate) const fn to_fourcc(self) -> [u8; 4] {
        match self {
            Self::RIFF => *b"RIFF",
            Self::WEBP => *b"WEBP",
            Self::VP8 => *b"VP8 ",
            Self::VP8L => *b"VP8L",
            Self::VP8X => *b"VP8X",
            Self::ANIM => *b"ANIM",
            Self::ANMF => *b"ANMF",
            Self::ALPH => *b"ALPH",
            Self::ICCP => *b"ICCP",
            Self::EXIF => *b"EXIF",
            Self::XMP => *b"XMP ",
            Self::Unknown(fourcc) => fourcc,
        }
    }

    pub(crate) const fn is_unknown(self) -> bool {
        matches!(self, Self::Unknown(_))
    }
}

// enum WebPImage {
//     Lossy(VP8Frame),
//     Lossless(LosslessFrame),
//     Extended(ExtendedImage),
// }

enum ImageKind {
    Lossy,
    Lossless,
    Extended(WebPExtendedInfo),
}

struct AnimationState {
    next_frame: u32,
    next_frame_start: u64,
    dispose_next_frame: bool,
    previous_frame_width: u32,
    previous_frame_height: u32,
    previous_frame_x_offset: u32,
    previous_frame_y_offset: u32,
    canvas: Option<Vec<u8>>,
    /// Reusable scratch buffer for per-frame decode data.
    /// Avoids allocating a fresh Vec<u8> for every animation frame.
    frame_scratch: Vec<u8>,
    /// Reusable decoder context for lossy VP8 frames in animations.
    /// Shared across all lossy frames to avoid per-frame allocation
    /// of coefficient/prediction/filter buffers (~100KB savings per frame).
    ctx: DecoderContext,
}
impl Default for AnimationState {
    fn default() -> Self {
        Self {
            next_frame: 0,
            next_frame_start: 0,
            dispose_next_frame: true,
            previous_frame_width: 0,
            previous_frame_height: 0,
            previous_frame_x_offset: 0,
            previous_frame_y_offset: 0,
            canvas: None,
            frame_scratch: Vec::new(),
            ctx: DecoderContext::new(),
        }
    }
}

/// Number of times that an animation loops.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum LoopCount {
    /// The animation loops forever.
    Forever,
    /// Each frame of the animation is displayed the specified number of times.
    Times(NonZeroU16),
}

impl core::fmt::Display for LoopCount {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            LoopCount::Forever => f.write_str("infinite"),
            LoopCount::Times(n) => write!(f, "{} time{}", n, if n.get() == 1 { "" } else { "s" }),
        }
    }
}

impl From<u16> for LoopCount {
    fn from(n: u16) -> Self {
        match NonZeroU16::new(n) {
            None => LoopCount::Forever,
            Some(n) => LoopCount::Times(n),
        }
    }
}

/// WebP decoder configuration. Reusable across requests.
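///
/// A minimal builder sketch using the methods defined below (the values are
/// illustrative, not recommendations):
///
/// ```rust,no_run
/// use zenwebp::DecodeConfig;
///
/// let config = DecodeConfig::default()
///     .no_fancy_upsampling()        // same effect as upsampling(UpsamplingMethod::Simple)
///     .max_dimensions(8192, 8192)   // reject anything larger
///     .with_dithering_strength(50); // 0-100, 0 = off
/// # let _ = config;
/// ```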
#[derive(Clone, Debug, PartialEq)]
pub struct DecodeConfig {
    /// Upsampling method for lossy chroma reconstruction. Default: `Bilinear`.
    pub upsampling: UpsamplingMethod,

    /// Decode limits for dimensions, memory, frame count, etc.
    pub limits: super::limits::Limits,

    /// Chroma dithering strength for lossy decoding (0-100, 0=off). Default: 0.
    /// Adds noise to U/V planes to hide banding at low quality settings.
    /// libwebp defaults to 0 in both simple and advanced APIs.
    pub dithering_strength: u8,
}

impl Default for DecodeConfig {
    fn default() -> Self {
        Self {
            upsampling: UpsamplingMethod::Bilinear,
            limits: super::limits::Limits::default(),
            dithering_strength: 0,
        }
    }
}

impl DecodeConfig {
    /// Set the upsampling method.
    #[must_use]
    pub fn upsampling(mut self, method: UpsamplingMethod) -> Self {
        self.upsampling = method;
        self
    }

    /// Set decode limits.
    #[must_use]
    pub fn limits(mut self, limits: super::limits::Limits) -> Self {
        self.limits = limits;
        self
    }

    /// Set maximum dimensions.
    #[must_use]
    pub fn max_dimensions(mut self, width: u32, height: u32) -> Self {
        self.limits = self.limits.max_dimensions(width, height);
        self
    }

    /// Set maximum memory usage.
    #[must_use]
    pub fn max_memory(mut self, bytes: u64) -> Self {
        self.limits = self.limits.max_memory(bytes);
        self
    }

    /// Disable fancy upsampling.
    #[must_use]
    pub fn no_fancy_upsampling(mut self) -> Self {
        self.upsampling = UpsamplingMethod::Simple;
        self
    }

    /// Set chroma dithering strength (0=off, 100=max). Default: 0.
    ///
    /// Adds random noise to U/V chroma planes after loop filtering to hide
    /// banding artifacts from coarse chroma quantization at low quality settings.
    #[must_use]
    pub fn with_dithering_strength(mut self, strength: u8) -> Self {
        self.dithering_strength = strength;
        self
    }

    pub(crate) fn to_options(&self) -> WebPDecodeOptions {
        WebPDecodeOptions {
            lossy_upsampling: self.upsampling,
            dithering_strength: self.dithering_strength,
        }
    }
}

/// Decoding request that borrows configuration and input data.
///
/// # Example
///
/// ```rust,no_run
/// use zenwebp::{DecodeConfig, DecodeRequest};
///
/// let config = DecodeConfig::default();
/// let webp_data: &[u8] = &[]; // your WebP data
/// let (pixels, w, h) = DecodeRequest::new(&config, webp_data).decode_rgba()?;
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```
pub struct DecodeRequest<'a> {
    config: &'a DecodeConfig,
    data: &'a [u8],
    stop: Option<&'a dyn enough::Stop>,
    stride_pixels: Option<u32>,
}

impl<'a> DecodeRequest<'a> {
    /// Create a new decoding request.
    #[must_use]
    pub fn new(config: &'a DecodeConfig, data: &'a [u8]) -> Self {
        Self {
            config,
            data,
            stop: None,
            stride_pixels: None,
        }
    }

    /// Set a cooperative cancellation token.
    #[must_use]
    pub fn stop(mut self, stop: &'a dyn enough::Stop) -> Self {
        self.stop = Some(stop);
        self
    }

    /// Set row stride in pixels for `_into` methods. Must be >= image width.
    #[must_use]
    pub fn stride(mut self, stride_pixels: u32) -> Self {
        self.stride_pixels = Some(stride_pixels);
        self
    }

    /// Decode to the image's native pixel format (RGB or RGBA).
    ///
    /// Returns RGBA if the image has alpha, RGB otherwise. This avoids
    /// both unnecessary alpha expansion and alpha stripping.
    ///
    /// The returned [`PixelLayout`](crate::PixelLayout) indicates the format.
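    ///
    /// A dispatch sketch over the returned layout:
    ///
    /// ```rust,no_run
    /// use zenwebp::{DecodeConfig, DecodeRequest, PixelLayout};
    ///
    /// let config = DecodeConfig::default();
    /// let webp_data: &[u8] = &[]; // your WebP data
    /// let (pixels, w, h, layout) = DecodeRequest::new(&config, webp_data).decode()?;
    /// let bytes_per_pixel = if matches!(layout, PixelLayout::Rgba8) { 4 } else { 3 };
    /// assert_eq!(pixels.len(), w as usize * h as usize * bytes_per_pixel);
    /// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
    /// ```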
    pub fn decode(self) -> DecodeResult<(Vec<u8>, u32, u32, crate::PixelLayout)> {
        let (pixels, w, h, has_alpha) = decode_native_internal(
            self.data,
            &self.config.to_options(),
            &self.config.limits,
            self.stop,
        )?;
        let layout = if has_alpha {
            crate::PixelLayout::Rgba8
        } else {
            crate::PixelLayout::Rgb8
        };
        Ok((pixels, w, h, layout))
    }

    /// Decode to RGBA pixels. If the image has no alpha channel, alpha is set to 255.
    pub fn decode_rgba(self) -> DecodeResult<(Vec<u8>, u32, u32)> {
        let (rgba, w, h) = decode_to_rgba_internal(
            self.data,
            &self.config.to_options(),
            &self.config.limits,
            self.stop,
        )?;
        Ok((rgba, w, h))
    }

    /// Decode to RGB pixels (no alpha). If the image has alpha, it is discarded.
    pub fn decode_rgb(self) -> DecodeResult<(Vec<u8>, u32, u32)> {
        let (native, w, h, has_alpha) = decode_native_internal(
            self.data,
            &self.config.to_options(),
            &self.config.limits,
            self.stop,
        )?;
        if !has_alpha {
            Ok((native, w, h))
        } else {
            let pixel_count = (w as usize) * (h as usize);
            let mut rgb = alloc::vec![0u8; pixel_count * 3];
            garb::bytes::rgba_to_rgb(&native, &mut rgb).map_err(|e| at!(garb_err(e)))?;
            Ok((rgb, w, h))
        }
    }

    /// Decode to RGBA, writing into a pre-allocated buffer.
    ///
    /// If [`stride`](Self::stride) is set, rows are written with that pixel stride.
    /// Otherwise rows are packed (stride == width).
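    ///
    /// A strided-output sketch (the 1024x768 surface is illustrative):
    ///
    /// ```rust,no_run
    /// use zenwebp::{DecodeConfig, DecodeRequest};
    ///
    /// let config = DecodeConfig::default();
    /// let webp_data: &[u8] = &[]; // your WebP data
    /// let stride_pixels = 1024u32;
    /// let mut surface = vec![0u8; 1024 * 768 * 4];
    /// let (w, h) = DecodeRequest::new(&config, webp_data)
    ///     .stride(stride_pixels)
    ///     .decode_rgba_into(&mut surface)?;
    /// # let _ = (w, h);
    /// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
    /// ```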
    pub fn decode_rgba_into(self, output: &mut [u8]) -> DecodeResult<(u32, u32)> {
        let (rgba, w, h) = decode_to_rgba_internal(
            self.data,
            &self.config.to_options(),
            &self.config.limits,
            self.stop,
        )?;
        convert_to_output(
            &rgba,
            output,
            w,
            h,
            4,
            self.stride_pixels,
            |src, dst, w, h, ss, ds| {
                // RGBA -> RGBA is just a strided copy
                for y in 0..h {
                    dst[y * ds..][..w * 4].copy_from_slice(&src[y * ss..][..w * 4]);
                }
                Ok(())
            },
        )?;
        Ok((w, h))
    }

    /// Decode to RGB, writing into a pre-allocated buffer.
    ///
    /// If [`stride`](Self::stride) is set, rows are written with that pixel stride.
    /// Otherwise rows are packed (stride == width).
    pub fn decode_rgb_into(self, output: &mut [u8]) -> DecodeResult<(u32, u32)> {
        let (rgba, w, h) = decode_to_rgba_internal(
            self.data,
            &self.config.to_options(),
            &self.config.limits,
            self.stop,
        )?;
        convert_to_output(
            &rgba,
            output,
            w,
            h,
            3,
            self.stride_pixels,
            |src, dst, w, h, ss, ds| {
                garb::bytes::rgba_to_rgb_strided(src, dst, w, h, ss, ds).map_err(garb_err)
            },
        )?;
        Ok((w, h))
    }

    /// Read image info without decoding pixel data.
    pub fn info(self) -> DecodeResult<ImageInfo> {
        ImageInfo::from_webp(self.data)
    }

    /// Decode to YUV 4:2:0 planes (lossy only).
    pub fn decode_yuv420(self) -> DecodeResult<YuvPlanes> {
        decode_yuv420(self.data)
    }

    /// Decode lossy VP8 to RGB.
    #[allow(dead_code)]
    pub(crate) fn decode_rgb_lossy(self) -> DecodeResult<(Vec<u8>, u16, u16)> {
        self.decode_lossy_internal(3)
    }

    /// Decode lossy VP8 to RGBA.
    #[allow(dead_code)]
    pub(crate) fn decode_rgba_lossy(self) -> DecodeResult<(Vec<u8>, u16, u16)> {
        self.decode_lossy_internal(4)
    }

    fn decode_lossy_internal(self, bpp: usize) -> DecodeResult<(Vec<u8>, u16, u16)> {
        let data = self.data;
        let dither_strength = self.config.dithering_strength;
        if data.len() < 20 {
            return Err(whereat::at!(DecodeError::NotEnoughInitData));
        }

        // Parse RIFF/WebP container to find the VP8 chunk
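        // Expected container layout (all sizes are little-endian u32):
        //   "RIFF" | riff size | "WEBP" | chunk fourcc | chunk size | chunk payload ...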
        if &data[..4] != b"RIFF" {
            let mut sig = [0u8; 4];
            sig.copy_from_slice(&data[..4]);
            return Err(whereat::at!(DecodeError::RiffSignatureInvalid(sig)));
        }
        if &data[8..12] != b"WEBP" {
            let mut sig = [0u8; 4];
            sig.copy_from_slice(&data[8..12]);
            return Err(whereat::at!(DecodeError::WebpSignatureInvalid(sig)));
        }

        let first_chunk = &data[12..16];

        match first_chunk {
            b"VP8 " => {
                // Simple lossy VP8
                let chunk_size =
                    u32::from_le_bytes([data[16], data[17], data[18], data[19]]) as usize;
                let vp8_start = 20;
                let vp8_end = (vp8_start + chunk_size).min(data.len());
                let vp8_data = &data[vp8_start..vp8_end];

                let mut ctx = DecoderContext::new().with_dithering_strength(dither_strength);
                let mut output = Vec::new();
                let (w, h) = ctx.decode_to_rgb(vp8_data, &mut output, bpp)?;
                Ok((output, w, h))
            }
            b"VP8X" => {
                // Extended format — use the demuxer to find the VP8 bitstream
                use crate::mux::WebPDemuxer;

                let demuxer = WebPDemuxer::new(data).map_err(|e| {
                    whereat::at!(DecodeError::InvalidParameter(alloc::format!(
                        "demux error: {e}"
                    )))
                })?;

                if demuxer.is_animated() {
                    return Err(whereat::at!(DecodeError::UnsupportedFeature(
                        "lossy single-frame decode does not support animation; use AnimationDecoder"
                            .into()
                    )));
                }

                let frame = demuxer
                    .frame(1)
                    .ok_or_else(|| whereat::at!(DecodeError::ChunkMissing))?;

                if !frame.is_lossy {
                    return Err(whereat::at!(DecodeError::UnsupportedFeature(
                        "lossy decoder only supports VP8, got VP8L".into()
                    )));
                }

                let mut ctx = DecoderContext::new().with_dithering_strength(dither_strength);
                let mut output = Vec::new();

                // Decode lossy bitstream, requesting RGBA if alpha is present
                let decode_bpp = if frame.has_alpha { 4 } else { bpp };
                let (w, h) = ctx.decode_to_rgb(frame.bitstream, &mut output, decode_bpp)?;

                // Apply alpha channel if present
                if let Some(alpha_data) = frame.alpha_data {
                    let alpha_chunk = read_alpha_chunk(alpha_data, w, h)?;

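                    // The ALPH payload stores each alpha value as a residual against a
                    // neighbour-based predictor chosen by `filtering_method`, so the
                    // plane is reconstructed by adding the predictor back per pixel.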
                    let fw = usize::from(w);
                    let fh = usize::from(h);
                    for y in 0..fh {
                        for x in 0..fw {
                            let predictor: u8 = get_alpha_predictor(
                                x,
                                y,
                                fw,
                                alpha_chunk.filtering_method,
                                &output,
                            );

                            let alpha_index = y * fw + x;
                            let buffer_index = alpha_index * 4 + 3;

                            output[buffer_index] =
                                predictor.wrapping_add(alpha_chunk.data[alpha_index]);
                        }
                    }
                }

                // Convert to requested bpp if needed
                if decode_bpp == 4 && bpp == 3 {
                    let pixel_count = usize::from(w) * usize::from(h);
                    let mut rgb = alloc::vec![0u8; pixel_count * 3];
                    garb::bytes::rgba_to_rgb(&output, &mut rgb)
                        .map_err(|e| whereat::at!(garb_err(e)))?;
                    Ok((rgb, w, h))
                } else {
                    Ok((output, w, h))
                }
            }
            _ => Err(whereat::at!(DecodeError::UnsupportedFeature(
                alloc::format!("lossy decoder only supports VP8, got {:?}", first_chunk)
            ))),
        }
    }
}

/// WebP decoder configuration options (internal, used by AnimationDecoder)
#[derive(Clone)]
#[non_exhaustive]
pub(crate) struct WebPDecodeOptions {
    /// The upsampling method used in conversion from lossy yuv to rgb
    pub lossy_upsampling: UpsamplingMethod,
    /// Chroma dithering strength (0=off, 100=max). Default: 0.
    pub dithering_strength: u8,
}

impl Default for WebPDecodeOptions {
    fn default() -> Self {
        Self {
            lossy_upsampling: UpsamplingMethod::Bilinear,
            dithering_strength: 0,
        }
    }
}

/// Methods for upsampling the chroma values in lossy decoding
///
/// The chroma red and blue planes are encoded in VP8 at half the size of the luma plane,
/// so these values must be upsampled to cover each pixel in the image.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub enum UpsamplingMethod {
    /// Fancy upsampling
    ///
    /// Does bilinear interpolation using the 4 values nearest to the pixel, weighting based on the distance
    /// from the pixel.
    #[default]
    Bilinear,
    /// Simple upsampling, just uses the closest u/v value to the pixel when upsampling
    ///
    /// Matches the -nofancy option in dwebp.
    /// Should be faster but may lead to slightly jagged edges.
    Simple,
}

/// WebP image format decoder.
pub struct WebPDecoder<'a> {
    r: SliceReader<'a>,
    memory_limit: usize,
    limits: super::limits::Limits,

    width: u32,
    height: u32,

    kind: ImageKind,
    animation: AnimationState,

    is_lossy: bool,
    has_alpha: bool,
    num_frames: u32,
    loop_count: LoopCount,
    loop_duration: u64,

    chunks: HashMap<WebPRiffChunk, Range<u64>>,

    webp_decode_options: WebPDecodeOptions,

    stop: Option<&'a dyn enough::Stop>,
}

impl<'a> WebPDecoder<'a> {
    /// Create a new `WebPDecoder` from the data slice, like [`new`](Self::new) but returning a plain [`DecodeError`].
    ///
    /// This method parses the WebP headers and prepares for decoding. Use [`info()`](Self::info)
    /// to inspect metadata before calling decode methods.
    ///
    /// # Example - Two-phase decoding
    ///
    /// ```rust,no_run
    /// use zenwebp::WebPDecoder;
    ///
    /// # let webp_data: &[u8] = &[]; // your WebP data
    /// // Phase 1: Parse headers
    /// let mut decoder = WebPDecoder::build(webp_data)?;
    ///
    /// // Phase 2: Inspect metadata
    /// let info = decoder.info();
    /// println!("{}x{}, alpha={}", info.width, info.height, info.has_alpha);
    ///
    /// // Phase 3: Decode (no re-parsing)
    /// let mut output = vec![0u8; decoder.output_buffer_size().unwrap()];
    /// decoder.read_image(&mut output)?;
    /// # Ok::<(), zenwebp::DecodeError>(())
    /// ```
    pub fn build(data: &'a [u8]) -> Result<Self, DecodeError> {
        Ok(Self::new(data)?)
    }

    /// Create a new `WebPDecoder` from the data slice.
    pub fn new(data: &'a [u8]) -> DecodeResult<Self> {
        Self::new_with_options(data, WebPDecodeOptions::default())
    }

    /// Get image information without decoding the full image.
    ///
    /// Returns metadata that was parsed during construction. This is a cheap
    /// operation that doesn't re-parse or decode any pixel data.
    ///
    /// # Example
    ///
    /// ```rust,no_run
    /// use zenwebp::WebPDecoder;
    ///
    /// let webp_data: &[u8] = &[]; // your WebP data
    /// let decoder = WebPDecoder::new(webp_data)?;
    /// let info = decoder.info();
    /// println!("Format: {:?}, {}x{}", info.format, info.width, info.height);
    /// # Ok::<(), zenwebp::DecodeError>(())
    /// ```
    pub fn info(&self) -> ImageInfo {
        let icc_profile = self
            .read_chunk_direct(WebPRiffChunk::ICCP, self.memory_limit)
            .unwrap_or(None);
        let exif = self
            .read_chunk_direct(WebPRiffChunk::EXIF, self.memory_limit)
            .unwrap_or(None);
        let xmp = self
            .read_chunk_direct(WebPRiffChunk::XMP, self.memory_limit)
            .unwrap_or(None);
        let orientation = exif
            .as_deref()
            .and_then(crate::exif_orientation::parse_orientation)
            .and_then(zenpixels::Orientation::from_exif);
        ImageInfo {
            width: self.width,
            height: self.height,
            has_alpha: self.has_alpha,
            is_lossy: self.is_lossy,
            has_animation: self.is_animated(),
            frame_count: self.num_frames,
            format: if self.is_lossy {
                BitstreamFormat::Lossy
            } else {
                BitstreamFormat::Lossless
            },
            orientation,
            icc_profile,
            exif,
            xmp,
        }
    }

    /// Create a new `WebPDecoder` from the data slice with the given options.
    pub(crate) fn new_with_options(
        data: &'a [u8],
        webp_decode_options: WebPDecodeOptions,
    ) -> DecodeResult<Self> {
        let mut decoder = Self {
            r: SliceReader::new(data),
            width: 0,
            height: 0,
            num_frames: 0,
            kind: ImageKind::Lossy,
            chunks: HashMap::new(),
            animation: Default::default(),
            memory_limit: usize::MAX,
            limits: super::limits::Limits::default(),
            is_lossy: false,
            has_alpha: false,
            loop_count: LoopCount::Times(NonZeroU16::new(1).unwrap()),
            loop_duration: 0,
            webp_decode_options,
            stop: None,
        };
        decoder.read_data()?;
        Ok(decoder)
    }

    fn read_data(&mut self) -> DecodeResult<()> {
        let (WebPRiffChunk::RIFF, riff_size, _) = read_chunk_header(&mut self.r)? else {
            return Err(at!(DecodeError::ChunkHeaderInvalid(*b"RIFF")));
        };

        match &read_fourcc(&mut self.r)? {
            WebPRiffChunk::WEBP => {}
            fourcc => return Err(at!(DecodeError::WebpSignatureInvalid(fourcc.to_fourcc()))),
        }

        let (chunk, chunk_size, chunk_size_rounded) = read_chunk_header(&mut self.r)?;
        let start = self.r.stream_position();

        match chunk {
            WebPRiffChunk::VP8 => {
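                // VP8 key frame header: a 3-byte frame tag whose lowest bit is the
                // frame type (0 = key frame), the 0x9D 0x01 0x2A start code, then
                // 16-bit width/height fields whose top two bits are scaling hints
                // (hence the `& 0x3FFF` masks below).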
                let tag = self.r.read_u24_le()?;

                let keyframe = tag & 1 == 0;
                if !keyframe {
                    return Err(at!(DecodeError::UnsupportedFeature(
                        "Non-keyframe frames".into(),
                    )));
                }

                let mut tag = [0u8; 3];
                self.r.read_exact(&mut tag)?;
                if tag != [0x9d, 0x01, 0x2a] {
                    return Err(at!(DecodeError::Vp8MagicInvalid(tag)));
                }

                let w = self.r.read_u16_le()?;
                let h = self.r.read_u16_le()?;

                self.width = u32::from(w & 0x3FFF);
                self.height = u32::from(h & 0x3FFF);
                if self.width == 0 || self.height == 0 {
                    return Err(at!(DecodeError::InconsistentImageSizes));
                }

                self.limits.check_dimensions(self.width, self.height)?;

                self.chunks
                    .insert(WebPRiffChunk::VP8, start..start + chunk_size);
                self.kind = ImageKind::Lossy;
                self.is_lossy = true;
            }
            WebPRiffChunk::VP8L => {
                let signature = self.r.read_u8()?;
                if signature != 0x2f {
                    return Err(at!(DecodeError::LosslessSignatureInvalid(signature)));
                }

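                // VP8L header after the 0x2f signature byte, packed LSB-first into a
                // little-endian u32: 14 bits (width - 1), 14 bits (height - 1),
                // 1 alpha-hint bit, 3 version bits.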
                let header = self.r.read_u32_le()?;
                let version = header >> 29;
                if version != 0 {
                    return Err(at!(DecodeError::VersionNumberInvalid(version as u8)));
                }

                self.width = (1 + header) & 0x3FFF;
                self.height = (1 + (header >> 14)) & 0x3FFF;
                self.limits.check_dimensions(self.width, self.height)?;
                self.chunks
                    .insert(WebPRiffChunk::VP8L, start..start + chunk_size);
                self.kind = ImageKind::Lossless;
                self.has_alpha = (header >> 28) & 1 != 0;
            }
            WebPRiffChunk::VP8X => {
                let mut info = extended::read_extended_header(&mut self.r)?;
                self.width = info.canvas_width;
                self.height = info.canvas_height;
                self.limits.check_dimensions(self.width, self.height)?;

                let mut position = start + chunk_size_rounded;
                let max_position = position + riff_size.saturating_sub(12);
                self.r.seek_from_start(position)?;

                while position < max_position {
                    match read_chunk_header(&mut self.r) {
                        Ok((chunk, chunk_size, chunk_size_rounded)) => {
                            let range = position + 8..position + 8 + chunk_size;
                            position += 8 + chunk_size_rounded;

                            if !chunk.is_unknown() {
                                self.chunks.entry(chunk).or_insert(range);
                            }

                            if chunk == WebPRiffChunk::ANMF {
                                self.num_frames += 1;
                                self.limits.check_frame_count(self.num_frames as usize)?;
                                if chunk_size < 24 {
                                    return Err(at!(DecodeError::InvalidChunkSize));
                                }

                                self.r.seek_relative(12)?;
                                let duration = self.r.read_u32_le()? & 0xffffff;
                                self.loop_duration =
                                    self.loop_duration.wrapping_add(u64::from(duration));

                                // If the image is animated, the image data chunk will be inside the
                                // ANMF chunks, so we must inspect them to determine whether the
                                // image contains any lossy image data. VP8 chunks store lossy data
                                // and the spec says that lossless images SHOULD NOT contain ALPH
                                // chunks, so we treat both as indicators of lossy images.
                                if !self.is_lossy {
                                    let (subchunk, ..) = read_chunk_header(&mut self.r)?;
                                    if let WebPRiffChunk::VP8 | WebPRiffChunk::ALPH = subchunk {
                                        self.is_lossy = true;
                                    }
                                    self.r.seek_relative(chunk_size_rounded as i64 - 24)?;
                                } else {
                                    self.r.seek_relative(chunk_size_rounded as i64 - 16)?;
                                }

                                continue;
                            }

                            self.r.seek_relative(chunk_size_rounded as i64)?;
                        }
                        Err(DecodeError::BitStreamError) => {
                            break;
                        }
                        Err(e) => return Err(at!(e)),
                    }
                }
                self.is_lossy = self.is_lossy || self.chunks.contains_key(&WebPRiffChunk::VP8);

                // NOTE: We allow malformed images that have `info.icc_profile` set without an ICCP chunk,
                // because this is relatively common.
                if info.animation
                    && (!self.chunks.contains_key(&WebPRiffChunk::ANIM)
                        || !self.chunks.contains_key(&WebPRiffChunk::ANMF))
                    || info.exif_metadata && !self.chunks.contains_key(&WebPRiffChunk::EXIF)
                    || info.xmp_metadata && !self.chunks.contains_key(&WebPRiffChunk::XMP)
                    || !info.animation
                        && self.chunks.contains_key(&WebPRiffChunk::VP8)
                            == self.chunks.contains_key(&WebPRiffChunk::VP8L)
                {
                    return Err(at!(DecodeError::ChunkMissing));
                }

                // Decode ANIM chunk.
                if info.animation {
                    match self.read_chunk(WebPRiffChunk::ANIM, 6) {
                        Ok(Some(chunk)) => {
                            let mut cursor = SliceReader::new(&chunk);
                            cursor.read_exact(&mut info.background_color_hint)?;
                            self.loop_count = match cursor.read_u16_le()? {
                                0 => LoopCount::Forever,
                                n => LoopCount::Times(NonZeroU16::new(n).unwrap()),
                            };
                            self.animation.next_frame_start =
                                self.chunks.get(&WebPRiffChunk::ANMF).unwrap().start - 8;
                        }
                        Ok(None) => return Err(at!(DecodeError::ChunkMissing)),
                        Err(ref e) if matches!(e.error(), DecodeError::MemoryLimitExceeded) => {
                            return Err(at!(DecodeError::InvalidChunkSize));
                        }
                        Err(e) => return Err(e),
                    }
                }

                // If the image is animated, the image data chunk will be inside the ANMF chunks. We
                // store the ALPH, VP8, and VP8L chunks (as applicable) of the first frame in the
                // hashmap so that we can read them later.
                if let Some(range) = self.chunks.get(&WebPRiffChunk::ANMF).cloned() {
                    let mut position = range.start + 16;
                    self.r.seek_from_start(position)?;
                    for _ in 0..2 {
                        let (subchunk, subchunk_size, subchunk_size_rounded) =
                            read_chunk_header(&mut self.r)?;
                        let subrange = position + 8..position + 8 + subchunk_size;
                        self.chunks.entry(subchunk).or_insert(subrange.clone());

                        position += 8 + subchunk_size_rounded;
                        if position + 8 > range.end {
                            break;
                        }
                    }
                }

                self.has_alpha = info.alpha;
                self.kind = ImageKind::Extended(info);
            }
            _ => return Err(at!(DecodeError::ChunkHeaderInvalid(chunk.to_fourcc()))),
        };

        Ok(())
    }

    /// Set a cooperative cancellation token for decoding.
    pub fn set_stop(&mut self, stop: Option<&'a dyn enough::Stop>) {
        self.stop = stop;
    }

    /// Sets the maximum amount of memory in bytes that the decoder is allowed to
    /// allocate for decoded image buffers.
    ///
    /// TODO: Some allocations currently ignore this limit.
    pub fn set_memory_limit(&mut self, limit: usize) {
        self.memory_limit = limit;
    }

    /// Set decode limits for validation.
    pub fn set_limits(&mut self, limits: super::limits::Limits) {
        self.limits = limits;
    }

    /// Get the background color hint specified in the image file if the image is an
    /// extended, animated WebP.
    pub fn background_color_hint(&self) -> Option<[u8; 4]> {
        if let ImageKind::Extended(info) = &self.kind {
            Some(info.background_color_hint)
        } else {
            None
        }
    }

    /// Sets the background color if the image is an extended, animated WebP.
    pub fn set_background_color(&mut self, color: [u8; 4]) -> DecodeResult<()> {
        if let ImageKind::Extended(info) = &mut self.kind {
            info.background_color = Some(color);
            Ok(())
        } else {
            Err(at!(DecodeError::InvalidParameter(
                "Background color can only be set on animated webp".into(),
            )))
        }
    }

    /// Returns the (width, height) of the image in pixels.
    pub fn dimensions(&self) -> (u32, u32) {
        (self.width, self.height)
    }

    /// Returns whether the image has an alpha channel. If so, the pixel format is Rgba8;
    /// otherwise it is Rgb8.
    pub fn has_alpha(&self) -> bool {
        self.has_alpha
    }

    /// Returns true if the image is animated.
    pub fn is_animated(&self) -> bool {
        match &self.kind {
            ImageKind::Lossy | ImageKind::Lossless => false,
            ImageKind::Extended(extended) => extended.animation,
        }
    }

    /// Returns whether the image is lossy. For animated images, this is true if any frame is lossy.
    pub fn is_lossy(&self) -> bool {
        self.is_lossy
    }

    /// Returns the number of frames of a single loop of the animation, or zero if the image is not
    /// animated.
    pub fn num_frames(&self) -> u32 {
        self.num_frames
    }

    /// Returns the number of times the animation should loop.
    pub fn loop_count(&self) -> LoopCount {
        self.loop_count
    }

    /// Returns the total duration of one loop through the animation in milliseconds, or zero if the
    /// image is not animated.
    ///
    /// This is the sum of the durations of all individual frames of the image.
    pub fn loop_duration(&self) -> u64 {
        self.loop_duration
    }

    fn read_chunk(
        &mut self,
        chunk: WebPRiffChunk,
        max_size: usize,
    ) -> DecodeResult<Option<Vec<u8>>> {
        self.read_chunk_direct(chunk, max_size)
    }

    fn read_chunk_direct(
        &self,
        chunk: WebPRiffChunk,
        max_size: usize,
    ) -> DecodeResult<Option<Vec<u8>>> {
        match self.chunks.get(&chunk) {
            Some(range) => {
                let len = (range.end - range.start) as usize;
                if len > max_size {
                    return Err(at!(DecodeError::MemoryLimitExceeded));
                }
                let slice = self.chunk_slice(range)?;
                Ok(Some(slice.to_vec()))
            }
            None => Ok(None),
        }
    }

    /// Get a slice of the underlying buffer for a chunk range, with bounds validation.
    fn chunk_slice(&self, range: &core::ops::Range<u64>) -> DecodeResult<&[u8]> {
        let buf = self.r.get_ref();
        let start = range.start as usize;
        let end = range.end as usize;
        if end > buf.len() || start > end {
            return Err(at!(DecodeError::InvalidChunkSize));
        }
        Ok(&buf[start..end])
    }

    /// Returns the raw bytes of the ICC profile, or None if there is no ICC profile.
    pub fn icc_profile(&mut self) -> DecodeResult<Option<Vec<u8>>> {
        self.read_chunk(WebPRiffChunk::ICCP, self.memory_limit)
    }

    /// Returns the raw bytes of the EXIF metadata, or None if there is no EXIF metadata.
    pub fn exif_metadata(&mut self) -> DecodeResult<Option<Vec<u8>>> {
        self.read_chunk(WebPRiffChunk::EXIF, self.memory_limit)
    }

    /// Returns the raw bytes of the XMP metadata, or None if there is no XMP metadata.
    pub fn xmp_metadata(&mut self) -> DecodeResult<Option<Vec<u8>>> {
        self.read_chunk(WebPRiffChunk::XMP, self.memory_limit)
    }

    /// Returns the number of bytes required to store the image or a single frame, or None if that
    /// would take more than `usize::MAX` bytes.
    pub fn output_buffer_size(&self) -> Option<usize> {
        let bytes_per_pixel = if self.has_alpha() { 4 } else { 3 };
        (self.width as usize)
            .checked_mul(self.height as usize)?
            .checked_mul(bytes_per_pixel)
    }

    /// Returns the raw bytes of the image. For animated images, this is the first frame.
    ///
    /// Fails with `ImageTooLarge` if `buf` has a length different from `output_buffer_size()`.
    pub fn read_image(&mut self, buf: &mut [u8]) -> DecodeResult<()> {
        if Some(buf.len()) != self.output_buffer_size() {
            return Err(at!(DecodeError::ImageTooLarge));
        }

        if self.is_animated() {
            let saved = core::mem::take(&mut self.animation);
            self.animation.next_frame_start =
                self.chunks.get(&WebPRiffChunk::ANMF).unwrap().start - 8;
            let result = self.read_frame(buf);
            self.animation = saved;
            result?;
        } else if let Some(range) = self.chunks.get(&WebPRiffChunk::VP8L) {
            let data_slice = self.chunk_slice(range)?;
            let mut decoder = LosslessDecoder::new(data_slice);
            decoder.set_stop(self.stop);

            if self.has_alpha {
                decoder.decode_frame(self.width, self.height, false, buf)?;
            } else {
                let alloc_size = self.width as usize * self.height as usize * 4;
                self.limits.check_memory(alloc_size)?;
                let mut data = vec![0; alloc_size];
                decoder.decode_frame(self.width, self.height, false, &mut data)?;
                garb::bytes::rgba_to_rgb(&data, buf).map_err(garb_err)?;
            }
        } else {
            let range = self
                .chunks
                .get(&WebPRiffChunk::VP8)
                .ok_or(DecodeError::ChunkMissing)?
                .clone();
            let data_buf = self.r.get_ref();
            let vp8_data = data_buf
                .get(range.start as usize..range.end as usize)
                .ok_or(at!(DecodeError::InvalidChunkSize))?;

            // Lossy VP8 direct decode path
            let bpp = if self.has_alpha() { 4 } else { 3 };
            self.animation
                .ctx
                .set_dithering_strength(self.webp_decode_options.dithering_strength);
            let mut output = Vec::new();
            let (w, h) = self
                .animation
                .ctx
                .decode_to_rgb(vp8_data, &mut output, bpp)?;
            if u32::from(w) != self.width || u32::from(h) != self.height {
                return Err(at!(DecodeError::InconsistentImageSizes));
            }

            if self.has_alpha() {
                buf.copy_from_slice(&output);

                let alpha_range = self
                    .chunks
                    .get(&WebPRiffChunk::ALPH)
                    .ok_or_else(|| at!(DecodeError::ChunkMissing))?
                    .clone();
                let alpha_slice = &data_buf[alpha_range.start as usize..alpha_range.end as usize];
                let alpha_chunk =
                    read_alpha_chunk(alpha_slice, self.width as u16, self.height as u16)?;

                let fw = usize::from(w);
                let fh = usize::from(h);
                for y in 0..fh {
                    for x in 0..fw {
                        let predictor: u8 =
                            get_alpha_predictor(x, y, fw, alpha_chunk.filtering_method, buf);

                        let alpha_index = y * fw + x;
                        let buffer_index = alpha_index * 4 + 3;

                        buf[buffer_index] = predictor.wrapping_add(alpha_chunk.data[alpha_index]);
                    }
                }
            } else {
                buf.copy_from_slice(&output);
            }
        }

        Ok(())
    }

    /// Reads the next frame of the animation.
    ///
    /// The frame contents are written into `buf` and the method returns the duration of the frame
    /// in milliseconds. If there are no more frames, the method returns
    /// `DecodeError::NoMoreFrames` and `buf` is left unchanged.
    ///
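    /// # Example
    ///
    /// A minimal playback-loop sketch:
    ///
    /// ```rust,no_run
    /// use zenwebp::WebPDecoder;
    ///
    /// # let webp_data: &[u8] = &[]; // your WebP data
    /// let mut decoder = WebPDecoder::new(webp_data)?;
    /// let mut frame = vec![0u8; decoder.output_buffer_size().unwrap()];
    /// for _ in 0..decoder.num_frames() {
    ///     let duration_ms = decoder.read_frame(&mut frame)?;
    ///     // present `frame` for `duration_ms` milliseconds
    /// }
    /// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
    /// ```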
    pub fn read_frame(&mut self, buf: &mut [u8]) -> DecodeResult<u32> {
        if !self.is_animated() {
            return Err(at!(DecodeError::InvalidParameter(String::from(
                "not an animated WebP",
            ))));
        }
        if Some(buf.len()) != self.output_buffer_size() {
            return Err(at!(DecodeError::ImageTooLarge));
        }

        if self.animation.next_frame == self.num_frames {
            return Err(at!(DecodeError::NoMoreFrames));
        }

        let ImageKind::Extended(info) = &self.kind else {
            unreachable!()
        };

        self.r.seek_from_start(self.animation.next_frame_start)?;

        let anmf_size = match read_chunk_header(&mut self.r)? {
            (WebPRiffChunk::ANMF, size, _) if size >= 32 => size,
            _ => return Err(at!(DecodeError::ChunkHeaderInvalid(*b"ANMF"))),
        };

        // Read ANMF chunk
        let frame_x = extended::read_3_bytes(&mut self.r)? * 2;
        let frame_y = extended::read_3_bytes(&mut self.r)? * 2;
        let frame_width = extended::read_3_bytes(&mut self.r)? + 1;
        let frame_height = extended::read_3_bytes(&mut self.r)? + 1;
        if frame_width > 16384 || frame_height > 16384 {
            return Err(at!(DecodeError::ImageTooLarge));
        }
        if frame_x + frame_width > self.width || frame_y + frame_height > self.height {
            return Err(at!(DecodeError::FrameOutsideImage));
        }
        let duration = extended::read_3_bytes(&mut self.r)?;
        let frame_info = self.r.read_u8()?;
        let use_alpha_blending = frame_info & 0b00000010 == 0;
        let dispose = frame_info & 0b00000001 != 0;

        // Propagate dithering strength to the reusable decoder context.
        self.animation
            .ctx
            .set_dithering_strength(self.webp_decode_options.dithering_strength);

        // Read normal bitstream now
        let (chunk, chunk_size, chunk_size_rounded) = read_chunk_header(&mut self.r)?;
        if chunk_size_rounded + 24 > anmf_size {
            return Err(at!(DecodeError::ChunkHeaderInvalid(chunk.to_fourcc())));
        }

        let frame_has_alpha: bool = match chunk {
            WebPRiffChunk::VP8 => {
                // Lossy VP8 decode with buffer reuse across animation frames.
                // DecoderContext is reused from self.animation.ctx, saving
                // ~100KB of allocation per frame for coefficient/filter buffers.
                let data_slice = self.r.take_slice(chunk_size as usize)?;
                let (w, h) = self.animation.ctx.decode_to_rgb(
                    data_slice,
                    &mut self.animation.frame_scratch,
                    3,
                )?;
                if u32::from(w) != frame_width || u32::from(h) != frame_height {
                    return Err(at!(DecodeError::InconsistentImageSizes));
                }
                false
            }
            WebPRiffChunk::VP8L => {
                let data_slice = self.r.take_slice(chunk_size as usize)?;
                let mut lossless_decoder = LosslessDecoder::new(data_slice);
                lossless_decoder.set_stop(self.stop);
                let frame_alloc = frame_width as usize * frame_height as usize * 4;
                self.limits.check_memory(frame_alloc)?;
                self.animation.frame_scratch.resize(frame_alloc, 0);
                lossless_decoder.decode_frame(
                    frame_width,
                    frame_height,
                    false,
                    &mut self.animation.frame_scratch,
                )?;
                true
            }
            WebPRiffChunk::ALPH => {
                if chunk_size_rounded + 32 > anmf_size {
                    return Err(at!(DecodeError::ChunkHeaderInvalid(chunk.to_fourcc())));
                }

                // read alpha
                let alpha_slice = self.r.take_slice(chunk_size as usize)?;
                // Skip padding if chunk_size is odd
                if chunk_size_rounded > chunk_size {
                    self.r
                        .seek_relative((chunk_size_rounded - chunk_size) as i64)?;
                }
                let alpha_chunk =
                    read_alpha_chunk(alpha_slice, frame_width as u16, frame_height as u16)?;

                // read opaque — lossy decode with buffer reuse
                let (next_chunk, next_chunk_size, _) = read_chunk_header(&mut self.r)?;
                if chunk_size + next_chunk_size + 32 > anmf_size {
                    return Err(at!(DecodeError::ChunkHeaderInvalid(next_chunk.to_fourcc())));
                }

                let vp8_slice = self.r.take_slice(next_chunk_size as usize)?;
                let (w, h) = self.animation.ctx.decode_to_rgb(
                    vp8_slice,
                    &mut self.animation.frame_scratch,
                    4,
                )?;

                let fw = usize::from(w);
                let fh = usize::from(h);

                for y in 0..fh {
                    for x in 0..fw {
                        let predictor: u8 = get_alpha_predictor(
                            x,
                            y,
                            fw,
                            alpha_chunk.filtering_method,
                            &self.animation.frame_scratch,
                        );

                        let alpha_index = y * fw + x;
                        let buffer_index = alpha_index * 4 + 3;

                        self.animation.frame_scratch[buffer_index] =
                            predictor.wrapping_add(alpha_chunk.data[alpha_index]);
                    }
                }

                true
            }
            _ => return Err(at!(DecodeError::ChunkHeaderInvalid(chunk.to_fourcc()))),
        };

        let clear_color = if self.animation.dispose_next_frame {
            match (info.background_color, frame_has_alpha) {
                (color @ Some(_), _) => color,
                (_, true) => Some([0, 0, 0, 0]),
                _ => None,
            }
        } else {
            None
        };

        // fill starting canvas with clear color
        if self.animation.canvas.is_none() {
            self.animation.canvas = {
                let canvas_alloc = self.width as usize * self.height as usize * 4;
                self.limits.check_memory(canvas_alloc)?;
                let mut canvas = vec![0; canvas_alloc];
                if let Some(color) = info.background_color.as_ref() {
                    canvas
                        .chunks_exact_mut(4)
                        .for_each(|c| c.copy_from_slice(color))
                }
                Some(canvas)
            }
        }
        extended::composite_frame(
            self.animation.canvas.as_mut().unwrap(),
            self.width,
            self.height,
            clear_color,
            &self.animation.frame_scratch,
            frame_x,
            frame_y,
            frame_width,
            frame_height,
            frame_has_alpha,
            use_alpha_blending,
            self.animation.previous_frame_width,
            self.animation.previous_frame_height,
            self.animation.previous_frame_x_offset,
            self.animation.previous_frame_y_offset,
        )?;

        self.animation.previous_frame_width = frame_width;
        self.animation.previous_frame_height = frame_height;
        self.animation.previous_frame_x_offset = frame_x;
        self.animation.previous_frame_y_offset = frame_y;

        self.animation.dispose_next_frame = dispose;
        self.animation.next_frame_start += anmf_size + 8;
        self.animation.next_frame += 1;

        if self.has_alpha() {
            buf.copy_from_slice(self.animation.canvas.as_ref().unwrap());
        } else {
            garb::bytes::rgba_to_rgb(self.animation.canvas.as_ref().unwrap(), buf)
                .map_err(garb_err)?;
        }

        Ok(duration)
    }

1451    /// Resets the animation to the first frame.
1452    ///
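    /// Returns an error if this image is not an animated WebP.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `WebPDecoder` is re-exported at the crate root:
    ///
    /// ```rust,no_run
    /// let webp_data: &[u8] = &[]; // your WebP data
    /// let mut decoder = zenwebp::WebPDecoder::new(webp_data)?;
    /// if decoder.is_animated() {
    ///     // ... decode some frames, then rewind to the first frame ...
    ///     decoder.reset_animation()?;
    /// }
    /// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
    /// ```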
1453    pub fn reset_animation(&mut self) -> DecodeResult<()> {
1454        if !self.is_animated() {
1455            return Err(at!(DecodeError::InvalidParameter(String::from(
1456                "not an animated WebP",
1457            ))));
1458        }
1459        self.animation.next_frame = 0;
1460        self.animation.next_frame_start = self.chunks.get(&WebPRiffChunk::ANMF).unwrap().start - 8;
1461        self.animation.dispose_next_frame = true;
1462        Ok(())
1463    }
1464
    /// Sets the chroma upsampling method used during lossy (VP8) decoding.
1466    pub fn set_lossy_upsampling(&mut self, upsampling_method: UpsamplingMethod) {
1467        self.webp_decode_options.lossy_upsampling = upsampling_method;
1468    }
1469}
1470
1471/// Convert a garb SizeError into a DecodeError.
1472fn garb_err(e: garb::SizeError) -> DecodeError {
1473    DecodeError::InvalidParameter(alloc::format!("pixel conversion: {e}"))
1474}
1475
1476pub(crate) fn read_fourcc(r: &mut SliceReader) -> Result<WebPRiffChunk, DecodeError> {
1477    let mut chunk_fourcc = [0; 4];
1478    r.read_exact(&mut chunk_fourcc)?;
1479    Ok(WebPRiffChunk::from_fourcc(chunk_fourcc))
1480}
1481
1482pub(crate) fn read_chunk_header(
1483    r: &mut SliceReader,
1484) -> Result<(WebPRiffChunk, u64, u64), DecodeError> {
1485    let chunk = read_fourcc(r)?;
1486    let chunk_size = r.read_u32_le()?;
1487    let chunk_size_rounded = chunk_size.saturating_add(chunk_size & 1);
1488    Ok((chunk, chunk_size.into(), chunk_size_rounded.into()))
1489}
1490
1491// ============================================================================
1492// Internal decode helpers
1493// ============================================================================
1494
1495/// Decode WebP data to its native pixel format (RGB or RGBA).
1496/// Returns (pixels, width, height, has_alpha).
1497fn decode_native_internal(
1498    data: &[u8],
1499    options: &WebPDecodeOptions,
1500    limits: &super::limits::Limits,
1501    stop: Option<&dyn enough::Stop>,
1502) -> DecodeResult<(Vec<u8>, u32, u32, bool)> {
1503    let mut decoder = WebPDecoder::new_with_options(data, options.clone())?;
1504    decoder.set_limits(limits.clone());
1505    decoder.set_stop(stop);
1506    let (w, h) = decoder.dimensions();
1507    let output_size = decoder
1508        .output_buffer_size()
1509        .ok_or_else(|| at!(DecodeError::ImageTooLarge))?;
1510    let mut pixels = alloc::vec![0u8; output_size];
1511    decoder.read_image(&mut pixels)?;
1512    Ok((pixels, w, h, decoder.has_alpha()))
1513}
1514
1515/// Decode WebP data to RGBA pixels (always 4 bytes per pixel).
/// Images with an alpha channel decode natively to RGBA and are returned as-is;
/// opaque images decode to native RGB and are then expanded to RGBA.
1518fn decode_to_rgba_internal(
1519    data: &[u8],
1520    options: &WebPDecodeOptions,
1521    limits: &super::limits::Limits,
1522    stop: Option<&dyn enough::Stop>,
1523) -> DecodeResult<(Vec<u8>, u32, u32)> {
1524    let (native, w, h, has_alpha) = decode_native_internal(data, options, limits, stop)?;
1525    if has_alpha {
1526        Ok((native, w, h))
1527    } else {
1528        let pixel_count = (w as usize) * (h as usize);
1529        let mut rgba = alloc::vec![0u8; pixel_count * 4];
1530        garb::bytes::rgb_to_rgba(&native, &mut rgba).map_err(|e| at!(garb_err(e)))?;
1531        Ok((rgba, w, h))
1532    }
1533}
1534
1535/// Convert RGBA source pixels to a strided output buffer, validating dimensions.
1536///
1537/// `bpp` is the bytes per pixel of the output format.
1538/// `convert_fn` receives (src, dst, width, height, src_stride, dst_stride) and
1539/// writes the converted pixels.
1540fn convert_to_output(
1541    rgba: &[u8],
1542    output: &mut [u8],
1543    w: u32,
1544    h: u32,
1545    bpp: usize,
1546    stride_pixels: Option<u32>,
1547    convert_fn: impl FnOnce(&[u8], &mut [u8], usize, usize, usize, usize) -> Result<(), DecodeError>,
1548) -> DecodeResult<()> {
1549    let wu = w as usize;
1550    let hu = h as usize;
1551    let stride_px = stride_pixels.unwrap_or(w) as usize;
1552    if stride_px < wu {
1553        return Err(at!(DecodeError::InvalidParameter(format!(
1554            "stride_pixels {} < width {}",
1555            stride_px, w
1556        ))));
1557    }
1558    let dst_stride = stride_px * bpp;
1559    let required = dst_stride * hu;
1560    if output.len() < required {
1561        return Err(at!(DecodeError::InvalidParameter(format!(
1562            "output buffer too small: got {}, need {}",
1563            output.len(),
1564            required
1565        ))));
1566    }
1567    let src_stride = wu * 4;
1568    convert_fn(rgba, output, wu, hu, src_stride, dst_stride).map_err(|e| at!(e))
1569}
1570
1571// ============================================================================
1572// Convenience decode functions (webpx-compatible API)
1573// ============================================================================
1574
1575/// Decode WebP data to RGBA pixels.
1576///
1577/// Returns the decoded pixels and dimensions.
1578///
1579/// # Example
1580///
1581/// ```rust,no_run
1582/// let webp_data: &[u8] = &[]; // your WebP data
1583/// let (pixels, width, height) = zenwebp::oneshot::decode_rgba(webp_data)?;
1584/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
1585/// ```
1586#[track_caller]
1587pub fn decode_rgba(data: &[u8]) -> DecodeResult<(Vec<u8>, u32, u32)> {
1588    DecodeRequest::new(&DecodeConfig::default(), data).decode_rgba()
1589}
1590
1591/// Decode WebP data to RGB pixels (no alpha).
1592///
1593/// Returns the decoded pixels and dimensions.
1594///
1595/// # Example
1596///
1597/// ```rust,no_run
1598/// let webp_data: &[u8] = &[]; // your WebP data
1599/// let (pixels, width, height) = zenwebp::oneshot::decode_rgb(webp_data)?;
1600/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
1601/// ```
1602#[track_caller]
1603pub fn decode_rgb(data: &[u8]) -> DecodeResult<(Vec<u8>, u32, u32)> {
1604    DecodeRequest::new(&DecodeConfig::default(), data).decode_rgb()
1605}
1606
1607/// Decode WebP data directly into a pre-allocated RGBA buffer.
1608///
1609/// # Arguments
1610/// * `data` - WebP encoded data
1611/// * `output` - Pre-allocated output buffer (must be at least `stride_pixels * height * 4` bytes)
1612/// * `stride_pixels` - Row stride in pixels (must be >= width)
1613///
1614/// # Returns
1615/// Width and height of the decoded image.
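///
/// # Example
///
/// A minimal sketch with tightly packed rows; the `zenwebp::oneshot` path mirrors the
/// other examples in this module:
///
/// ```rust,no_run
/// let webp_data: &[u8] = &[]; // your WebP data
/// let info = zenwebp::ImageInfo::from_bytes(webp_data)?;
/// let stride_pixels = info.width; // tightly packed rows
/// let mut output = vec![0u8; info.width as usize * info.height as usize * 4];
/// let (w, h) = zenwebp::oneshot::decode_rgba_into(webp_data, &mut output, stride_pixels)?;
/// assert_eq!((w, h), (info.width, info.height));
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```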
1616#[track_caller]
1617pub fn decode_rgba_into(
1618    data: &[u8],
1619    output: &mut [u8],
1620    stride_pixels: u32,
1621) -> DecodeResult<(u32, u32)> {
1622    DecodeRequest::new(&DecodeConfig::default(), data)
1623        .stride(stride_pixels)
1624        .decode_rgba_into(output)
1625}
1626
1627/// Decode WebP data directly into a pre-allocated RGB buffer.
1628///
1629/// # Arguments
1630/// * `data` - WebP encoded data
1631/// * `output` - Pre-allocated output buffer (must be at least `stride_pixels * height * 3` bytes)
1632/// * `stride_pixels` - Row stride in pixels (must be >= width)
1633///
1634/// # Returns
1635/// Width and height of the decoded image.
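///
/// # Example
///
/// A small sketch along the same lines as [`decode_rgba_into`], with 3 output bytes per pixel:
///
/// ```rust,no_run
/// let webp_data: &[u8] = &[]; // your WebP data
/// let info = zenwebp::ImageInfo::from_bytes(webp_data)?;
/// let mut output = vec![0u8; info.width as usize * info.height as usize * 3];
/// zenwebp::oneshot::decode_rgb_into(webp_data, &mut output, info.width)?;
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```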
1636#[track_caller]
1637pub fn decode_rgb_into(
1638    data: &[u8],
1639    output: &mut [u8],
1640    stride_pixels: u32,
1641) -> DecodeResult<(u32, u32)> {
1642    DecodeRequest::new(&DecodeConfig::default(), data)
1643        .stride(stride_pixels)
1644        .decode_rgb_into(output)
1645}
1646
/// Image information obtained from the WebP headers.
1648#[derive(Debug, Clone)]
1649pub struct ImageInfo {
1650    /// Image width in pixels.
1651    pub width: u32,
1652    /// Image height in pixels.
1653    pub height: u32,
1654    /// Whether the image has an alpha channel.
1655    pub has_alpha: bool,
1656    /// Whether the image uses lossy compression.
1657    pub is_lossy: bool,
1658    /// Whether the image is animated.
1659    pub has_animation: bool,
1660    /// Number of frames (1 for static images).
1661    pub frame_count: u32,
1662    /// Bitstream format (lossy or lossless).
1663    pub format: BitstreamFormat,
1664    /// EXIF orientation (1-8), parsed from the EXIF chunk if present.
1665    ///
    /// The decoder does not apply this orientation; pixels are returned in stored
    /// order. Use this value to transform the image for display.
1668    /// `None` if no EXIF data or no orientation tag.
1669    pub orientation: Option<zenpixels::Orientation>,
1670    /// ICC color profile, if present.
1671    pub icc_profile: Option<Vec<u8>>,
1672    /// EXIF metadata, if present.
1673    pub exif: Option<Vec<u8>>,
1674    /// XMP metadata, if present.
1675    pub xmp: Option<Vec<u8>>,
1676}
1677
1678impl ImageInfo {
1679    /// Minimum bytes needed to probe WebP metadata.
1680    ///
1681    /// This is a conservative estimate that covers the RIFF header, VP8/VP8L chunk
    /// header, and enough data to read basic image metadata. Real files are usually
    /// much larger, but a prefix of this size is sufficient for probing.
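    ///
    /// # Example
    ///
    /// A sketch of the intended use, probing only a buffered prefix of a file:
    ///
    /// ```rust,no_run
    /// use zenwebp::ImageInfo;
    ///
    /// let file_bytes: &[u8] = &[]; // the start of a WebP file
    /// let probe_len = file_bytes.len().min(ImageInfo::PROBE_BYTES);
    /// let info = ImageInfo::from_bytes(&file_bytes[..probe_len])?;
    /// println!("{}x{}", info.width, info.height);
    /// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
    /// ```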
1684    pub const PROBE_BYTES: usize = 64;
1685
1686    /// Parse image information from WebP data (alias for [`from_webp`](Self::from_webp)).
1687    ///
1688    /// This is a fast probing operation that only parses headers without decoding
1689    /// the full image data.
1690    ///
1691    /// # Example
1692    ///
1693    /// ```rust,no_run
1694    /// use zenwebp::ImageInfo;
1695    ///
1696    /// let webp_data: &[u8] = &[]; // your WebP data
1697    /// let info = ImageInfo::from_bytes(webp_data)?;
1698    /// println!("{}x{}, alpha={}", info.width, info.height, info.has_alpha);
1699    /// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
1700    /// ```
1701    #[track_caller]
1702    pub fn from_bytes(data: &[u8]) -> DecodeResult<Self> {
1703        Self::from_webp(data)
1704    }
1705
1706    /// Parse image information from WebP data.
1707    ///
1708    /// Extracts dimensions, format info, and metadata (ICC, EXIF, XMP) in a single
1709    /// pass. This replaces the need to use both [`WebPDecoder`] and
1710    /// [`WebPDemuxer`](crate::mux::WebPDemuxer) for probing.
1711    #[track_caller]
1712    pub fn from_webp(data: &[u8]) -> DecodeResult<Self> {
1713        let mut decoder = WebPDecoder::new(data)?;
1714        let (width, height) = decoder.dimensions();
1715        let is_lossy = decoder.is_lossy();
1716        let is_animated = decoder.is_animated();
1717        let frame_count = if is_animated { decoder.num_frames() } else { 1 };
1718        let format = if is_lossy {
1719            BitstreamFormat::Lossy
1720        } else {
1721            BitstreamFormat::Lossless
1722        };
1723        let icc_profile = decoder.icc_profile().unwrap_or(None);
1724        let exif = decoder.exif_metadata().unwrap_or(None);
1725        let xmp = decoder.xmp_metadata().unwrap_or(None);
1726        let orientation = exif
1727            .as_deref()
1728            .and_then(crate::exif_orientation::parse_orientation)
1729            .and_then(zenpixels::Orientation::from_exif);
1730        Ok(Self {
1731            width,
1732            height,
1733            has_alpha: decoder.has_alpha(),
1734            is_lossy,
1735            has_animation: is_animated,
1736            frame_count,
1737            format,
1738            orientation,
1739            icc_profile,
1740            exif,
1741            xmp,
1742        })
1743    }
1744
1745    /// Estimate resource consumption for decoding this image.
1746    ///
1747    /// Returns memory, time, and output size estimates. See
1748    /// [`heuristics::estimate_decode`](crate::heuristics::estimate_decode) for details.
1749    #[must_use]
1750    pub fn estimate_decode(&self, output_bpp: u8) -> crate::heuristics::DecodeEstimate {
1751        if self.has_animation {
1752            crate::heuristics::estimate_animation_decode(self.width, self.height, self.frame_count)
1753        } else {
1754            crate::heuristics::estimate_decode(self.width, self.height, output_bpp)
1755        }
1756    }
1757}
1758
1759/// Bitstream compression format.
1760#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
1761#[non_exhaustive]
1762pub enum BitstreamFormat {
1763    /// Lossy compression (VP8).
1764    #[default]
1765    Lossy,
1766    /// Lossless compression (VP8L).
1767    Lossless,
1768}
1769
1770impl core::fmt::Display for BitstreamFormat {
1771    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
1772        match self {
1773            BitstreamFormat::Lossy => f.write_str("lossy"),
1774            BitstreamFormat::Lossless => f.write_str("lossless"),
1775        }
1776    }
1777}
1778
1779/// Decoded YUV 4:2:0 planar image data.
1780///
1781/// Contains separate Y, U, and V planes at their native resolutions.
1782/// Y is full resolution, U and V are half resolution in each dimension.
1783#[derive(Debug, Clone)]
1784pub struct YuvPlanes {
1785    /// Luma plane (full resolution).
1786    pub y: Vec<u8>,
1787    /// Chroma blue plane (half resolution in each dimension).
1788    pub u: Vec<u8>,
1789    /// Chroma red plane (half resolution in each dimension).
1790    pub v: Vec<u8>,
1791    /// Width of the luma plane in pixels.
1792    pub y_width: u32,
1793    /// Height of the luma plane in pixels.
1794    pub y_height: u32,
1795    /// Width of each chroma plane in pixels.
1796    pub uv_width: u32,
1797    /// Height of each chroma plane in pixels.
1798    pub uv_height: u32,
1799}
1800
1801/// Decode WebP data to BGRA pixels (blue, green, red, alpha order).
1802///
1803/// Returns the decoded pixels and dimensions.
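///
/// # Example
///
/// A minimal sketch; the `zenwebp::oneshot` path mirrors the other examples in this module:
///
/// ```rust,no_run
/// let webp_data: &[u8] = &[]; // your WebP data
/// let (pixels, width, height) = zenwebp::oneshot::decode_bgra(webp_data)?;
/// assert_eq!(pixels.len(), (width * height * 4) as usize);
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```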
1804#[track_caller]
1805pub fn decode_bgra(data: &[u8]) -> DecodeResult<(Vec<u8>, u32, u32)> {
1806    let (mut rgba, w, h) = decode_rgba(data)?;
1807    garb::bytes::rgba_to_bgra_inplace(&mut rgba).map_err(|e| at!(garb_err(e)))?;
1808    Ok((rgba, w, h))
1809}
1810
1811/// Decode WebP data to BGR pixels (blue, green, red order, no alpha).
1812///
1813/// Returns the decoded pixels and dimensions.
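///
/// # Example
///
/// A small sketch; the output is packed BGR at three bytes per pixel:
///
/// ```rust,no_run
/// let webp_data: &[u8] = &[]; // your WebP data
/// let (pixels, width, height) = zenwebp::oneshot::decode_bgr(webp_data)?;
/// assert_eq!(pixels.len(), (width * height * 3) as usize);
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```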
1814#[track_caller]
1815pub fn decode_bgr(data: &[u8]) -> DecodeResult<(Vec<u8>, u32, u32)> {
1816    let (rgba, w, h) = decode_rgba(data)?;
1817    let mut bgr = vec![0u8; (w * h * 3) as usize];
1818    garb::bytes::rgba_to_bgr(&rgba, &mut bgr).map_err(|e| at!(garb_err(e)))?;
1819    Ok((bgr, w, h))
1820}
1821
1822/// Decode WebP data directly into a pre-allocated BGRA buffer.
1823///
1824/// Also suitable for BGRX output -- alpha bytes are set to 255 for opaque images.
1825///
1826/// # Arguments
1827/// * `data` - WebP encoded data
1828/// * `output` - Pre-allocated output buffer (must be at least `stride_pixels * height * 4` bytes)
1829/// * `stride_pixels` - Row stride in pixels (must be >= width)
1830///
1831/// # Returns
1832/// Width and height of the decoded image.
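///
/// # Example
///
/// A sketch using a padded stride, to show that `stride_pixels` may exceed the image width:
///
/// ```rust,no_run
/// let webp_data: &[u8] = &[]; // your WebP data
/// let info = zenwebp::ImageInfo::from_bytes(webp_data)?;
/// let stride_pixels = info.width + 16; // rows padded for alignment, for illustration
/// let mut output = vec![0u8; stride_pixels as usize * info.height as usize * 4];
/// zenwebp::oneshot::decode_bgra_into(webp_data, &mut output, stride_pixels)?;
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```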
1833#[track_caller]
1834pub fn decode_bgra_into(
1835    data: &[u8],
1836    output: &mut [u8],
1837    stride_pixels: u32,
1838) -> DecodeResult<(u32, u32)> {
1839    let (rgba, w, h) = decode_rgba(data)?;
1840    convert_to_output(
1841        &rgba,
1842        output,
1843        w,
1844        h,
1845        4,
1846        Some(stride_pixels),
1847        |src, dst, w, h, ss, ds| {
1848            garb::bytes::rgba_to_bgra_strided(src, dst, w, h, ss, ds).map_err(garb_err)
1849        },
1850    )?;
1851    Ok((w, h))
1852}
1853
1854/// Decode WebP data to ARGB pixels (alpha, red, green, blue order).
1855///
1856/// Returns the decoded pixels and dimensions.
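///
/// # Example
///
/// A short sketch; the first byte of each pixel is its alpha value:
///
/// ```rust,no_run
/// let webp_data: &[u8] = &[]; // your WebP data
/// let (pixels, width, height) = zenwebp::oneshot::decode_argb(webp_data)?;
/// println!("{width}x{height}, first alpha byte = {}", pixels[0]);
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```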
1857#[track_caller]
1858pub fn decode_argb(data: &[u8]) -> DecodeResult<(Vec<u8>, u32, u32)> {
1859    let (mut rgba, w, h) = decode_rgba(data)?;
1860    garb::bytes::rgba_to_argb_inplace(&mut rgba).map_err(|e| at!(garb_err(e)))?;
1861    Ok((rgba, w, h))
1862}
1863
1864/// Decode WebP data directly into a pre-allocated ARGB buffer.
1865///
1866/// Also suitable for XRGB output -- alpha bytes are set to 255 for opaque images.
1867///
1868/// # Arguments
1869/// * `data` - WebP encoded data
1870/// * `output` - Pre-allocated output buffer (must be at least `stride_pixels * height * 4` bytes)
1871/// * `stride_pixels` - Row stride in pixels (must be >= width)
1872///
1873/// # Returns
1874/// Width and height of the decoded image.
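///
/// # Example
///
/// A minimal sketch with tightly packed rows:
///
/// ```rust,no_run
/// let webp_data: &[u8] = &[]; // your WebP data
/// let info = zenwebp::ImageInfo::from_bytes(webp_data)?;
/// let mut output = vec![0u8; info.width as usize * info.height as usize * 4];
/// zenwebp::oneshot::decode_argb_into(webp_data, &mut output, info.width)?;
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```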
1875#[track_caller]
1876pub fn decode_argb_into(
1877    data: &[u8],
1878    output: &mut [u8],
1879    stride_pixels: u32,
1880) -> DecodeResult<(u32, u32)> {
1881    let (rgba, w, h) = decode_rgba(data)?;
1882    convert_to_output(
1883        &rgba,
1884        output,
1885        w,
1886        h,
1887        4,
1888        Some(stride_pixels),
1889        |src, dst, w, h, ss, ds| {
1890            garb::bytes::rgba_to_argb_strided(src, dst, w, h, ss, ds).map_err(garb_err)
1891        },
1892    )?;
1893    Ok((w, h))
1894}
1895
1896/// Decode WebP data directly into a pre-allocated BGR buffer.
1897///
1898/// # Arguments
1899/// * `data` - WebP encoded data
1900/// * `output` - Pre-allocated output buffer (must be at least `stride_pixels * height * 3` bytes)
1901/// * `stride_pixels` - Row stride in pixels (must be >= width)
1902///
1903/// # Returns
1904/// Width and height of the decoded image.
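///
/// # Example
///
/// A minimal sketch with tightly packed rows and 3 output bytes per pixel:
///
/// ```rust,no_run
/// let webp_data: &[u8] = &[]; // your WebP data
/// let info = zenwebp::ImageInfo::from_bytes(webp_data)?;
/// let mut output = vec![0u8; info.width as usize * info.height as usize * 3];
/// zenwebp::oneshot::decode_bgr_into(webp_data, &mut output, info.width)?;
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```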
1905#[track_caller]
1906pub fn decode_bgr_into(
1907    data: &[u8],
1908    output: &mut [u8],
1909    stride_pixels: u32,
1910) -> DecodeResult<(u32, u32)> {
1911    let (rgba, w, h) = decode_rgba(data)?;
1912    convert_to_output(
1913        &rgba,
1914        output,
1915        w,
1916        h,
1917        3,
1918        Some(stride_pixels),
1919        |src, dst, w, h, ss, ds| {
1920            garb::bytes::rgba_to_bgr_strided(src, dst, w, h, ss, ds).map_err(garb_err)
1921        },
1922    )?;
1923    Ok((w, h))
1924}
1925
1926/// Decode WebP data to raw YUV 4:2:0 planes.
1927///
1928/// For VP8 lossy images, returns the native YUV planes without upsampling.
1929/// For VP8L lossless images, decodes to RGBA then converts to YUV.
1930///
1931/// # Returns
1932/// [`YuvPlanes`] containing separate Y, U, and V buffers.
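///
/// # Example
///
/// A small sketch showing how the plane buffers relate to the reported dimensions:
///
/// ```rust,no_run
/// let webp_data: &[u8] = &[]; // your WebP data
/// let planes = zenwebp::oneshot::decode_yuv420(webp_data)?;
/// assert_eq!(planes.y.len(), (planes.y_width * planes.y_height) as usize);
/// assert_eq!(planes.u.len(), (planes.uv_width * planes.uv_height) as usize);
/// assert_eq!(planes.v.len(), planes.u.len());
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```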
1933#[track_caller]
1934pub fn decode_yuv420(data: &[u8]) -> DecodeResult<YuvPlanes> {
1935    let decoder = WebPDecoder::new(data)?;
1936
1937    if decoder.is_lossy() && !decoder.is_animated() {
1938        // For lossy images, extract the native YUV planes from the VP8 frame
1939        if let Some(range) = decoder.chunks.get(&WebPRiffChunk::VP8) {
1940            let data_slice = decoder.chunk_slice(range)?;
1941            let mut ctx = super::vp8v2::DecoderContext::new();
1942            let frame = ctx.decode_to_frame(data_slice)?;
1943
1944            let w = u32::from(frame.width);
1945            let h = u32::from(frame.height);
1946            let uv_w = w.div_ceil(2);
1947            let uv_h = h.div_ceil(2);
1948
            let buffer_width = (w as usize).div_ceil(16) * 16;
1957            let chroma_bw = buffer_width / 2;
1958
1959            let mut y = Vec::with_capacity((w * h) as usize);
1960            for row in 0..h as usize {
1961                y.extend_from_slice(
1962                    &frame.ybuf[row * buffer_width..row * buffer_width + w as usize],
1963                );
1964            }
1965
1966            let mut u = Vec::with_capacity((uv_w * uv_h) as usize);
1967            let mut v = Vec::with_capacity((uv_w * uv_h) as usize);
1968            for row in 0..uv_h as usize {
1969                u.extend_from_slice(&frame.ubuf[row * chroma_bw..row * chroma_bw + uv_w as usize]);
1970                v.extend_from_slice(&frame.vbuf[row * chroma_bw..row * chroma_bw + uv_w as usize]);
1971            }
1972
1973            return Ok(YuvPlanes {
1974                y,
1975                u,
1976                v,
1977                y_width: w,
1978                y_height: h,
1979                uv_width: uv_w,
1980                uv_height: uv_h,
1981            });
1982        }
1983    }
1984
1985    // For lossless or animated images, decode to RGBA then convert to YUV
1986    let (rgba, w, h) = decode_rgba(data)?;
1987    let (y_bytes, u_bytes, v_bytes) =
1988        super::yuv::convert_image_yuv::<4>(&rgba, w as u16, h as u16, w as usize);
1989
1990    let uv_w = w.div_ceil(2);
1991    let uv_h = h.div_ceil(2);
1992    let mb_width = (w as usize).div_ceil(16);
1993
1994    let luma_width = 16 * mb_width;
1995    let chroma_width = 8 * mb_width;
1996
1997    let mut y = Vec::with_capacity((w * h) as usize);
1998    for row in 0..h as usize {
1999        y.extend_from_slice(&y_bytes[row * luma_width..row * luma_width + w as usize]);
2000    }
2001
2002    let mut u = Vec::with_capacity((uv_w * uv_h) as usize);
2003    let mut v = Vec::with_capacity((uv_w * uv_h) as usize);
2004    for row in 0..uv_h as usize {
2005        u.extend_from_slice(&u_bytes[row * chroma_width..row * chroma_width + uv_w as usize]);
2006        v.extend_from_slice(&v_bytes[row * chroma_width..row * chroma_width + uv_w as usize]);
2007    }
2008
2009    Ok(YuvPlanes {
2010        y,
2011        u,
2012        v,
2013        y_width: w,
2014        y_height: h,
2015        uv_width: uv_w,
2016        uv_height: uv_h,
2017    })
2018}
2019
2020/// Decode WebP data to premultiplied RGBA pixels.
2021///
2022/// Each color channel is multiplied by its alpha: `C' = C * A / 255`.
2023/// This is the native format for GPU compositing and avoids a per-pixel
/// multiply during alpha blending. The conversion is lossy, especially for pixels with low alpha.
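///
/// # Example
///
/// A short sketch; since each channel is scaled by alpha, no channel can exceed it:
///
/// ```rust,no_run
/// let webp_data: &[u8] = &[]; // your WebP data
/// let (pixels, _width, _height) = zenwebp::oneshot::decode_rgba_premultiplied(webp_data)?;
/// let [r, g, b, a] = [pixels[0], pixels[1], pixels[2], pixels[3]];
/// assert!(r <= a && g <= a && b <= a);
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```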
2025#[track_caller]
2026pub fn decode_rgba_premultiplied(data: &[u8]) -> DecodeResult<(Vec<u8>, u32, u32)> {
2027    let (mut pixels, w, h) = decode_rgba(data)?;
2028    garb::bytes::premultiply_alpha_rgba_u8(&mut pixels).map_err(|e| at!(garb_err(e)))?;
2029    Ok((pixels, w, h))
2030}
2031
2032/// Decode WebP data to premultiplied BGRA pixels.
2033///
2034/// Each color channel is multiplied by its alpha: `C' = C * A / 255`.
2035#[track_caller]
2036pub fn decode_bgra_premultiplied(data: &[u8]) -> DecodeResult<(Vec<u8>, u32, u32)> {
2037    let (mut pixels, w, h) = decode_bgra(data)?;
2038    garb::bytes::premultiply_alpha_bgra_u8(&mut pixels).map_err(|e| at!(garb_err(e)))?;
2039    Ok((pixels, w, h))
2040}
2041
2042/// Decode WebP data to premultiplied ARGB pixels.
2043///
2044/// Each color channel is multiplied by its alpha: `C' = C * A / 255`.
2045#[track_caller]
2046pub fn decode_argb_premultiplied(data: &[u8]) -> DecodeResult<(Vec<u8>, u32, u32)> {
2047    let (mut pixels, w, h) = decode_rgba_premultiplied(data)?;
2048    garb::bytes::rgba_to_argb_inplace(&mut pixels).map_err(|e| at!(garb_err(e)))?;
2049    Ok((pixels, w, h))
2050}
2051
2052/// Decode WebP data to RGB565 pixels (2 bytes per pixel, little-endian).
2053///
2054/// Bit layout per u16: `R[15:11] G[10:5] B[4:0]`.
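///
/// # Example
///
/// A short sketch unpacking the first pixel from the little-endian byte pairs:
///
/// ```rust,no_run
/// let webp_data: &[u8] = &[]; // your WebP data
/// let (pixels, _width, _height) = zenwebp::oneshot::decode_rgb565(webp_data)?;
/// let px = u16::from_le_bytes([pixels[0], pixels[1]]);
/// let r5 = (px >> 11) & 0x1f;
/// let g6 = (px >> 5) & 0x3f;
/// let b5 = px & 0x1f;
/// println!("first pixel: r={r5} g={g6} b={b5}");
/// # Ok::<(), whereat::At<zenwebp::DecodeError>>(())
/// ```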
2055#[track_caller]
2056pub fn decode_rgb565(data: &[u8]) -> DecodeResult<(Vec<u8>, u32, u32)> {
2057    let (rgba, w, h) = decode_rgba(data)?;
2058    let mut out = vec![0u8; (w * h * 2) as usize];
2059    garb::bytes::rgba_to_rgb565(&rgba, &mut out).map_err(|e| at!(garb_err(e)))?;
2060    Ok((out, w, h))
2061}
2062
2063/// Decode WebP data to RGBA4444 pixels (2 bytes per pixel, little-endian).
2064///
2065/// Bit layout per u16: `R[15:12] G[11:8] B[7:4] A[3:0]`.
2066#[track_caller]
2067pub fn decode_rgba4444(data: &[u8]) -> DecodeResult<(Vec<u8>, u32, u32)> {
2068    let (rgba, w, h) = decode_rgba(data)?;
2069    let mut out = vec![0u8; (w * h * 2) as usize];
2070    garb::bytes::rgba_to_rgba4444(&rgba, &mut out).map_err(|e| at!(garb_err(e)))?;
2071    Ok((out, w, h))
2072}
2073
2074#[cfg(test)]
2075mod tests {
2076    use super::*;
2077    const RGB_BPP: usize = 3;
2078
2079    #[test]
2080    fn add_with_overflow_size() {
2081        let bytes = vec![
2082            0x52, 0x49, 0x46, 0x46, 0xaf, 0x37, 0x80, 0x47, 0x57, 0x45, 0x42, 0x50, 0x6c, 0x64,
2083            0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xfb, 0x7e, 0x73, 0x00, 0x06, 0x00, 0x00, 0x00,
2084            0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
2085            0x40, 0xfb, 0xff, 0xff, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
2086            0x00, 0x00, 0x00, 0x00, 0x62, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49,
2087            0x49, 0x54, 0x55, 0x50, 0x4c, 0x54, 0x59, 0x50, 0x45, 0x33, 0x37, 0x44, 0x4d, 0x46,
2088        ];
2089
2090        let _ = WebPDecoder::new(&bytes);
2091    }
2092
2093    #[test]
2094    fn decode_2x2_single_color_image() {
2095        // Image data created from imagemagick and output of xxd:
2096        // $ convert -size 2x2 xc:#f00 red.webp
2097        // $ xxd -g 1 red.webp | head
2098
2099        const NUM_PIXELS: usize = 2 * 2 * RGB_BPP;
2100        // 2x2 red pixel image
2101        let bytes = [
2102            0x52, 0x49, 0x46, 0x46, 0x3c, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50, 0x56, 0x50,
2103            0x38, 0x20, 0x30, 0x00, 0x00, 0x00, 0xd0, 0x01, 0x00, 0x9d, 0x01, 0x2a, 0x02, 0x00,
2104            0x02, 0x00, 0x02, 0x00, 0x34, 0x25, 0xa0, 0x02, 0x74, 0xba, 0x01, 0xf8, 0x00, 0x03,
2105            0xb0, 0x00, 0xfe, 0xf0, 0xc4, 0x0b, 0xff, 0x20, 0xb9, 0x61, 0x75, 0xc8, 0xd7, 0xff,
2106            0x20, 0x3f, 0xe4, 0x07, 0xfc, 0x80, 0xff, 0xf8, 0xf2, 0x00, 0x00, 0x00,
2107        ];
2108
2109        let mut data = [0; NUM_PIXELS];
2110        let mut decoder = WebPDecoder::new(&bytes).unwrap();
2111        decoder.read_image(&mut data).unwrap();
2112
2113        // All pixels should be the same (or very close) for a solid-color image.
2114        // The `yuv` crate's bilinear chroma upsampling uses slightly different
2115        // rounding than our hand-written upsample, so allow +-1 tolerance.
2116        let first_pixel = &data[..RGB_BPP];
2117        for (i, ch) in data.chunks_exact(3).enumerate() {
2118            for c in 0..3 {
2119                let diff = (ch[c] as i16 - first_pixel[c] as i16).unsigned_abs();
2120                assert!(
2121                    diff <= 1,
2122                    "pixel {i} channel {c}: got {} expected {} (diff {diff})",
2123                    ch[c],
2124                    first_pixel[c]
2125                );
2126            }
2127        }
2128    }
2129
2130    #[test]
2131    fn decode_3x3_single_color_image() {
2132        // Test that any odd pixel "tail" is decoded properly
2133
2134        const NUM_PIXELS: usize = 3 * 3 * RGB_BPP;
2135        // 3x3 red pixel image
2136        let bytes = [
2137            0x52, 0x49, 0x46, 0x46, 0x3c, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50, 0x56, 0x50,
2138            0x38, 0x20, 0x30, 0x00, 0x00, 0x00, 0xd0, 0x01, 0x00, 0x9d, 0x01, 0x2a, 0x03, 0x00,
2139            0x03, 0x00, 0x02, 0x00, 0x34, 0x25, 0xa0, 0x02, 0x74, 0xba, 0x01, 0xf8, 0x00, 0x03,
2140            0xb0, 0x00, 0xfe, 0xf0, 0xc4, 0x0b, 0xff, 0x20, 0xb9, 0x61, 0x75, 0xc8, 0xd7, 0xff,
2141            0x20, 0x3f, 0xe4, 0x07, 0xfc, 0x80, 0xff, 0xf8, 0xf2, 0x00, 0x00, 0x00,
2142        ];
2143
2144        let mut data = [0; NUM_PIXELS];
2145        let mut decoder = WebPDecoder::new(&bytes).unwrap();
2146        decoder.read_image(&mut data).unwrap();
2147
2148        // All pixels should be the same (or very close) for a solid-color image.
2149        // The `yuv` crate's bilinear chroma upsampling uses slightly different
2150        // rounding than our hand-written upsample, so allow +-1 tolerance.
2151        let first_pixel = &data[..RGB_BPP];
2152        for (i, ch) in data.chunks_exact(3).enumerate() {
2153            for c in 0..3 {
2154                let diff = (ch[c] as i16 - first_pixel[c] as i16).unsigned_abs();
2155                assert!(
2156                    diff <= 1,
2157                    "pixel {i} channel {c}: got {} expected {} (diff {diff})",
2158                    ch[c],
2159                    first_pixel[c]
2160                );
2161            }
2162        }
2163    }
2164}