// zenavif_parse/lib.rs

1#![deny(unsafe_code)]
2#![allow(clippy::missing_safety_doc)]
3//! AVIF container parser (ISOBMFF/MIAF demuxer).
4//!
5//! Extracts AV1 payloads, alpha channels, grid tiles, animation frames,
6//! and container metadata from AVIF files. Written in safe Rust with
7//! fallible allocations throughout.
8//!
9//! The primary API is [`AvifParser`], which performs zero-copy parsing by
10//! recording byte offsets and resolving data on demand.
11//!
12//! A legacy eager API ([`read_avif`]) is available behind the `eager` feature flag.
13
14// This Source Code Form is subject to the terms of the Mozilla Public
15// License, v. 2.0. If a copy of the MPL was not distributed with this
16// file, You can obtain one at https://mozilla.org/MPL/2.0/.
17
18use arrayvec::ArrayVec;
19use log::{debug, warn};
20
21use bitreader::BitReader;
22use byteorder::ReadBytesExt;
23use fallible_collections::{TryClone, TryReserveError};
24use std::borrow::Cow;
25use std::convert::{TryFrom, TryInto as _};
26
27use std::io::{Read, Take};
28use std::num::NonZeroU32;
29use std::ops::{Range, RangeFrom};
30
31mod obu;
32
33mod boxes;
34use crate::boxes::{BoxType, FourCC};
35
36/// This crate can be used from C.
37#[cfg(feature = "c_api")]
38pub mod c_api;
39
40pub use enough::{Stop, StopReason, Unstoppable};
41
42// Arbitrary buffer size limit used for raw read_bufs on a box.
43// const BUF_SIZE_LIMIT: u64 = 10 * 1024 * 1024;
44
/// A trait to indicate a type can be infallibly converted to `u64`.
/// This should only be implemented for infallible conversions, so only unsigned types are valid.
trait ToU64 {
    fn to_u64(self) -> u64;
}

/// Infallible: usize always fits in u64.
impl ToU64 for usize {
    fn to_u64(self) -> u64 {
        // Compile-time proof that the `as` cast below can never truncate;
        // breaks the build on any (hypothetical) target with >64-bit usize.
        const _: () = assert!(std::mem::size_of::<usize>() <= std::mem::size_of::<u64>());
        self as u64
    }
}
58
/// A trait to indicate a type can be infallibly converted to `usize`.
/// This should only be implemented for infallible conversions, so only unsigned types are valid.
pub(crate) trait ToUsize {
    fn to_usize(self) -> usize;
}

/// Infallible widening cast: `$from_type` always fits in `usize`.
macro_rules! impl_to_usize_from {
    ( $from_type:ty ) => {
        impl ToUsize for $from_type {
            fn to_usize(self) -> usize {
                // Compile-time check that this really is a lossless widening
                // cast on the current target; otherwise the build fails.
                const _: () = assert!(std::mem::size_of::<$from_type>() <= std::mem::size_of::<usize>());
                self as usize
            }
        }
    };
}

// The const assertion inside the macro guarantees each of these casts is
// lossless on the current target (usize is at least 32 bits where this builds).
impl_to_usize_from!(u8);
impl_to_usize_from!(u16);
impl_to_usize_from!(u32);
80
/// Indicate the current offset (i.e., bytes already read) in a reader
trait Offset {
    fn offset(&self) -> u64;
}

/// Wraps a reader to track the current offset
struct OffsetReader<'a, T> {
    // Underlying reader; all reads are delegated to it.
    reader: &'a mut T,
    // Total number of bytes read through this wrapper since construction.
    offset: u64,
}

impl<'a, T> OffsetReader<'a, T> {
    /// Wraps `reader`, starting the byte count at offset 0.
    fn new(reader: &'a mut T) -> Self {
        Self { reader, offset: 0 }
    }
}

impl<T> Offset for OffsetReader<'_, T> {
    fn offset(&self) -> u64 {
        self.offset
    }
}
103
104impl<T: Read> Read for OffsetReader<'_, T> {
105    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
106        let bytes_read = self.reader.read(buf)?;
107        self.offset = self
108            .offset
109            .checked_add(bytes_read.to_u64())
110            .ok_or(Error::Unsupported("total bytes read too large for offset type"))?;
111        Ok(bytes_read)
112    }
113}
114
pub(crate) type TryVec<T> = fallible_collections::TryVec<T>;
pub(crate) type TryString = fallible_collections::TryVec<u8>;

// To ensure we don't use stdlib allocating types by accident: these empty
// unit structs shadow the usual names in this module, so e.g. `Vec::new()`
// fails to compile here and forces use of the fallible aliases above.
#[allow(dead_code)]
struct Vec;
#[allow(dead_code)]
struct Box;
#[allow(dead_code)]
struct HashMap;
#[allow(dead_code)]
struct String;
127
/// Describes parser failures.
///
/// This enum wraps the standard `io::Error` type, unified with
/// our own parser error states and those of crates we use.
#[derive(Debug)]
pub enum Error {
    /// Parse error caused by corrupt or malformed data.
    InvalidData(&'static str),
    /// Parse error caused by limited parser support rather than invalid data.
    Unsupported(&'static str),
    /// Reflect `std::io::ErrorKind::UnexpectedEof` for short data.
    UnexpectedEOF,
    /// Propagate underlying errors from `std::io`.
    Io(std::io::Error),
    /// `read_mp4` terminated without detecting a moov box.
    /// NOTE(review): `read_mp4` is not defined in this chunk — presumably the
    /// legacy/eager parsing path; confirm the name is still accurate.
    NoMoov,
    /// Out of memory
    OutOfMemory,
    /// Resource limit exceeded during parsing
    ResourceLimitExceeded(&'static str),
    /// Operation was stopped/cancelled
    Stopped(enough::StopReason),
}
151
152impl std::fmt::Display for Error {
153    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
154        let msg = match self {
155            Self::InvalidData(s) | Self::Unsupported(s) | Self::ResourceLimitExceeded(s) => s,
156            Self::UnexpectedEOF => "EOF",
157            Self::Io(err) => return err.fmt(f),
158            Self::NoMoov => "Missing Moov box",
159            Self::OutOfMemory => "OOM",
160            Self::Stopped(reason) => return write!(f, "Stopped: {}", reason),
161        };
162        f.write_str(msg)
163    }
164}
165
166impl std::error::Error for Error {}
167
168impl From<bitreader::BitReaderError> for Error {
169    #[cold]
170    #[cfg_attr(debug_assertions, track_caller)]
171    fn from(err: bitreader::BitReaderError) -> Self {
172        log::warn!("bitreader: {err}");
173        Self::InvalidData("truncated bits")
174    }
175}
176
177impl From<std::io::Error> for Error {
178    fn from(err: std::io::Error) -> Self {
179        match err.kind() {
180            std::io::ErrorKind::UnexpectedEof => Self::UnexpectedEOF,
181            _ => Self::Io(err),
182        }
183    }
184}
185
186impl From<std::string::FromUtf8Error> for Error {
187    fn from(_: std::string::FromUtf8Error) -> Self {
188        Self::InvalidData("invalid utf8")
189    }
190}
191
192impl From<std::num::TryFromIntError> for Error {
193    fn from(_: std::num::TryFromIntError) -> Self {
194        Self::Unsupported("integer conversion failed")
195    }
196}
197
198impl From<Error> for std::io::Error {
199    fn from(err: Error) -> Self {
200        let kind = match err {
201            Error::InvalidData(_) => std::io::ErrorKind::InvalidData,
202            Error::UnexpectedEOF => std::io::ErrorKind::UnexpectedEof,
203            Error::Io(io_err) => return io_err,
204            _ => std::io::ErrorKind::Other,
205        };
206        Self::new(kind, err)
207    }
208}
209
210impl From<TryReserveError> for Error {
211    fn from(_: TryReserveError) -> Self {
212        Self::OutOfMemory
213    }
214}
215
216impl From<enough::StopReason> for Error {
217    fn from(reason: enough::StopReason) -> Self {
218        Self::Stopped(reason)
219    }
220}
221
/// Result shorthand using our Error enum.
pub type Result<T, E = Error> = std::result::Result<T, E>;

/// Basic ISO box structure.
///
/// mp4 files are a sequence of possibly-nested 'box' structures.  Each box
/// begins with a header describing the length of the box's data and a
/// four-byte box type which identifies the type of the box. Together these
/// are enough to interpret the contents of that section of the file.
///
/// See ISO 14496-12:2015 § 4.2
#[derive(Debug, Clone, Copy)]
struct BoxHeader {
    /// Box type.
    name: BoxType,
    /// Size of the box in bytes (including the header itself).
    size: u64,
    /// Offset to the start of the contained data (or header size).
    offset: u64,
    /// Uuid for extended type.
    #[allow(unused)]
    uuid: Option<[u8; 16]>,
}
245
impl BoxHeader {
    /// 4-byte size + 4-byte type
    const MIN_SIZE: u64 = 8;
    /// 4-byte size + 4-byte type + 8-byte largesize (ISO 14496-12 § 4.2:
    /// when the 32-bit size field is 1, a 64-bit largesize follows the type)
    const MIN_LARGE_SIZE: u64 = 16;
}
252
/// File type box 'ftyp'.
#[derive(Debug)]
#[allow(unused)]
struct FileTypeBox {
    major_brand: FourCC,
    minor_version: u32,
    compatible_brands: TryVec<FourCC>,
}

/// Handler reference box 'hdlr'.
#[derive(Debug)]
#[allow(unused)]
struct HandlerBox {
    handler_type: FourCC,
}
268
/// AV1 codec configuration from the `av1C` property box.
///
/// Contains the AV1 codec parameters as signaled in the container.
/// See AV1-ISOBMFF § 2.3.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct AV1Config {
    /// AV1 seq_profile (0=Main, 1=High, 2=Professional)
    pub profile: u8,
    /// AV1 seq_level_idx for operating point 0
    pub level: u8,
    /// AV1 seq_tier for operating point 0
    pub tier: u8,
    /// Bit depth (8, 10, or 12)
    pub bit_depth: u8,
    /// True if monochrome (no chroma planes)
    pub monochrome: bool,
    /// Chroma subsampling X (1 = horizontally subsampled)
    pub chroma_subsampling_x: u8,
    /// Chroma subsampling Y (1 = vertically subsampled)
    pub chroma_subsampling_y: u8,
    /// Chroma sample position (0=unknown, 1=vertical, 2=colocated)
    pub chroma_sample_position: u8,
}

/// Colour information from the `colr` property box.
///
/// Can be either CICP-based (`nclx`) or an ICC profile (`rICC`/`prof`).
/// See ISOBMFF § 12.1.5.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ColorInformation {
    /// CICP-based color information (colour_type = 'nclx')
    Nclx {
        /// Colour primaries (ITU-T H.273 Table 2)
        color_primaries: u16,
        /// Transfer characteristics (ITU-T H.273 Table 3)
        transfer_characteristics: u16,
        /// Matrix coefficients (ITU-T H.273 Table 4)
        matrix_coefficients: u16,
        /// True if full range (0-255 for 8-bit), false if limited/studio range
        full_range: bool,
    },
    /// ICC profile (colour_type = 'rICC' or 'prof'), raw profile bytes.
    /// NOTE(review): uses std `Vec` rather than the crate's `TryVec`,
    /// presumably because it crosses the public API — confirm.
    IccProfile(std::vec::Vec<u8>),
}
313
/// Image rotation from the `irot` property box.
///
/// Specifies a counter-clockwise rotation to apply after decoding.
/// See ISOBMFF § 12.1.4.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ImageRotation {
    /// Rotation angle in degrees counter-clockwise: 0, 90, 180, or 270.
    /// (The box stores it as a 2-bit quarter-turn count; parsed form is degrees.)
    pub angle: u16,
}

/// Image mirror from the `imir` property box.
///
/// Specifies a mirror (flip) axis to apply after rotation.
/// See ISOBMFF § 12.1.4.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ImageMirror {
    /// Mirror axis: 0 = top-to-bottom (vertical axis, left-right flip),
    /// 1 = left-to-right (horizontal axis, top-bottom flip).
    pub axis: u8,
}

/// Clean aperture from the `clap` property box.
///
/// Defines a crop rectangle as a centered region. All values are
/// stored as exact rationals (numerator/denominator); offsets may be
/// negative, hence the signed numerators.
/// See ISOBMFF § 12.1.4.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct CleanAperture {
    /// Width of the clean aperture (numerator)
    pub width_n: u32,
    /// Width of the clean aperture (denominator)
    pub width_d: u32,
    /// Height of the clean aperture (numerator)
    pub height_n: u32,
    /// Height of the clean aperture (denominator)
    pub height_d: u32,
    /// Horizontal offset of the clean aperture center (numerator, signed)
    pub horiz_off_n: i32,
    /// Horizontal offset of the clean aperture center (denominator)
    pub horiz_off_d: u32,
    /// Vertical offset of the clean aperture center (numerator, signed)
    pub vert_off_n: i32,
    /// Vertical offset of the clean aperture center (denominator)
    pub vert_off_d: u32,
}

/// Pixel aspect ratio from the `pasp` property box.
///
/// For AVIF, the spec requires this to be 1:1 if present.
/// See ISOBMFF § 12.1.4.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PixelAspectRatio {
    /// Horizontal spacing
    pub h_spacing: u32,
    /// Vertical spacing
    pub v_spacing: u32,
}
371
/// Content light level info from the `clli` property box.
///
/// HDR metadata for display mapping.
/// See ISOBMFF § 12.1.5 / ITU-T H.274.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ContentLightLevel {
    /// Maximum content light level (cd/m²)
    pub max_content_light_level: u16,
    /// Maximum picture average light level (cd/m²)
    pub max_pic_average_light_level: u16,
}

/// Mastering display colour volume from the `mdcv` property box.
///
/// HDR metadata describing the mastering display's color volume.
/// See ISOBMFF § 12.1.5 / SMPTE ST 2086.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct MasteringDisplayColourVolume {
    /// Display primaries: [(x, y); 3] in 0.00002 units (CIE 1931)
    /// Order: green, blue, red (per SMPTE ST 2086)
    pub primaries: [(u16, u16); 3],
    /// White point (x, y) in 0.00002 units
    pub white_point: (u16, u16),
    /// Maximum display luminance in 0.0001 cd/m² units
    pub max_luminance: u32,
    /// Minimum display luminance in 0.0001 cd/m² units
    pub min_luminance: u32,
}

/// Content colour volume from the `cclv` property box.
///
/// Describes the colour volume of the content. Derived from H.265 D.2.40 /
/// ITU-T H.274. All fields are optional, controlled by presence flags;
/// `None` means the corresponding presence flag was unset in the box.
/// See ISOBMFF § 12.1.5.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ContentColourVolume {
    /// Content colour primaries (x, y) for 3 primaries, as signed i32.
    /// Present only if `ccv_primaries_present_flag` was set.
    pub primaries: Option<[(i32, i32); 3]>,
    /// Minimum luminance value. Present only if flag was set.
    pub min_luminance: Option<u32>,
    /// Maximum luminance value. Present only if flag was set.
    pub max_luminance: Option<u32>,
    /// Average luminance value. Present only if flag was set.
    pub avg_luminance: Option<u32>,
}

/// Ambient viewing environment from the `amve` property box.
///
/// Describes the ambient viewing conditions under which the content
/// was authored. See ISOBMFF § 12.1.5 / H.265 D.2.39.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct AmbientViewingEnvironment {
    /// Ambient illuminance in units of 1/10000 cd/m²
    pub ambient_illuminance: u32,
    /// Ambient light x chromaticity (CIE 1931), units of 1/50000
    pub ambient_light_x: u16,
    /// Ambient light y chromaticity (CIE 1931), units of 1/50000
    pub ambient_light_y: u16,
}
432
/// Per-channel gain map parameters from ISO 21496-1.
///
/// Each field is a rational number (numerator/denominator pair) describing
/// how to apply the gain map for this channel. Values that may be negative
/// carry signed numerators; denominators are always unsigned.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct GainMapChannel {
    /// Minimum gain map value (numerator).
    pub gain_map_min_n: i32,
    /// Minimum gain map value (denominator).
    pub gain_map_min_d: u32,
    /// Maximum gain map value (numerator).
    pub gain_map_max_n: i32,
    /// Maximum gain map value (denominator).
    pub gain_map_max_d: u32,
    /// Gamma curve parameter (numerator).
    pub gamma_n: u32,
    /// Gamma curve parameter (denominator).
    pub gamma_d: u32,
    /// Base image offset (numerator).
    pub base_offset_n: i32,
    /// Base image offset (denominator).
    pub base_offset_d: u32,
    /// Alternate image offset (numerator).
    pub alternate_offset_n: i32,
    /// Alternate image offset (denominator).
    pub alternate_offset_d: u32,
}

/// Gain map metadata from a ToneMapImage (`tmap`) derived image item.
///
/// Describes how to apply a gain map to convert between SDR and HDR
/// renditions. The gain map is a separate AV1-encoded image that, combined
/// with this metadata, allows reconstructing an HDR image from the SDR base.
///
/// See ISO 21496-1:2025 for the full specification.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct GainMapMetadata {
    /// If true, each RGB channel has independent gain map parameters.
    /// If false, `channels[0]` applies to all three channels.
    pub is_multichannel: bool,
    /// If true, the gain map is encoded in the base image's colour space.
    /// If false, it's in the alternate image's colour space.
    pub use_base_colour_space: bool,
    /// ISO 21496-1 backward direction flag (bit 2 of flags byte).
    /// When true, the base image is HDR and the alternate is SDR.
    /// Default false = base is SDR, alternate is HDR.
    pub backward_direction: bool,
    /// Base HDR headroom (numerator).
    pub base_hdr_headroom_n: u32,
    /// Base HDR headroom (denominator).
    pub base_hdr_headroom_d: u32,
    /// Alternate HDR headroom (numerator).
    pub alternate_hdr_headroom_n: u32,
    /// Alternate HDR headroom (denominator).
    pub alternate_hdr_headroom_d: u32,
    /// Per-channel parameters. For single-channel mode, only index 0 is
    /// meaningful (indices 1 and 2 are copies of index 0).
    pub channels: [GainMapChannel; 3],
}
492
493impl GainMapMetadata {
494    /// Parse an ISO 21496-1 AVIF `tmap` item payload into a `GainMapMetadata`.
495    ///
496    /// This is the public mirror of the internal parser. Useful for testing
497    /// and for consumers who hold raw tmap payload bytes.
498    pub fn parse_tmap_bytes(data: &[u8]) -> Result<Self> {
499        parse_tone_map_image(data)
500    }
501
502    /// Serialize this metadata to the ISO 21496-1 AVIF `tmap` item payload format.
503    ///
504    /// This is the inverse of the internal `parse_tone_map_image` function and
505    /// produces the exact byte sequence expected in an AVIF `tmap` item. The
506    /// output can be passed to `zenavif_serialize::Aviffy::set_gain_map` or
507    /// used for byte-level roundtrip testing.
508    ///
509    /// The writer is always written as version 0 / minimum_version 0.
510    pub fn to_bytes(&self) -> std::vec::Vec<u8> {
511        let channel_count = if self.is_multichannel { 3usize } else { 1usize };
512        let mut buf = std::vec::Vec::with_capacity(5 + 8 + channel_count * 40);
513        buf.push(0u8); // version
514        buf.extend_from_slice(&0u16.to_be_bytes()); // minimum_version
515        buf.extend_from_slice(&0u16.to_be_bytes()); // writer_version
516        let flags = (u8::from(self.is_multichannel) << 7)
517            | (u8::from(self.use_base_colour_space) << 6)
518            | (u8::from(self.backward_direction) << 2);
519        buf.push(flags);
520        buf.extend_from_slice(&self.base_hdr_headroom_n.to_be_bytes());
521        buf.extend_from_slice(&self.base_hdr_headroom_d.to_be_bytes());
522        buf.extend_from_slice(&self.alternate_hdr_headroom_n.to_be_bytes());
523        buf.extend_from_slice(&self.alternate_hdr_headroom_d.to_be_bytes());
524        for ch in self.channels.iter().take(channel_count) {
525            buf.extend_from_slice(&ch.gain_map_min_n.to_be_bytes());
526            buf.extend_from_slice(&ch.gain_map_min_d.to_be_bytes());
527            buf.extend_from_slice(&ch.gain_map_max_n.to_be_bytes());
528            buf.extend_from_slice(&ch.gain_map_max_d.to_be_bytes());
529            buf.extend_from_slice(&ch.gamma_n.to_be_bytes());
530            buf.extend_from_slice(&ch.gamma_d.to_be_bytes());
531            buf.extend_from_slice(&ch.base_offset_n.to_be_bytes());
532            buf.extend_from_slice(&ch.base_offset_d.to_be_bytes());
533            buf.extend_from_slice(&ch.alternate_offset_n.to_be_bytes());
534            buf.extend_from_slice(&ch.alternate_offset_d.to_be_bytes());
535        }
536        buf
537    }
538}
539
// ─── zencodec conversions ────────────────────────────────────────────

#[cfg(feature = "zencodec")]
impl From<&GainMapChannel> for zencodec::GainMapChannel {
    /// Converts each rational (numerator/denominator) pair into an `f64`.
    /// `.max(1)` clamps a zero denominator (possible in malformed files) to 1,
    /// avoiding division by zero and the resulting inf/NaN.
    fn from(ch: &GainMapChannel) -> Self {
        Self {
            min: ch.gain_map_min_n as f64 / ch.gain_map_min_d.max(1) as f64,
            max: ch.gain_map_max_n as f64 / ch.gain_map_max_d.max(1) as f64,
            gamma: ch.gamma_n as f64 / ch.gamma_d.max(1) as f64,
            base_offset: ch.base_offset_n as f64 / ch.base_offset_d.max(1) as f64,
            alternate_offset: ch.alternate_offset_n as f64 / ch.alternate_offset_d.max(1) as f64,
        }
    }
}
554
#[cfg(feature = "zencodec")]
impl From<&GainMapMetadata> for zencodec::GainMapParams {
    /// Converts the container's rational metadata into zencodec's float
    /// parameters. `.max(1)` clamps zero denominators to avoid inf/NaN.
    fn from(md: &GainMapMetadata) -> Self {
        // Struct-update literal instead of mutating a default value field by
        // field (clippy::field_reassign_with_default); the trailing
        // `..Self::default()` keeps any remaining fields at their defaults.
        Self {
            channels: [
                zencodec::GainMapChannel::from(&md.channels[0]),
                zencodec::GainMapChannel::from(&md.channels[1]),
                zencodec::GainMapChannel::from(&md.channels[2]),
            ],
            base_hdr_headroom: md.base_hdr_headroom_n as f64
                / md.base_hdr_headroom_d.max(1) as f64,
            alternate_hdr_headroom: md.alternate_hdr_headroom_n as f64
                / md.alternate_hdr_headroom_d.max(1) as f64,
            use_base_color_space: md.use_base_colour_space,
            backward_direction: md.backward_direction,
            ..Self::default()
        }
    }
}
573
#[cfg(feature = "zencodec")]
impl From<&zencodec::GainMapChannel> for GainMapChannel {
    /// Converts float parameters back into rational pairs.
    /// `from_f64_cf` presumably derives a rational approximation
    /// (continued fractions?) — confirm against zencodec's docs.
    fn from(ch: &zencodec::GainMapChannel) -> Self {
        use zencodec::gainmap::{Fraction, UFraction};
        let min = Fraction::from_f64_cf(ch.min);
        let max = Fraction::from_f64_cf(ch.max);
        let gamma = UFraction::from_f64_cf(ch.gamma);
        let base_off = Fraction::from_f64_cf(ch.base_offset);
        let alt_off = Fraction::from_f64_cf(ch.alternate_offset);
        Self {
            gain_map_min_n: min.numerator,
            gain_map_min_d: min.denominator,
            gain_map_max_n: max.numerator,
            gain_map_max_d: max.denominator,
            gamma_n: gamma.numerator,
            gamma_d: gamma.denominator,
            base_offset_n: base_off.numerator,
            base_offset_d: base_off.denominator,
            alternate_offset_n: alt_off.numerator,
            alternate_offset_d: alt_off.denominator,
        }
    }
}

#[cfg(feature = "zencodec")]
impl From<&zencodec::GainMapParams> for GainMapMetadata {
    /// Converts zencodec parameters back into container metadata. All three
    /// channels are converted unconditionally; single-channel mode is only
    /// reflected in the `is_multichannel` flag.
    fn from(p: &zencodec::GainMapParams) -> Self {
        use zencodec::gainmap::UFraction;
        let headroom_base = UFraction::from_f64_cf(p.base_hdr_headroom);
        let headroom_alt = UFraction::from_f64_cf(p.alternate_hdr_headroom);
        Self {
            is_multichannel: !p.is_single_channel(),
            use_base_colour_space: p.use_base_color_space,
            backward_direction: p.backward_direction,
            base_hdr_headroom_n: headroom_base.numerator,
            base_hdr_headroom_d: headroom_base.denominator,
            alternate_hdr_headroom_n: headroom_alt.numerator,
            alternate_hdr_headroom_d: headroom_alt.denominator,
            channels: [
                GainMapChannel::from(&p.channels[0]),
                GainMapChannel::from(&p.channels[1]),
                GainMapChannel::from(&p.channels[2]),
            ],
        }
    }
}
620
/// Gain map information extracted from an AVIF container.
///
/// Bundles the ISO 21496-1 metadata, the raw AV1-encoded gain map image data,
/// and the alternate rendition's color information into a single type.
///
/// The `gain_map_data` field contains an AV1 bitstream that can be decoded
/// with any AV1 decoder (e.g., rav1d) to obtain the gain map pixel values.
///
/// # Example
///
/// ```no_run
/// let bytes = std::fs::read("hdr.avif").unwrap();
/// let parser = zenavif_parse::AvifParser::from_bytes(&bytes).unwrap();
/// if let Some(Ok(gm)) = parser.gain_map() {
///     println!("Gain map: {} bytes", gm.gain_map_data.len());
///     println!("Multichannel: {}", gm.metadata.is_multichannel);
/// }
/// ```
#[derive(Debug, Clone)]
pub struct AvifGainMap {
    /// ISO 21496-1 gain map metadata (parsed from the `tmap` item payload).
    pub metadata: GainMapMetadata,
    /// Raw AV1 bitstream of the gain map image. Decode with an AV1 decoder
    /// to obtain the gain map pixel values.
    pub gain_map_data: std::vec::Vec<u8>,
    /// Color information for the alternate (typically HDR) rendition,
    /// from the `tmap` item's `colr` property. `None` if absent.
    pub alt_color_info: Option<ColorInformation>,
}

/// Depth auxiliary image extracted from an AVIF container.
///
/// AVIF supports auxiliary images via `auxl` item references with `auxC` type
/// properties, following the HEIF (ISO 23008-12) auxiliary image mechanism.
/// Depth maps use the auxiliary type URN
/// `urn:mpeg:mpegB:cicp:systems:auxiliary:depth` (MPEG-B Part 23) or the
/// legacy HEVC-style `urn:mpeg:hevc:2015:auxid:2`.
///
/// The `data` field contains a raw AV1 bitstream that can be decoded with
/// any AV1 decoder to obtain the depth image pixel values (typically
/// monochrome 8-bit or 10-bit).
///
/// # Example
///
/// ```no_run
/// let bytes = std::fs::read("portrait.avif").unwrap();
/// let parser = zenavif_parse::AvifParser::from_bytes(&bytes).unwrap();
/// if let Some(Ok(dm)) = parser.depth_map() {
///     println!("Depth map: {}x{}, {} bytes AV1 data", dm.width, dm.height, dm.data.len());
/// }
/// ```
#[derive(Debug, Clone)]
pub struct AvifDepthMap {
    /// Raw AV1 bitstream of the depth auxiliary image. Decode with an AV1
    /// decoder to obtain grayscale depth pixel values.
    pub data: std::vec::Vec<u8>,
    /// Width of the depth image in pixels (from `ispe` property).
    pub width: u32,
    /// Height of the depth image in pixels (from `ispe` property).
    pub height: u32,
    /// AV1 codec configuration for the depth item (from `av1C` property).
    pub av1_config: Option<AV1Config>,
    /// Color information for the depth item (from `colr` property), if present.
    pub color_info: Option<ColorInformation>,
}
686
/// Operating point selector from the `a1op` property box.
///
/// Selects which AV1 operating point to decode for multi-operating-point images.
/// See AVIF § 4.3.4.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct OperatingPointSelector {
    /// Operating point index (0..31)
    pub op_index: u8,
}

/// Layer selector from the `lsel` property box.
///
/// Selects which spatial layer to render for layered/progressive images.
/// See HEIF (ISO 23008-12).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct LayerSelector {
    /// Layer ID to render (0-3), or 0xFFFF for all layers (progressive)
    pub layer_id: u16,
}

/// AV1 layered image indexing from the `a1lx` property box.
///
/// Provides byte sizes for the first 3 layers so decoders can seek
/// to a specific layer without parsing the full bitstream.
/// See AVIF § 4.3.6.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct AV1LayeredImageIndexing {
    /// Byte sizes of layers 0, 1, 2. The last layer's size is implicit
    /// (total item size minus the sum of these three).
    pub layer_sizes: [u32; 3],
}
718
/// Options for parsing AVIF files.
///
/// Prefer using [`DecodeConfig::lenient()`] with [`AvifParser`] instead.
#[derive(Debug, Clone, Copy, Default)]
pub struct ParseOptions {
    /// Enable lenient parsing mode.
    ///
    /// When true, non-critical validation errors (like non-zero flags in boxes
    /// that expect zero flags) will be ignored instead of returning errors.
    /// This allows parsing of slightly malformed but otherwise valid AVIF files.
    ///
    /// Default: false (strict validation)
    pub lenient: bool,
}
734
/// Configuration for parsing AVIF files with resource limits and validation options
///
/// Provides fine-grained control over resource consumption during AVIF parsing,
/// allowing defensive parsing against malicious or malformed files.
///
/// Resource limits are checked **before** allocations occur, preventing out-of-memory
/// conditions from malicious files that claim unrealistic dimensions or counts.
///
/// # Examples
///
/// ```rust
/// use zenavif_parse::DecodeConfig;
///
/// // Default limits (suitable for most apps)
/// let config = DecodeConfig::default();
///
/// // Strict limits for untrusted input
/// let config = DecodeConfig::default()
///     .with_peak_memory_limit(100_000_000)  // 100MB
///     .with_total_megapixels_limit(64)       // 64MP max
///     .with_max_animation_frames(100);       // 100 frames
///
/// // No limits (backwards compatible with read_avif)
/// let config = DecodeConfig::unlimited();
/// ```
#[derive(Debug, Clone)]
pub struct DecodeConfig {
    /// Maximum peak heap memory usage in bytes. `None` = unlimited.
    /// Default: 1GB (1,000,000,000 bytes)
    pub peak_memory_limit: Option<u64>,

    /// Maximum total megapixels for grid images. `None` = unlimited.
    /// Default: 512 megapixels
    pub total_megapixels_limit: Option<u32>,

    /// Maximum number of animation frames. `None` = unlimited.
    /// Default: 10,000 frames
    pub max_animation_frames: Option<u32>,

    /// Maximum number of grid tiles. `None` = unlimited.
    /// Default: 1,000 tiles
    pub max_grid_tiles: Option<u32>,

    /// Enable lenient parsing mode.
    /// Default: false (strict validation)
    pub lenient: bool,
}
782
impl Default for DecodeConfig {
    /// Defaults: 1GB peak memory, 512 megapixels, 10,000 animation frames,
    /// 1,000 grid tiles, strict (non-lenient) validation.
    fn default() -> Self {
        Self {
            peak_memory_limit: Some(1_000_000_000),
            total_megapixels_limit: Some(512),
            max_animation_frames: Some(10_000),
            max_grid_tiles: Some(1_000),
            lenient: false,
        }
    }
}
794
795impl DecodeConfig {
796    /// Create a configuration with no resource limits.
797    ///
798    /// Equivalent to the behavior of `read_avif()` before resource limits were added.
799    pub fn unlimited() -> Self {
800        Self {
801            peak_memory_limit: None,
802            total_megapixels_limit: None,
803            max_animation_frames: None,
804            max_grid_tiles: None,
805            lenient: false,
806        }
807    }
808
809    /// Set the peak memory limit in bytes
810    pub fn with_peak_memory_limit(mut self, bytes: u64) -> Self {
811        self.peak_memory_limit = Some(bytes);
812        self
813    }
814
815    /// Set the total megapixels limit for grid images
816    pub fn with_total_megapixels_limit(mut self, megapixels: u32) -> Self {
817        self.total_megapixels_limit = Some(megapixels);
818        self
819    }
820
821    /// Set the maximum animation frame count
822    pub fn with_max_animation_frames(mut self, frames: u32) -> Self {
823        self.max_animation_frames = Some(frames);
824        self
825    }
826
827    /// Set the maximum grid tile count
828    pub fn with_max_grid_tiles(mut self, tiles: u32) -> Self {
829        self.max_grid_tiles = Some(tiles);
830        self
831    }
832
833    /// Enable lenient parsing mode
834    pub fn lenient(mut self, lenient: bool) -> Self {
835        self.lenient = lenient;
836        self
837    }
838}
839
/// Grid image configuration for tiled/grid-based AVIF images.
///
/// For tiled/grid AVIF images, this describes the grid layout.
/// Grid images are composed of multiple AV1 image items (tiles) arranged in a rectangular grid.
///
/// ## Grid Layout Determination
///
/// Grid layout can be specified in two ways:
/// 1. **Explicit ImageGrid property box** - contains rows, columns, and output dimensions
/// 2. **Calculated from ispe properties** - when no ImageGrid box exists, dimensions are
///    calculated by dividing the grid item's dimensions by a tile's dimensions
///
/// ## Output Dimensions
///
/// - `output_width` and `output_height` may be 0, indicating the decoder should calculate
///   them from the tile dimensions
/// - When non-zero, they specify the exact output dimensions of the composed image
#[derive(Debug, Clone, PartialEq)]
pub struct GridConfig {
    /// Number of tile rows (1-256).
    /// NOTE(review): a `u8` can only hold up to 255 — confirm whether this
    /// stores `rows` or the spec's `rows_minus_one`.
    pub rows: u8,
    /// Number of tile columns (1-256).
    /// NOTE(review): same `u8` range caveat as `rows`.
    pub columns: u8,
    /// Output width in pixels (0 = calculate from tiles)
    pub output_width: u32,
    /// Output height in pixels (0 = calculate from tiles)
    pub output_height: u32,
}
869
/// Frame information for animated AVIF (legacy eager API).
#[cfg(feature = "eager")]
#[deprecated(since = "1.5.0", note = "Use `AvifParser::frame()` which returns `FrameRef` instead")]
#[derive(Debug)]
pub struct AnimationFrame {
    /// AV1 bitstream data for this frame (owned copy)
    pub data: TryVec<u8>,
    /// Duration in milliseconds (0 if unknown)
    pub duration_ms: u32,
}
880
/// Animation configuration for animated AVIF (avis brand)
#[cfg(feature = "eager")]
#[deprecated(since = "1.5.0", note = "Use `AvifParser::animation_info()` and `AvifParser::frames()` instead")]
#[derive(Debug)]
#[allow(deprecated)]
pub struct AnimationConfig {
    /// Number of times to loop (0 = infinite)
    pub loop_count: u32,
    /// All frames in the animation, eagerly extracted into owned buffers
    pub frames: TryVec<AnimationFrame>,
}
892
893// Internal structures for animation parsing
894
/// Fields retained from the movie-level header box.
/// Both fields are currently unused (hence the underscore prefixes) but
/// kept for `Debug` output.
#[derive(Debug)]
struct MovieHeader {
    // Movie timescale — presumably ticks per second; confirm against the parser.
    _timescale: u32,
    // Movie duration, presumably in timescale units — confirm against the parser.
    _duration: u64,
}
900
/// Fields retained from a track's media header box.
#[derive(Debug)]
struct MediaHeader {
    /// Media timescale (ticks per second) used to convert sample deltas to time.
    timescale: u32,
    // Unused; kept for `Debug` output.
    _duration: u64,
}
906
/// One run-length entry of a time-to-sample table:
/// `sample_count` consecutive samples each lasting `sample_delta` ticks.
#[derive(Debug)]
struct TimeToSampleEntry {
    /// Number of consecutive samples sharing this delta.
    sample_count: u32,
    /// Duration of each sample, in media timescale ticks.
    sample_delta: u32,
}
912
/// One entry of a sample-to-chunk table, mapping a run of chunks
/// (starting at `first_chunk`) to a per-chunk sample count.
#[derive(Debug)]
struct SampleToChunkEntry {
    /// Index of the first chunk this entry applies to.
    first_chunk: u32,
    /// Number of samples in each chunk of the run.
    samples_per_chunk: u32,
    // Unused; kept for `Debug` output.
    _sample_description_index: u32,
}
919
/// Per-track sample timing, size, and location data distilled from the
/// track's sample-table boxes.
#[derive(Debug)]
struct SampleTable {
    /// Run-length encoded per-sample durations.
    time_to_sample: TryVec<TimeToSampleEntry>,
    /// Size in bytes of each sample.
    sample_sizes: TryVec<u32>,
    /// Precomputed byte offset for each sample, derived from
    /// sample_to_chunk + chunk_offsets + sample_sizes during parsing.
    sample_offsets: TryVec<u64>,
}
928
/// A track reference entry (e.g., auxl, cdsc) parsed from a `tref` sub-box.
#[derive(Debug)]
struct TrackReference {
    /// Four-character reference type (e.g. `auxl`, `cdsc`).
    reference_type: FourCC,
    /// IDs of the tracks this reference points to.
    track_ids: TryVec<u32>,
}
935
/// Codec properties extracted from a `stsd` VisualSampleEntry.
#[derive(Debug, Clone, Default)]
struct TrackCodecConfig {
    /// AV1 codec configuration (`av1C`), if present.
    av1_config: Option<AV1Config>,
    /// Colour information (`colr`), if present.
    color_info: Option<ColorInformation>,
}
942
/// Parsed data from a single track box (`trak`).
#[derive(Debug)]
struct ParsedTrack {
    /// Track ID, used to resolve references between tracks.
    track_id: u32,
    /// Handler type identifying the track's media kind.
    handler_type: FourCC,
    /// Media timescale (ticks per second).
    media_timescale: u32,
    /// Sample timing/size/offset data for this track.
    sample_table: SampleTable,
    /// References to other tracks (from `tref` sub-boxes).
    references: TryVec<TrackReference>,
    /// Number of times to loop (0 = infinite).
    loop_count: u32,
    /// Codec properties from this track's sample description.
    codec_config: TrackCodecConfig,
}
954
/// Paired color + optional alpha animation data after track association.
struct ParsedAnimationData {
    /// Media timescale (ticks per second) of the color track.
    color_timescale: u32,
    /// Sample table of the color track.
    color_sample_table: SampleTable,
    /// Media timescale of the alpha track, if one exists.
    alpha_timescale: Option<u32>,
    /// Sample table of the alpha track, if one exists.
    alpha_sample_table: Option<SampleTable>,
    /// Number of times to loop (0 = infinite).
    loop_count: u32,
    /// Codec properties from the color track's sample description.
    color_codec_config: TrackCodecConfig,
}
964
/// Eagerly-parsed contents of an AVIF file (legacy API).
///
/// Every payload is copied into an owned buffer; prefer [`AvifParser`]
/// for zero-copy access to the same data.
#[cfg(feature = "eager")]
#[deprecated(since = "1.5.0", note = "Use `AvifParser` for zero-copy parsing instead")]
#[derive(Debug, Default)]
#[allow(deprecated)]
pub struct AvifData {
    /// AV1 data for the color channels.
    ///
    /// The collected data indicated by the `pitm` box, See ISO 14496-12:2015 § 8.11.4
    pub primary_item: TryVec<u8>,
    /// AV1 data for alpha channel.
    ///
    /// Associated alpha channel for the primary item, if any
    pub alpha_item: Option<TryVec<u8>>,
    /// If true, divide RGB values by the alpha value.
    ///
    /// See `prem` in MIAF § 7.3.5.2
    pub premultiplied_alpha: bool,

    /// Grid configuration for tiled images.
    ///
    /// If present, the image is a grid and `grid_tiles` contains the tile data.
    /// Grid layout is determined either from an explicit ImageGrid property box or
    /// calculated from ispe (Image Spatial Extents) properties.
    ///
    /// ## Example
    ///
    /// ```no_run
    /// #[allow(deprecated)]
    /// use std::fs::File;
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// #[allow(deprecated)]
    /// let data = zenavif_parse::read_avif(&mut File::open("image.avif")?)?;
    ///
    /// if let Some(grid) = data.grid_config {
    ///     println!("Grid: {}×{} tiles", grid.rows, grid.columns);
    ///     println!("Output: {}×{}", grid.output_width, grid.output_height);
    ///     println!("Tile count: {}", data.grid_tiles.len());
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub grid_config: Option<GridConfig>,

    /// AV1 payloads for grid image tiles.
    ///
    /// Empty for non-grid images. For grid images, contains one entry per tile.
    ///
    /// **Tile ordering:** Tiles are guaranteed to be in the correct order for grid assembly,
    /// sorted by their dimgIdx (reference index). This is row-major order: tiles in the first
    /// row from left to right, then the second row, etc.
    pub grid_tiles: TryVec<TryVec<u8>>,

    /// Animation configuration (for animated AVIF with avis brand)
    ///
    /// When present, primary_item contains the first frame
    pub animation: Option<AnimationConfig>,

    /// AV1 codec configuration from the container's `av1C` property.
    pub av1_config: Option<AV1Config>,

    /// Colour information from the container's `colr` property.
    pub color_info: Option<ColorInformation>,

    /// Image rotation from the container's `irot` property.
    pub rotation: Option<ImageRotation>,

    /// Image mirror from the container's `imir` property.
    pub mirror: Option<ImageMirror>,

    /// Clean aperture (crop) from the container's `clap` property.
    pub clean_aperture: Option<CleanAperture>,

    /// Pixel aspect ratio from the container's `pasp` property.
    pub pixel_aspect_ratio: Option<PixelAspectRatio>,

    /// Content light level from the container's `clli` property.
    pub content_light_level: Option<ContentLightLevel>,

    /// Mastering display colour volume from the container's `mdcv` property.
    pub mastering_display: Option<MasteringDisplayColourVolume>,

    /// Content colour volume from the container's `cclv` property.
    pub content_colour_volume: Option<ContentColourVolume>,

    /// Ambient viewing environment from the container's `amve` property.
    pub ambient_viewing: Option<AmbientViewingEnvironment>,

    /// Operating point selector from the container's `a1op` property.
    pub operating_point: Option<OperatingPointSelector>,

    /// Layer selector from the container's `lsel` property.
    pub layer_selector: Option<LayerSelector>,

    /// AV1 layered image indexing from the container's `a1lx` property.
    pub layered_image_indexing: Option<AV1LayeredImageIndexing>,

    /// EXIF metadata from a `cdsc`-linked `Exif` item.
    ///
    /// Raw EXIF data (TIFF header onwards), with the 4-byte AVIF offset prefix stripped.
    pub exif: Option<TryVec<u8>>,

    /// XMP metadata from a `cdsc`-linked `mime` item.
    ///
    /// Raw XMP/XML data as UTF-8.
    pub xmp: Option<TryVec<u8>>,

    /// Gain map metadata from a `tmap` derived image item.
    pub gain_map_metadata: Option<GainMapMetadata>,

    /// AV1-encoded gain map image data.
    pub gain_map_item: Option<TryVec<u8>>,

    /// Color information for the alternate (HDR) rendition from the `tmap` item.
    pub gain_map_color_info: Option<ColorInformation>,

    /// Depth auxiliary image data, if present.
    pub depth_item: Option<TryVec<u8>>,

    /// Width of the depth auxiliary image (from `ispe`).
    pub depth_width: u32,

    /// Height of the depth auxiliary image (from `ispe`).
    pub depth_height: u32,

    /// AV1 codec configuration for the depth auxiliary item.
    pub depth_av1_config: Option<AV1Config>,

    /// Color information for the depth auxiliary item.
    pub depth_color_info: Option<ColorInformation>,

    /// Major brand from the `ftyp` box (e.g., `*b"avif"` or `*b"avis"`).
    pub major_brand: [u8; 4],

    /// Compatible brands from the `ftyp` box.
    pub compatible_brands: std::vec::Vec<[u8; 4]>,
}
1101
#[cfg(feature = "eager")]
#[allow(deprecated)]
impl AvifData {
    /// Get the full gain map bundle, if present.
    ///
    /// Clones the gain map metadata and data out of this `AvifData` (the
    /// original fields are left intact) and returns an [`AvifGainMap`].
    /// Returns `None` if no gain map metadata or data is present.
    pub fn gain_map(&self) -> Option<AvifGainMap> {
        let metadata = self.gain_map_metadata.as_ref()?.clone();
        let gain_map_data = self.gain_map_item.as_ref()?.to_vec();
        Some(AvifGainMap {
            metadata,
            gain_map_data,
            alt_color_info: self.gain_map_color_info.clone(),
        })
    }

    /// Get the depth auxiliary image bundle, if present.
    ///
    /// Returns [`AvifDepthMap`] with the raw AV1 depth data (copied), dimensions,
    /// and codec/color info. Returns `None` if no depth auxiliary is present.
    pub fn depth_map(&self) -> Option<AvifDepthMap> {
        let data = self.depth_item.as_ref()?.to_vec();
        Some(AvifDepthMap {
            data,
            width: self.depth_width,
            height: self.depth_height,
            av1_config: self.depth_av1_config.clone(),
            color_info: self.depth_color_info.clone(),
        })
    }
}
1134
1135// # Memory Usage
1136//
1137// This implementation loads all image data into owned vectors (`TryVec<u8>`), which has
1138// memory implications depending on the file type:
1139//
1140// - **Static images**: Single copy of compressed data (~5-50KB typical)
1141//   - `primary_item`: compressed AV1 data
1142//   - `alpha_item`: compressed alpha data (if present)
1143//
1144// - **Grid images**: All tiles loaded (~100KB-2MB for large grids)
1145//   - `grid_tiles`: one compressed tile per grid cell
1146//
1147// - **Animated images**: All frames loaded eagerly (⚠️ HIGH MEMORY)
1148//   - Internal mdat boxes: ~500KB for 95-frame video
1149//   - Extracted frames: ~500KB duplicated in `animation.frames[].data`
1150//   - **Total: ~2× file size in memory**
1151//
1152// For large animated files, consider using a streaming approach or processing frames
1153// individually rather than loading the entire `AvifData` structure.
1154
#[cfg(feature = "eager")]
#[allow(deprecated)]
impl AvifData {
    /// Reads and eagerly parses an AVIF file from `reader`.
    #[deprecated(since = "1.5.0", note = "Use `AvifParser::from_reader()` instead")]
    pub fn from_reader<R: Read>(reader: &mut R) -> Result<Self> {
        read_avif(reader)
    }

    /// Parses AV1 data to get basic properties of the opaque channel
    pub fn primary_item_metadata(&self) -> Result<AV1Metadata> {
        AV1Metadata::parse_av1_bitstream(&self.primary_item)
    }

    /// Parses AV1 data to get basic properties about the alpha channel, if any
    pub fn alpha_item_metadata(&self) -> Result<Option<AV1Metadata>> {
        match self.alpha_item.as_deref() {
            Some(alpha) => AV1Metadata::parse_av1_bitstream(alpha).map(Some),
            None => Ok(None),
        }
    }
}
1173
/// Chroma subsampling configuration for AV1/AVIF.
///
/// `(false, false)` = 4:4:4 (no subsampling).
/// `(true, true)` = 4:2:0 (both axes subsampled).
/// `(true, false)` = 4:2:2 (horizontal only).
// `Hash` is derived so the type can be used as a map/set key; it is a
// two-bool struct, so all standard derives are free.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct ChromaSubsampling {
    /// Whether the horizontal (X) axis is subsampled.
    pub horizontal: bool,
    /// Whether the vertical (Y) axis is subsampled.
    pub vertical: bool,
}
1186
// Named presets for the common AV1 subsampling modes.
impl ChromaSubsampling {
    /// 4:4:4 — no chroma subsampling (full-resolution chroma).
    pub const NONE: Self = Self { horizontal: false, vertical: false };
    /// 4:2:0 — both axes subsampled.
    pub const YUV420: Self = Self { horizontal: true, vertical: true };
    /// 4:2:2 — horizontal subsampling only.
    pub const YUV422: Self = Self { horizontal: true, vertical: false };
}
1195
1196impl From<(bool, bool)> for ChromaSubsampling {
1197    fn from((h, v): (bool, bool)) -> Self {
1198        Self { horizontal: h, vertical: v }
1199    }
1200}
1201
1202impl From<ChromaSubsampling> for (bool, bool) {
1203    fn from(cs: ChromaSubsampling) -> Self {
1204        (cs.horizontal, cs.vertical)
1205    }
1206}
1207
/// AV1 sequence header metadata parsed from an OBU bitstream.
///
/// See [`AvifParser::primary_metadata()`] and [`AV1Metadata::parse_av1_bitstream()`].
#[non_exhaustive]
#[derive(Debug, Clone)]
pub struct AV1Metadata {
    /// Should be true for non-animated AVIF
    pub still_picture: bool,
    /// Maximum frame width from the sequence header.
    pub max_frame_width: NonZeroU32,
    /// Maximum frame height from the sequence header.
    pub max_frame_height: NonZeroU32,
    /// 8, 10, or 12
    pub bit_depth: u8,
    /// 0, 1 or 2 for the level of complexity
    pub seq_profile: u8,
    /// Chroma subsampling. Use named fields (`horizontal`, `vertical`) or
    /// constants like [`ChromaSubsampling::YUV420`].
    pub chroma_subsampling: ChromaSubsampling,
    /// Whether the stream has no chroma planes.
    pub monochrome: bool,
    /// AV1 base quantizer index (0-255) from the first frame header.
    /// `None` if the frame header could not be parsed.
    /// 0 = lossless candidate, 255 = worst quality.
    pub base_q_idx: Option<u8>,
    /// Whether the encoding is lossless (all quantization parameters are zero
    /// and chroma is not subsampled).
    /// `None` if the frame header could not be parsed.
    pub lossless: Option<bool>,
}
1235
1236impl AV1Metadata {
1237    /// Parses raw AV1 bitstream (sequence header + optional frame header).
1238    ///
1239    /// Extracts sequence-level metadata and attempts to parse the first frame
1240    /// header for quantization/lossless detection.
1241    ///
1242    /// This is for the bare image payload from an encoder, not an AVIF/HEIF file.
1243    /// To parse AVIF files, see [`AvifParser::from_reader()`].
1244    #[inline(never)]
1245    pub fn parse_av1_bitstream(obu_bitstream: &[u8]) -> Result<Self> {
1246        let (h, frame_quant) = obu::parse_obu_with_frame_info(obu_bitstream)?;
1247        let no_chroma_subsampling = !h.color.chroma_subsampling.horizontal
1248            && !h.color.chroma_subsampling.vertical;
1249        Ok(Self {
1250            still_picture: h.still_picture,
1251            max_frame_width: h.max_frame_width,
1252            max_frame_height: h.max_frame_height,
1253            bit_depth: h.color.bit_depth,
1254            seq_profile: h.seq_profile,
1255            chroma_subsampling: h.color.chroma_subsampling,
1256            monochrome: h.color.monochrome,
1257            base_q_idx: frame_quant.map(|fq| fq.base_q_idx),
1258            lossless: frame_quant.map(|fq| fq.coded_lossless && no_chroma_subsampling),
1259        })
1260    }
1261}
1262
/// A single frame from an animated AVIF, with zero-copy when possible.
///
/// The `data` field is `Cow::Borrowed` when the frame lives in a single
/// contiguous mdat extent, and `Cow::Owned` when extents must be concatenated.
pub struct FrameRef<'a> {
    /// AV1 bitstream payload for this frame.
    pub data: Cow<'a, [u8]>,
    /// Alpha channel data for this frame, if the animation has a separate alpha track.
    pub alpha_data: Option<Cow<'a, [u8]>>,
    /// Duration in milliseconds (0 if unknown).
    pub duration_ms: u32,
}
1273
/// Byte range of a media data box within the file.
struct MdatBounds {
    /// Absolute byte offset of the box payload within the file.
    offset: u64,
    /// Payload length in bytes.
    length: u64,
}
1279
/// Where an item's data lives: construction method + extent ranges.
struct ItemExtents {
    /// Addressing scheme for the extents (see `ConstructionMethod`).
    construction_method: ConstructionMethod,
    /// Byte ranges composing the item's data, in order.
    extents: TryVec<ExtentRange>,
}
1285
/// Zero-copy AVIF parser backed by a borrowed or owned byte buffer.
///
/// `AvifParser` records byte offsets during parsing but does **not** copy
/// mdat payload data. Data access methods return `Cow<[u8]>` — borrowed
/// when the item is a single contiguous extent, owned when extents must
/// be concatenated.
///
/// # Constructors
///
/// | Method | Lifetime | Zero-copy? |
/// |--------|----------|------------|
/// | [`from_bytes`](Self::from_bytes) | `'data` | Yes — borrows the slice |
/// | [`from_owned`](Self::from_owned) | `'static` | Within the owned buffer |
/// | [`from_reader`](Self::from_reader) | `'static` | Reads all, then owned |
///
/// # Example
///
/// ```no_run
/// use zenavif_parse::AvifParser;
///
/// let bytes = std::fs::read("image.avif")?;
/// let parser = AvifParser::from_bytes(&bytes)?;
/// let primary = parser.primary_data()?; // Cow::Borrowed for single-extent
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub struct AvifParser<'data> {
    /// The complete file bytes (borrowed or owned backing buffer).
    raw: Cow<'data, [u8]>,
    /// Byte ranges of every `mdat` box within `raw`.
    mdat_bounds: TryVec<MdatBounds>,
    /// Contents of the item data (`idat`) box, if present.
    idat: Option<TryVec<u8>>,
    /// Extents of the primary (color) item's data.
    primary: ItemExtents,
    /// Extents of the alpha auxiliary item's data, if any.
    alpha: Option<ItemExtents>,
    /// Grid layout, if the image is a grid of tiles.
    grid_config: Option<GridConfig>,
    /// Extents of each grid tile's data.
    tiles: TryVec<ItemExtents>,
    /// Track/sample data for animated files, if present.
    animation_data: Option<AnimationParserData>,
    /// If true, divide RGB values by the alpha value (`prem` reference).
    premultiplied_alpha: bool,
    // Item properties for the primary item, mirrored from the container.
    av1_config: Option<AV1Config>,
    color_info: Option<ColorInformation>,
    rotation: Option<ImageRotation>,
    mirror: Option<ImageMirror>,
    clean_aperture: Option<CleanAperture>,
    pixel_aspect_ratio: Option<PixelAspectRatio>,
    content_light_level: Option<ContentLightLevel>,
    mastering_display: Option<MasteringDisplayColourVolume>,
    content_colour_volume: Option<ContentColourVolume>,
    ambient_viewing: Option<AmbientViewingEnvironment>,
    operating_point: Option<OperatingPointSelector>,
    layer_selector: Option<LayerSelector>,
    layered_image_indexing: Option<AV1LayeredImageIndexing>,
    // Metadata item extents (EXIF / XMP), if present.
    exif_item: Option<ItemExtents>,
    xmp_item: Option<ItemExtents>,
    // Gain map (`tmap`) data.
    gain_map_metadata: Option<GainMapMetadata>,
    gain_map: Option<ItemExtents>,
    gain_map_color_info: Option<ColorInformation>,
    // Depth auxiliary image data.
    depth_item: Option<ItemExtents>,
    depth_width: u32,
    depth_height: u32,
    depth_av1_config: Option<AV1Config>,
    depth_color_info: Option<ColorInformation>,
    // Brands from the `ftyp` box.
    major_brand: [u8; 4],
    compatible_brands: std::vec::Vec<[u8; 4]>,
}
1347
/// Animation state retained by [`AvifParser`]: timing and sample tables
/// for the color track plus an optional alpha track.
struct AnimationParserData {
    /// Media timescale (ticks per second) for the color track.
    media_timescale: u32,
    /// Sample table for the color track.
    sample_table: SampleTable,
    /// Media timescale for the alpha track, if present.
    alpha_media_timescale: Option<u32>,
    /// Sample table for the alpha track, if present.
    alpha_sample_table: Option<SampleTable>,
    /// Number of times to loop (0 = infinite).
    loop_count: u32,
    /// Codec properties from the color track's sample description.
    codec_config: TrackCodecConfig,
}
1356
/// Animation metadata from [`AvifParser`]
#[derive(Debug, Clone, Copy)]
pub struct AnimationInfo {
    /// Total number of frames in the animation.
    pub frame_count: usize,
    /// Number of times to loop (0 = infinite).
    pub loop_count: u32,
    /// Whether animation has a separate alpha track.
    pub has_alpha: bool,
    /// Media timescale (ticks per second) for the color track.
    pub timescale: u32,
}
1367
/// Parsed structure from the box-level parse pass (no mdat data).
struct ParsedStructure {
    /// `None` for pure AVIF sequences (`avis` brand) that have only `moov`+`mdat`.
    meta: Option<AvifInternalMeta>,
    /// Byte ranges of every non-empty `mdat` box in the file.
    mdat_bounds: TryVec<MdatBounds>,
    /// Animation track data from `moov`, if any tracks were found.
    animation_data: Option<ParsedAnimationData>,
    /// Major brand from the `ftyp` box.
    major_brand: [u8; 4],
    /// Compatible brands from the `ftyp` box.
    compatible_brands: std::vec::Vec<[u8; 4]>,
}
1377
1378impl<'data> AvifParser<'data> {
1379    // ========================================
1380    // Constructors
1381    // ========================================
1382
1383    /// Parse AVIF from a borrowed byte slice (true zero-copy).
1384    ///
1385    /// The returned parser borrows `data` — single-extent items will be
1386    /// returned as `Cow::Borrowed` slices into this buffer.
1387    pub fn from_bytes(data: &'data [u8]) -> Result<Self> {
1388        Self::from_bytes_with_config(data, &DecodeConfig::default(), &Unstoppable)
1389    }
1390
1391    /// Parse AVIF from a borrowed byte slice with resource limits.
1392    pub fn from_bytes_with_config(
1393        data: &'data [u8],
1394        config: &DecodeConfig,
1395        stop: &dyn Stop,
1396    ) -> Result<Self> {
1397        let parsed = Self::parse_raw(data, config, stop)?;
1398        Self::build(Cow::Borrowed(data), parsed, config)
1399    }
1400
1401    /// Parse AVIF from an owned buffer.
1402    ///
1403    /// The returned parser owns the data — single-extent items will still
1404    /// be returned as `Cow::Borrowed` slices (borrowing from the internal buffer).
1405    pub fn from_owned(data: std::vec::Vec<u8>) -> Result<AvifParser<'static>> {
1406        AvifParser::from_owned_with_config(data, &DecodeConfig::default(), &Unstoppable)
1407    }
1408
1409    /// Parse AVIF from an owned buffer with resource limits.
1410    pub fn from_owned_with_config(
1411        data: std::vec::Vec<u8>,
1412        config: &DecodeConfig,
1413        stop: &dyn Stop,
1414    ) -> Result<AvifParser<'static>> {
1415        let parsed = AvifParser::parse_raw(&data, config, stop)?;
1416        AvifParser::build(Cow::Owned(data), parsed, config)
1417    }
1418
1419    /// Parse AVIF from a reader (reads all bytes, then parses).
1420    pub fn from_reader<R: Read>(reader: &mut R) -> Result<AvifParser<'static>> {
1421        AvifParser::from_reader_with_config(reader, &DecodeConfig::default(), &Unstoppable)
1422    }
1423
1424    /// Parse AVIF from a reader with resource limits.
1425    ///
1426    /// If `config.peak_memory_limit` is set, reading is capped at that many
1427    /// bytes to prevent unbounded allocation from an untrusted reader.
1428    pub fn from_reader_with_config<R: Read>(
1429        reader: &mut R,
1430        config: &DecodeConfig,
1431        stop: &dyn Stop,
1432    ) -> Result<AvifParser<'static>> {
1433        let buf = if let Some(limit) = config.peak_memory_limit {
1434            let mut limited = reader.take(limit.saturating_add(1));
1435            let mut buf = std::vec::Vec::new();
1436            limited.read_to_end(&mut buf)?;
1437            if buf.len() as u64 > limit {
1438                return Err(Error::ResourceLimitExceeded(
1439                    "input exceeds peak_memory_limit",
1440                ));
1441            }
1442            buf
1443        } else {
1444            let mut buf = std::vec::Vec::new();
1445            reader.read_to_end(&mut buf)?;
1446            buf
1447        };
1448        AvifParser::from_owned_with_config(buf, config, stop)
1449    }
1450
1451    // ========================================
1452    // Internal: parse pass (records offsets, no mdat copy)
1453    // ========================================
1454
    /// Parse the AVIF box structure from raw bytes, recording mdat offsets
    /// without copying mdat content.
    ///
    /// Validates that `ftyp` comes first with an `avif`/`avis` brand, collects
    /// at most one `meta` box, track data from `moov`, and the byte bounds of
    /// every non-empty `mdat`. At least one of `meta`/`moov` must be present.
    fn parse_raw(data: &[u8], config: &DecodeConfig, stop: &dyn Stop) -> Result<ParsedStructure> {
        let parse_opts = ParseOptions { lenient: config.lenient };
        let mut cursor = std::io::Cursor::new(data);
        let mut f = OffsetReader::new(&mut cursor);
        let mut iter = BoxIter::with_max_remaining(&mut f, data.len() as u64);

        // 'ftyp' box must occur first; see ISO 14496-12:2015 § 4.3.1
        let (major_brand, compatible_brands) = if let Some(mut b) = iter.next_box()? {
            if b.head.name == BoxType::FileTypeBox {
                let ftyp = read_ftyp(&mut b)?;
                if ftyp.major_brand != b"avif" && ftyp.major_brand != b"avis" {
                    return Err(Error::InvalidData("ftyp must be 'avif' or 'avis'"));
                }
                let major = ftyp.major_brand.value;
                let compat = ftyp.compatible_brands.iter().map(|b| b.value).collect();
                (major, compat)
            } else {
                return Err(Error::InvalidData("'ftyp' box must occur first"));
            }
        } else {
            return Err(Error::InvalidData("'ftyp' box must occur first"));
        };

        let mut meta = None;
        let mut mdat_bounds = TryVec::new();
        let mut animation_data: Option<ParsedAnimationData> = None;

        while let Some(mut b) = iter.next_box()? {
            // Allow the caller to abort long parses between top-level boxes.
            stop.check()?;

            match b.head.name {
                BoxType::MetadataBox => {
                    if meta.is_some() {
                        return Err(Error::InvalidData(
                            "There should be zero or one meta boxes per ISO 14496-12:2015 § 8.11.1.1",
                        ));
                    }
                    meta = Some(read_avif_meta(&mut b, &parse_opts)?);
                }
                BoxType::MovieBox => {
                    let tracks = read_moov(&mut b)?;
                    if !tracks.is_empty() {
                        animation_data = Some(associate_tracks(tracks)?);
                    }
                }
                BoxType::MediaDataBox => {
                    // Record where the payload lives; empty mdat boxes are ignored.
                    if b.bytes_left() > 0 {
                        let offset = b.offset();
                        let length = b.bytes_left();
                        mdat_bounds.push(MdatBounds { offset, length })?;
                    }
                    // Skip the content — we'll slice into raw later
                    skip_box_content(&mut b)?;
                }
                _ => skip_box_content(&mut b)?,
            }

            check_parser_state(&b.head, &b.content)?;
        }

        // meta is required for still images, but pure AVIF sequences (avis brand)
        // can have only moov+mdat with no meta box.
        if meta.is_none() && animation_data.is_none() {
            return Err(Error::InvalidData("missing meta"));
        }

        Ok(ParsedStructure { meta, mdat_bounds, animation_data, major_brand, compatible_brands })
    }
1525
1526    /// Build an AvifParser from raw bytes + parsed structure.
1527    fn build(raw: Cow<'data, [u8]>, parsed: ParsedStructure, config: &DecodeConfig) -> Result<Self> {
1528        let tracker = ResourceTracker::new(config);
1529
1530        // Store animation metadata if present
1531        let animation_data = if let Some(anim) = parsed.animation_data {
1532            tracker.validate_animation_frames(anim.color_sample_table.sample_sizes.len() as u32)?;
1533            Some(AnimationParserData {
1534                media_timescale: anim.color_timescale,
1535                sample_table: anim.color_sample_table,
1536                alpha_media_timescale: anim.alpha_timescale,
1537                alpha_sample_table: anim.alpha_sample_table,
1538                loop_count: anim.loop_count,
1539                codec_config: anim.color_codec_config,
1540            })
1541        } else {
1542            None
1543        };
1544
1545        // Pure sequence (no meta box): only animation methods will work.
1546        // Use codec config from the color track's stsd if available.
1547        let Some(meta) = parsed.meta else {
1548            let track_config = animation_data.as_ref()
1549                .map(|a| a.codec_config.clone())
1550                .unwrap_or_default();
1551            return Ok(Self {
1552                raw,
1553                mdat_bounds: parsed.mdat_bounds,
1554                idat: None,
1555                primary: ItemExtents { construction_method: ConstructionMethod::File, extents: TryVec::new() },
1556                alpha: None,
1557                grid_config: None,
1558                tiles: TryVec::new(),
1559                animation_data,
1560                premultiplied_alpha: false,
1561                av1_config: track_config.av1_config,
1562                color_info: track_config.color_info,
1563                rotation: None,
1564                mirror: None,
1565                clean_aperture: None,
1566                pixel_aspect_ratio: None,
1567                content_light_level: None,
1568                mastering_display: None,
1569                content_colour_volume: None,
1570                ambient_viewing: None,
1571                operating_point: None,
1572                layer_selector: None,
1573                layered_image_indexing: None,
1574                exif_item: None,
1575                xmp_item: None,
1576                gain_map_metadata: None,
1577                gain_map: None,
1578                gain_map_color_info: None,
1579                depth_item: None,
1580                depth_width: 0,
1581                depth_height: 0,
1582                depth_av1_config: None,
1583                depth_color_info: None,
1584                major_brand: parsed.major_brand,
1585                compatible_brands: parsed.compatible_brands,
1586            });
1587        };
1588
1589        // Get primary item extents
1590        let primary = Self::get_item_extents(&meta, meta.primary_item_id)?;
1591
1592        // Find alpha item and get its extents
1593        let alpha_item_id = meta
1594            .item_references
1595            .iter()
1596            .filter(|iref| {
1597                iref.to_item_id == meta.primary_item_id
1598                    && iref.from_item_id != meta.primary_item_id
1599                    && iref.item_type == b"auxl"
1600            })
1601            .map(|iref| iref.from_item_id)
1602            .find(|&item_id| {
1603                meta.properties.iter().any(|prop| {
1604                    prop.item_id == item_id
1605                        && match &prop.property {
1606                            ItemProperty::AuxiliaryType(urn) => {
1607                                urn.type_subtype().0 == b"urn:mpeg:mpegB:cicp:systems:auxiliary:alpha"
1608                            }
1609                            _ => false,
1610                        }
1611                })
1612            });
1613
1614        let alpha = alpha_item_id
1615            .map(|id| Self::get_item_extents(&meta, id))
1616            .transpose()?;
1617
1618        // Check for premultiplied alpha
1619        let premultiplied_alpha = alpha_item_id.is_some_and(|alpha_id| {
1620            meta.item_references.iter().any(|iref| {
1621                iref.from_item_id == meta.primary_item_id
1622                    && iref.to_item_id == alpha_id
1623                    && iref.item_type == b"prem"
1624            })
1625        });
1626
1627        // Find depth auxiliary item (auxl reference with depth auxC type)
1628        let depth_item_id = meta
1629            .item_references
1630            .iter()
1631            .filter(|iref| {
1632                iref.to_item_id == meta.primary_item_id
1633                    && iref.from_item_id != meta.primary_item_id
1634                    && iref.item_type == b"auxl"
1635            })
1636            .map(|iref| iref.from_item_id)
1637            .find(|&item_id| {
1638                // Skip the alpha item if we already found one
1639                if alpha_item_id == Some(item_id) {
1640                    return false;
1641                }
1642                meta.properties.iter().any(|prop| {
1643                    prop.item_id == item_id
1644                        && match &prop.property {
1645                            ItemProperty::AuxiliaryType(urn) => {
1646                                is_depth_auxiliary_urn(urn.type_subtype().0)
1647                            }
1648                            _ => false,
1649                        }
1650                })
1651            });
1652
1653        let (depth_item, depth_width, depth_height, depth_av1_config, depth_color_info) =
1654            if let Some(depth_id) = depth_item_id {
1655                let extents = Self::get_item_extents(&meta, depth_id)?;
1656                // Get dimensions from ispe property
1657                let dims = meta.properties.iter().find_map(|p| {
1658                    if p.item_id == depth_id {
1659                        match &p.property {
1660                            ItemProperty::ImageSpatialExtents(e) => Some((e.width, e.height)),
1661                            _ => None,
1662                        }
1663                    } else {
1664                        None
1665                    }
1666                });
1667                let (w, h) = dims.unwrap_or((0, 0));
1668                // Get av1C property
1669                let av1c = meta.properties.iter().find_map(|p| {
1670                    if p.item_id == depth_id {
1671                        match &p.property {
1672                            ItemProperty::AV1Config(c) => Some(c.clone()),
1673                            _ => None,
1674                        }
1675                    } else {
1676                        None
1677                    }
1678                });
1679                // Get colr property
1680                let colr = meta.properties.iter().find_map(|p| {
1681                    if p.item_id == depth_id {
1682                        match &p.property {
1683                            ItemProperty::ColorInformation(c) => Some(c.clone()),
1684                            _ => None,
1685                        }
1686                    } else {
1687                        None
1688                    }
1689                });
1690                (Some(extents), w, h, av1c, colr)
1691            } else {
1692                (None, 0, 0, None, None)
1693            };
1694
1695        // Find EXIF/XMP items linked via cdsc references to the primary item
1696        let mut exif_item = None;
1697        let mut xmp_item = None;
1698        for iref in meta.item_references.iter() {
1699            if iref.to_item_id != meta.primary_item_id || iref.item_type != b"cdsc" {
1700                continue;
1701            }
1702            let desc_item_id = iref.from_item_id;
1703            let Some(info) = meta.item_infos.iter().find(|i| i.item_id == desc_item_id) else {
1704                continue;
1705            };
1706            if info.item_type == b"Exif" && exif_item.is_none() {
1707                exif_item = Some(Self::get_item_extents(&meta, desc_item_id)?);
1708            } else if info.item_type == b"mime" && xmp_item.is_none() {
1709                xmp_item = Some(Self::get_item_extents(&meta, desc_item_id)?);
1710            }
1711        }
1712
1713        // Check if primary item is a grid (tiled image)
1714        let is_grid = meta
1715            .item_infos
1716            .iter()
1717            .find(|x| x.item_id == meta.primary_item_id)
1718            .is_some_and(|info| info.item_type == b"grid");
1719
1720        // Extract grid configuration and tile extents if this is a grid
1721        let (grid_config, tiles) = if is_grid {
1722            let mut tiles_with_index: TryVec<(u32, u16)> = TryVec::new();
1723            for iref in meta.item_references.iter() {
1724                if iref.from_item_id == meta.primary_item_id && iref.item_type == b"dimg" {
1725                    tiles_with_index.push((iref.to_item_id, iref.reference_index))?;
1726                }
1727            }
1728
1729            tracker.validate_grid_tiles(tiles_with_index.len() as u32)?;
1730            tiles_with_index.sort_by_key(|&(_, idx)| idx);
1731
1732            let mut tile_extents = TryVec::new();
1733            for (tile_id, _) in tiles_with_index.iter() {
1734                tile_extents.push(Self::get_item_extents(&meta, *tile_id)?)?;
1735            }
1736
1737            let mut tile_ids = TryVec::new();
1738            for (tile_id, _) in tiles_with_index.iter() {
1739                tile_ids.push(*tile_id)?;
1740            }
1741
1742            let grid_config = Self::calculate_grid_config(&meta, &tile_ids)?;
1743
1744            // AVIF 1.2: transformative properties SHALL NOT be on grid tile items
1745            for (tile_id, _) in tiles_with_index.iter() {
1746                for prop in meta.properties.iter() {
1747                    if prop.item_id == *tile_id {
1748                        match &prop.property {
1749                            ItemProperty::Rotation(_)
1750                            | ItemProperty::Mirror(_)
1751                            | ItemProperty::CleanAperture(_) => {
1752                                warn!("grid tile {} has a transformative property (irot/imir/clap), violating AVIF spec", tile_id);
1753                            }
1754                            _ => {}
1755                        }
1756                    }
1757                }
1758            }
1759
1760            (Some(grid_config), tile_extents)
1761        } else {
1762            (None, TryVec::new())
1763        };
1764
1765        // Detect gain map (tmap derived image item)
1766        let (gain_map_metadata, gain_map, gain_map_color_info) = {
1767            let tmap_item = meta.item_infos.iter()
1768                .find(|info| info.item_type == b"tmap");
1769
1770            if let Some(tmap_info) = tmap_item {
1771                let tmap_id = tmap_info.item_id;
1772
1773                // Find dimg references FROM tmap TO its inputs
1774                let mut inputs: TryVec<(u32, u16)> = TryVec::new();
1775                for iref in meta.item_references.iter() {
1776                    if iref.from_item_id == tmap_id && iref.item_type == b"dimg" {
1777                        inputs.push((iref.to_item_id, iref.reference_index))?;
1778                    }
1779                }
1780                inputs.sort_by_key(|&(_, idx)| idx);
1781
1782                if inputs.len() >= 2 {
1783                    let base_item_id = inputs[0].0;
1784                    let gmap_item_id = inputs[1].0;
1785
1786                    if base_item_id == meta.primary_item_id {
1787                        // Read tmap item's data payload (ToneMapImage)
1788                        let tmap_extents = Self::get_item_extents(&meta, tmap_id)?;
1789                        let tmap_data = Self::resolve_extents_from_raw(
1790                            raw.as_ref(), &parsed.mdat_bounds, &tmap_extents,
1791                        )?;
1792                        let metadata = parse_tone_map_image(&tmap_data)?;
1793
1794                        // Get gain map image extents
1795                        let gmap_extents = Self::get_item_extents(&meta, gmap_item_id)?;
1796
1797                        // Get alternate color info from tmap item's properties
1798                        let alt_color = meta.properties.iter().find_map(|p| {
1799                            if p.item_id == tmap_id {
1800                                match &p.property {
1801                                    ItemProperty::ColorInformation(c) => Some(c.clone()),
1802                                    _ => None,
1803                                }
1804                            } else {
1805                                None
1806                            }
1807                        });
1808
1809                        (Some(metadata), Some(gmap_extents), alt_color)
1810                    } else {
1811                        (None, None, None)
1812                    }
1813                } else {
1814                    (None, None, None)
1815                }
1816            } else {
1817                (None, None, None)
1818            }
1819        };
1820
1821        // Extract properties for the primary item
1822        macro_rules! find_prop {
1823            ($variant:ident) => {
1824                meta.properties.iter().find_map(|p| {
1825                    if p.item_id == meta.primary_item_id {
1826                        match &p.property {
1827                            ItemProperty::$variant(c) => Some(c.clone()),
1828                            _ => None,
1829                        }
1830                    } else {
1831                        None
1832                    }
1833                })
1834            };
1835        }
1836
1837        let track_config = animation_data.as_ref().map(|a| &a.codec_config);
1838        let av1_config = find_prop!(AV1Config)
1839            .or_else(|| track_config.and_then(|c| c.av1_config.clone()));
1840        let color_info = find_prop!(ColorInformation)
1841            .or_else(|| track_config.and_then(|c| c.color_info.clone()));
1842        let rotation = find_prop!(Rotation);
1843        let mirror = find_prop!(Mirror);
1844        let clean_aperture = find_prop!(CleanAperture);
1845        let pixel_aspect_ratio = find_prop!(PixelAspectRatio);
1846        let content_light_level = find_prop!(ContentLightLevel);
1847        let mastering_display = find_prop!(MasteringDisplayColourVolume);
1848        let content_colour_volume = find_prop!(ContentColourVolume);
1849        let ambient_viewing = find_prop!(AmbientViewingEnvironment);
1850        let operating_point = find_prop!(OperatingPointSelector);
1851        let layer_selector = find_prop!(LayerSelector);
1852        let layered_image_indexing = find_prop!(AV1LayeredImageIndexing);
1853
1854        // Clone idat
1855        let idat = if let Some(ref idat_data) = meta.idat {
1856            let mut cloned = TryVec::new();
1857            cloned.extend_from_slice(idat_data)?;
1858            Some(cloned)
1859        } else {
1860            None
1861        };
1862
1863        Ok(Self {
1864            raw,
1865            mdat_bounds: parsed.mdat_bounds,
1866            idat,
1867            primary,
1868            alpha,
1869            grid_config,
1870            tiles,
1871            animation_data,
1872            premultiplied_alpha,
1873            av1_config,
1874            color_info,
1875            rotation,
1876            mirror,
1877            clean_aperture,
1878            pixel_aspect_ratio,
1879            content_light_level,
1880            mastering_display,
1881            content_colour_volume,
1882            ambient_viewing,
1883            operating_point,
1884            layer_selector,
1885            layered_image_indexing,
1886            exif_item,
1887            xmp_item,
1888            gain_map_metadata,
1889            gain_map,
1890            gain_map_color_info,
1891            depth_item,
1892            depth_width,
1893            depth_height,
1894            depth_av1_config,
1895            depth_color_info,
1896            major_brand: parsed.major_brand,
1897            compatible_brands: parsed.compatible_brands,
1898        })
1899    }
1900
1901    // ========================================
1902    // Internal helpers
1903    // ========================================
1904
1905    /// Get item extents (construction method + ranges) from metadata.
1906    fn get_item_extents(meta: &AvifInternalMeta, item_id: u32) -> Result<ItemExtents> {
1907        let item = meta
1908            .iloc_items
1909            .iter()
1910            .find(|item| item.item_id == item_id)
1911            .ok_or(Error::InvalidData("item not found in iloc"))?;
1912
1913        let mut extents = TryVec::new();
1914        for extent in &item.extents {
1915            extents.push(extent.extent_range.clone())?;
1916        }
1917        Ok(ItemExtents {
1918            construction_method: item.construction_method,
1919            extents,
1920        })
1921    }
1922
1923    /// Resolve file-based item extents from a raw buffer during `build()`,
1924    /// before `self` exists. Returns owned data (small payloads like tmap).
1925    fn resolve_extents_from_raw(
1926        raw: &[u8],
1927        mdat_bounds: &[MdatBounds],
1928        item: &ItemExtents,
1929    ) -> Result<std::vec::Vec<u8>> {
1930        if item.construction_method != ConstructionMethod::File {
1931            return Err(Error::Unsupported("tmap item must use file construction method"));
1932        }
1933        let mut data = std::vec::Vec::new();
1934        for extent in &item.extents {
1935            let file_offset = extent.start();
1936            let start = usize::try_from(file_offset)?;
1937            let end = match extent {
1938                ExtentRange::WithLength(range) => {
1939                    let len = range.end.checked_sub(range.start)
1940                        .ok_or(Error::InvalidData("extent range start > end"))?;
1941                    start.checked_add(usize::try_from(len)?)
1942                        .ok_or(Error::InvalidData("extent end overflow"))?
1943                }
1944                ExtentRange::ToEnd(_) => {
1945                    // Find the mdat that contains this offset
1946                    let mut found_end = raw.len();
1947                    for mdat in mdat_bounds {
1948                        if file_offset >= mdat.offset && file_offset < mdat.offset + mdat.length {
1949                            found_end = usize::try_from(mdat.offset + mdat.length)?;
1950                            break;
1951                        }
1952                    }
1953                    found_end
1954                }
1955            };
1956            let slice = raw.get(start..end)
1957                .ok_or(Error::InvalidData("tmap extent out of bounds"))?;
1958            data.extend_from_slice(slice);
1959        }
1960        Ok(data)
1961    }
1962
1963    /// Resolve an item's data from the raw buffer, returning `Cow::Borrowed`
1964    /// for single-extent file items and `Cow::Owned` for multi-extent or idat.
1965    fn resolve_item(&self, item: &ItemExtents) -> Result<Cow<'_, [u8]>> {
1966        match item.construction_method {
1967            ConstructionMethod::Idat => self.resolve_idat_extents(&item.extents),
1968            ConstructionMethod::File => self.resolve_file_extents(&item.extents),
1969            ConstructionMethod::Item => Err(Error::Unsupported("construction_method 'item' not supported")),
1970        }
1971    }
1972
1973    /// Resolve file-based extents from the raw buffer.
1974    fn resolve_file_extents(&self, extents: &[ExtentRange]) -> Result<Cow<'_, [u8]>> {
1975        let raw = self.raw.as_ref();
1976
1977        // Fast path: single extent → borrow directly from raw
1978        if extents.len() == 1 {
1979            let extent = &extents[0];
1980            let (start, end) = self.extent_byte_range(extent)?;
1981            let slice = raw.get(start..end).ok_or(Error::InvalidData("extent out of bounds in raw buffer"))?;
1982            return Ok(Cow::Borrowed(slice));
1983        }
1984
1985        // Multi-extent: concatenate into owned buffer
1986        let mut data = TryVec::new();
1987        for extent in extents {
1988            let (start, end) = self.extent_byte_range(extent)?;
1989            let slice = raw.get(start..end).ok_or(Error::InvalidData("extent out of bounds in raw buffer"))?;
1990            data.extend_from_slice(slice)?;
1991        }
1992        Ok(Cow::Owned(data.into_iter().collect()))
1993    }
1994
1995    /// Convert an ExtentRange to a (start, end) byte range within the raw buffer.
1996    fn extent_byte_range(&self, extent: &ExtentRange) -> Result<(usize, usize)> {
1997        let file_offset = extent.start();
1998        let start = usize::try_from(file_offset)?;
1999
2000        match extent {
2001            ExtentRange::WithLength(range) => {
2002                let len = range.end.checked_sub(range.start)
2003                    .ok_or(Error::InvalidData("extent range start > end"))?;
2004                let end = start.checked_add(usize::try_from(len)?)
2005                    .ok_or(Error::InvalidData("extent end overflow"))?;
2006                Ok((start, end))
2007            }
2008            ExtentRange::ToEnd(_) => {
2009                // Find the mdat that contains this offset and use its bounds
2010                for mdat in &self.mdat_bounds {
2011                    if file_offset >= mdat.offset && file_offset < mdat.offset + mdat.length {
2012                        let end = usize::try_from(mdat.offset + mdat.length)?;
2013                        return Ok((start, end));
2014                    }
2015                }
2016                // Fall back to end of raw buffer
2017                Ok((start, self.raw.len()))
2018            }
2019        }
2020    }
2021
2022    /// Resolve idat-based extents.
2023    fn resolve_idat_extents(&self, extents: &[ExtentRange]) -> Result<Cow<'_, [u8]>> {
2024        let idat_data = self.idat.as_ref()
2025            .ok_or(Error::InvalidData("idat box missing but construction_method is Idat"))?;
2026
2027        if extents.len() == 1 {
2028            let extent = &extents[0];
2029            let start = usize::try_from(extent.start())?;
2030            let slice = match extent {
2031                ExtentRange::WithLength(range) => {
2032                    let len = usize::try_from(range.end - range.start)?;
2033                    idat_data.get(start..start + len)
2034                        .ok_or(Error::InvalidData("idat extent out of bounds"))?
2035                }
2036                ExtentRange::ToEnd(_) => {
2037                    idat_data.get(start..)
2038                        .ok_or(Error::InvalidData("idat extent out of bounds"))?
2039                }
2040            };
2041            return Ok(Cow::Borrowed(slice));
2042        }
2043
2044        // Multi-extent idat: concatenate
2045        let mut data = TryVec::new();
2046        for extent in extents {
2047            let start = usize::try_from(extent.start())?;
2048            let slice = match extent {
2049                ExtentRange::WithLength(range) => {
2050                    let len = usize::try_from(range.end - range.start)?;
2051                    idat_data.get(start..start + len)
2052                        .ok_or(Error::InvalidData("idat extent out of bounds"))?
2053                }
2054                ExtentRange::ToEnd(_) => {
2055                    idat_data.get(start..)
2056                        .ok_or(Error::InvalidData("idat extent out of bounds"))?
2057                }
2058            };
2059            data.extend_from_slice(slice)?;
2060        }
2061        Ok(Cow::Owned(data.into_iter().collect()))
2062    }
2063
    /// Resolve a single animation frame from the raw buffer.
    ///
    /// Returns the frame's payload borrowed from the raw buffer, the
    /// matching alpha-track payload for the same index (when an alpha
    /// sample table exists and covers this index), and the frame's
    /// duration in milliseconds, which is always derived from the color
    /// track's time-to-sample table.
    ///
    /// # Errors
    ///
    /// Fails if the file is not animated, `index` is past the color
    /// track's sample count, or a computed sample range falls outside
    /// the raw buffer.
    fn resolve_frame(&self, index: usize) -> Result<FrameRef<'_>> {
        let anim = self.animation_data.as_ref()
            .ok_or(Error::InvalidData("not an animated AVIF"))?;

        if index >= anim.sample_table.sample_sizes.len() {
            return Err(Error::InvalidData("frame index out of bounds"));
        }

        let duration_ms = self.calculate_frame_duration(&anim.sample_table, anim.media_timescale, index)?;
        let (offset, size) = self.calculate_sample_location(&anim.sample_table, index)?;

        // Checked add: offset/size come from the file and may be hostile.
        let start = usize::try_from(offset)?;
        let end = start.checked_add(size as usize)
            .ok_or(Error::InvalidData("frame end overflow"))?;

        let raw = self.raw.as_ref();
        let slice = raw.get(start..end)
            .ok_or(Error::InvalidData("frame not found in raw buffer"))?;

        // Resolve alpha frame if alpha track exists and has this index
        let alpha_data = if let Some(ref alpha_st) = anim.alpha_sample_table {
            let alpha_timescale = anim.alpha_media_timescale.unwrap_or(anim.media_timescale);
            if index < alpha_st.sample_sizes.len() {
                let (a_offset, a_size) = self.calculate_sample_location(alpha_st, index)?;
                let a_start = usize::try_from(a_offset)?;
                let a_end = a_start.checked_add(a_size as usize)
                    .ok_or(Error::InvalidData("alpha frame end overflow"))?;
                let a_slice = raw.get(a_start..a_end)
                    .ok_or(Error::InvalidData("alpha frame not found in raw buffer"))?;
                let _ = alpha_timescale; // timescale used for duration, which comes from color track
                Some(Cow::Borrowed(a_slice))
            } else {
                // A short alpha track is tolerated: the frame is returned
                // without alpha instead of failing the whole decode.
                warn!("alpha track has fewer frames than color track (index {})", index);
                None
            }
        } else {
            None
        };

        Ok(FrameRef {
            data: Cow::Borrowed(slice),
            alpha_data,
            duration_ms,
        })
    }
2110
2111    /// Calculate grid configuration from metadata.
2112    fn calculate_grid_config(meta: &AvifInternalMeta, tile_ids: &[u32]) -> Result<GridConfig> {
2113        // Try explicit grid property first
2114        for prop in &meta.properties {
2115            if prop.item_id == meta.primary_item_id
2116                && let ItemProperty::ImageGrid(grid) = &prop.property {
2117                    return Ok(grid.clone());
2118                }
2119        }
2120
2121        // Fall back to ispe calculation
2122        let grid_dims = meta
2123            .properties
2124            .iter()
2125            .find(|p| p.item_id == meta.primary_item_id)
2126            .and_then(|p| match &p.property {
2127                ItemProperty::ImageSpatialExtents(e) => Some(e),
2128                _ => None,
2129            });
2130
2131        let tile_dims = tile_ids.first().and_then(|&tile_id| {
2132            meta.properties
2133                .iter()
2134                .find(|p| p.item_id == tile_id)
2135                .and_then(|p| match &p.property {
2136                    ItemProperty::ImageSpatialExtents(e) => Some(e),
2137                    _ => None,
2138                })
2139        });
2140
2141        if let (Some(grid), Some(tile)) = (grid_dims, tile_dims)
2142            && tile.width != 0
2143                && tile.height != 0
2144                && grid.width % tile.width == 0
2145                && grid.height % tile.height == 0
2146            {
2147                let columns = grid.width / tile.width;
2148                let rows = grid.height / tile.height;
2149
2150                if columns <= 255 && rows <= 255 {
2151                    return Ok(GridConfig {
2152                        rows: rows as u8,
2153                        columns: columns as u8,
2154                        output_width: grid.width,
2155                        output_height: grid.height,
2156                    });
2157                }
2158            }
2159
2160        let tile_count = tile_ids.len();
2161        Ok(GridConfig {
2162            rows: tile_count.min(255) as u8,
2163            columns: 1,
2164            output_width: 0,
2165            output_height: 0,
2166        })
2167    }
2168
2169    /// Calculate frame duration from sample table.
2170    fn calculate_frame_duration(
2171        &self,
2172        st: &SampleTable,
2173        timescale: u32,
2174        index: usize,
2175    ) -> Result<u32> {
2176        let mut current_sample = 0;
2177        for entry in &st.time_to_sample {
2178            if current_sample + entry.sample_count as usize > index {
2179                let duration_ms = if timescale > 0 {
2180                    ((entry.sample_delta as u64) * 1000) / (timescale as u64)
2181                } else {
2182                    0
2183                };
2184                return Ok(u32::try_from(duration_ms).unwrap_or(u32::MAX));
2185            }
2186            current_sample += entry.sample_count as usize;
2187        }
2188        Ok(0)
2189    }
2190
2191    /// Look up precomputed sample location (offset and size) from sample table.
2192    fn calculate_sample_location(&self, st: &SampleTable, index: usize) -> Result<(u64, u32)> {
2193        let offset = *st
2194            .sample_offsets
2195            .get(index)
2196            .ok_or(Error::InvalidData("sample index out of bounds"))?;
2197        let size = *st
2198            .sample_sizes
2199            .get(index)
2200            .ok_or(Error::InvalidData("sample index out of bounds"))?;
2201        Ok((offset, size))
2202    }
2203
2204    // ========================================
2205    // Public data access API (one way each)
2206    // ========================================
2207
    /// Get primary item data.
    ///
    /// Returns `Cow::Borrowed` for single-extent items, `Cow::Owned` for multi-extent.
    ///
    /// # Errors
    ///
    /// Fails if the primary item's extents cannot be resolved (out-of-bounds
    /// ranges, missing `idat`, or an unsupported construction method).
    pub fn primary_data(&self) -> Result<Cow<'_, [u8]>> {
        self.resolve_item(&self.primary)
    }
2214
2215    /// Get alpha item data, if present.
2216    pub fn alpha_data(&self) -> Option<Result<Cow<'_, [u8]>>> {
2217        self.alpha.as_ref().map(|item| self.resolve_item(item))
2218    }
2219
2220    /// Get grid tile data by index.
2221    pub fn tile_data(&self, index: usize) -> Result<Cow<'_, [u8]>> {
2222        let item = self.tiles.get(index)
2223            .ok_or(Error::InvalidData("tile index out of bounds"))?;
2224        self.resolve_item(item)
2225    }
2226
    /// Get a single animation frame by index.
    ///
    /// # Errors
    ///
    /// Fails if the file is not animated, `index` is out of bounds, or the
    /// frame's byte range falls outside the raw buffer.
    pub fn frame(&self, index: usize) -> Result<FrameRef<'_>> {
        self.resolve_frame(index)
    }
2231
2232    /// Iterate over all animation frames.
2233    pub fn frames(&self) -> FrameIterator<'_> {
2234        let count = self
2235            .animation_info()
2236            .map(|info| info.frame_count)
2237            .unwrap_or(0);
2238        FrameIterator { parser: self, index: 0, count }
2239    }
2240
2241    // ========================================
2242    // Metadata (no data access)
2243    // ========================================
2244
2245    /// Get animation metadata (if animated).
2246    pub fn animation_info(&self) -> Option<AnimationInfo> {
2247        self.animation_data.as_ref().map(|data| AnimationInfo {
2248            frame_count: data.sample_table.sample_sizes.len(),
2249            loop_count: data.loop_count,
2250            has_alpha: data.alpha_sample_table.is_some(),
2251            timescale: data.media_timescale,
2252        })
2253    }
2254
    /// Get grid configuration (if grid image).
    ///
    /// `None` when the primary item is not a `grid` derived image.
    pub fn grid_config(&self) -> Option<&GridConfig> {
        self.grid_config.as_ref()
    }
2259
    /// Get number of grid tiles.
    ///
    /// Zero for non-grid images.
    pub fn grid_tile_count(&self) -> usize {
        self.tiles.len()
    }
2264
    /// Check if alpha channel uses premultiplied alpha.
    ///
    /// `true` only when a `prem` item reference links the primary item to
    /// the alpha auxiliary item; always `false` when there is no alpha item.
    pub fn premultiplied_alpha(&self) -> bool {
        self.premultiplied_alpha
    }
2269
    /// Get the AV1 codec configuration for the primary item, if present.
    ///
    /// This is parsed from the `av1C` property box in the container. For
    /// animated files with no item-level property, this falls back to the
    /// animation track's codec configuration.
    pub fn av1_config(&self) -> Option<&AV1Config> {
        self.av1_config.as_ref()
    }
2276
    /// Get colour information for the primary item, if present.
    ///
    /// This is parsed from the `colr` property box in the container.
    /// For CICP/nclx values, this is the authoritative source and may
    /// differ from values in the AV1 bitstream sequence header. For
    /// animated files with no item-level property, this falls back to the
    /// animation track's colour information.
    pub fn color_info(&self) -> Option<&ColorInformation> {
        self.color_info.as_ref()
    }
2285
    /// Get rotation for the primary item, if present.
    ///
    /// `None` when no rotation property is associated with the primary item.
    pub fn rotation(&self) -> Option<&ImageRotation> {
        self.rotation.as_ref()
    }
2290
    /// Get mirror for the primary item, if present.
    ///
    /// `None` when no mirror property is associated with the primary item.
    pub fn mirror(&self) -> Option<&ImageMirror> {
        self.mirror.as_ref()
    }
2295
    /// Get clean aperture (crop) for the primary item, if present.
    ///
    /// `None` when no clean-aperture property is associated with the
    /// primary item.
    pub fn clean_aperture(&self) -> Option<&CleanAperture> {
        self.clean_aperture.as_ref()
    }
2300
    /// Get pixel aspect ratio for the primary item, if present.
    ///
    /// `None` when no pixel-aspect-ratio property is associated with the
    /// primary item.
    pub fn pixel_aspect_ratio(&self) -> Option<&PixelAspectRatio> {
        self.pixel_aspect_ratio.as_ref()
    }
2305
    /// Get content light level info for the primary item, if present.
    ///
    /// `None` when the container carries no such property for the
    /// primary item.
    pub fn content_light_level(&self) -> Option<&ContentLightLevel> {
        self.content_light_level.as_ref()
    }
2310
    /// Get mastering display colour volume for the primary item, if present.
    ///
    /// `None` when the container carries no such property for the
    /// primary item.
    pub fn mastering_display(&self) -> Option<&MasteringDisplayColourVolume> {
        self.mastering_display.as_ref()
    }
2315
    /// Get content colour volume for the primary item, if present.
    ///
    /// `None` when the container carries no such property for the
    /// primary item.
    pub fn content_colour_volume(&self) -> Option<&ContentColourVolume> {
        self.content_colour_volume.as_ref()
    }
2320
    /// Get ambient viewing environment for the primary item, if present.
    ///
    /// `None` when the container carries no such property for the
    /// primary item.
    pub fn ambient_viewing(&self) -> Option<&AmbientViewingEnvironment> {
        self.ambient_viewing.as_ref()
    }
2325
    /// Get operating point selector for the primary item, if present.
    ///
    /// `None` when the container carries no such property for the
    /// primary item.
    pub fn operating_point(&self) -> Option<&OperatingPointSelector> {
        self.operating_point.as_ref()
    }
2330
    /// Get layer selector for the primary item, if present.
    ///
    /// `None` when the container carries no such property for the
    /// primary item.
    pub fn layer_selector(&self) -> Option<&LayerSelector> {
        self.layer_selector.as_ref()
    }
2335
    /// Get AV1 layered image indexing for the primary item, if present.
    ///
    /// `None` when the container carries no such property for the
    /// primary item.
    pub fn layered_image_indexing(&self) -> Option<&AV1LayeredImageIndexing> {
        self.layered_image_indexing.as_ref()
    }
2340
2341    /// Get EXIF metadata for the primary item, if present.
2342    ///
2343    /// Returns raw EXIF data (TIFF header onwards), with the 4-byte AVIF offset prefix stripped.
2344    pub fn exif(&self) -> Option<Result<Cow<'_, [u8]>>> {
2345        self.exif_item.as_ref().map(|item| {
2346            let raw = self.resolve_item(item)?;
2347            // AVIF EXIF items start with a 4-byte big-endian offset to the TIFF header
2348            if raw.len() <= 4 {
2349                return Err(Error::InvalidData("EXIF item too short"));
2350            }
2351            let offset = u32::from_be_bytes([raw[0], raw[1], raw[2], raw[3]]) as usize;
2352            let start = 4 + offset;
2353            if start >= raw.len() {
2354                return Err(Error::InvalidData("EXIF offset exceeds item size"));
2355            }
2356            match raw {
2357                Cow::Borrowed(slice) => Ok(Cow::Borrowed(&slice[start..])),
2358                Cow::Owned(vec) => Ok(Cow::Owned(vec[start..].to_vec())),
2359            }
2360        })
2361    }
2362
    /// Get XMP metadata for the primary item, if present.
    ///
    /// Returns raw XMP/XML data. `Some(Err(..))` means the item exists but
    /// its bytes could not be resolved from the container.
    pub fn xmp(&self) -> Option<Result<Cow<'_, [u8]>>> {
        self.xmp_item.as_ref().map(|item| self.resolve_item(item))
    }
2369
    /// Gain map metadata, if a `tmap` derived image item is present.
    ///
    /// Describes how to apply a gain map to reconstruct an HDR rendition
    /// from the SDR base image. See ISO 21496-1.
    pub fn gain_map_metadata(&self) -> Option<&GainMapMetadata> {
        self.gain_map_metadata.as_ref()
    }
2377
    /// Gain map image data (AV1-encoded), if present.
    ///
    /// `Some(Err(..))` means the item exists but could not be resolved.
    pub fn gain_map_data(&self) -> Option<Result<Cow<'_, [u8]>>> {
        self.gain_map.as_ref().map(|item| self.resolve_item(item))
    }
2382
    /// Color information for the alternate (typically HDR) rendition.
    ///
    /// This comes from the `tmap` item's `colr` property and describes
    /// the colour space of the tone-mapped output.
    pub fn gain_map_color_info(&self) -> Option<&ColorInformation> {
        self.gain_map_color_info.as_ref()
    }
2390
2391    /// Get the full gain map bundle, if a `tmap` derived image item is present.
2392    ///
2393    /// Returns [`AvifGainMap`] containing metadata, raw AV1 gain map data,
2394    /// and alternate rendition color info. Returns `None` if no gain map
2395    /// is present, or `Some(Err(..))` if the gain map data cannot be resolved.
2396    pub fn gain_map(&self) -> Option<Result<AvifGainMap>> {
2397        let metadata = self.gain_map_metadata.as_ref()?.clone();
2398        let data_extents = self.gain_map.as_ref()?;
2399        let alt_color_info = self.gain_map_color_info.clone();
2400
2401        Some(self.resolve_item(data_extents).map(|data| AvifGainMap {
2402            metadata,
2403            gain_map_data: data.into_owned(),
2404            alt_color_info,
2405        }))
2406    }
2407
    /// Check if a depth auxiliary image is present.
    ///
    /// Returns `true` if the AVIF container has an `auxl`-linked item with
    /// a depth auxiliary type URN.
    pub fn has_depth_map(&self) -> bool {
        self.depth_item.is_some()
    }
2415
    /// Get the raw AV1 bitstream of the depth auxiliary image, if present.
    ///
    /// `Some(Err(..))` means the item exists but could not be resolved.
    pub fn depth_map_data(&self) -> Option<Result<Cow<'_, [u8]>>> {
        self.depth_item.as_ref().map(|item| self.resolve_item(item))
    }
2420
2421    /// Get the full depth map bundle, if a depth auxiliary image is present.
2422    ///
2423    /// Returns [`AvifDepthMap`] containing the raw AV1 depth image data,
2424    /// dimensions, codec config, and color info. Returns `None` if no depth
2425    /// auxiliary is present, or `Some(Err(..))` if the data cannot be resolved.
2426    ///
2427    /// # Example
2428    ///
2429    /// ```no_run
2430    /// let bytes = std::fs::read("portrait.avif").unwrap();
2431    /// let parser = zenavif_parse::AvifParser::from_bytes(&bytes).unwrap();
2432    /// if let Some(Ok(dm)) = parser.depth_map() {
2433    ///     println!("Depth: {}x{}, {} bytes", dm.width, dm.height, dm.data.len());
2434    /// }
2435    /// ```
2436    pub fn depth_map(&self) -> Option<Result<AvifDepthMap>> {
2437        let data_extents = self.depth_item.as_ref()?;
2438        let av1_config = self.depth_av1_config.clone();
2439        let color_info = self.depth_color_info.clone();
2440        let width = self.depth_width;
2441        let height = self.depth_height;
2442
2443        Some(self.resolve_item(data_extents).map(|data| AvifDepthMap {
2444            data: data.into_owned(),
2445            width,
2446            height,
2447            av1_config,
2448            color_info,
2449        }))
2450    }
2451
    /// Get the major brand from the `ftyp` box (e.g., `*b"avif"` or `*b"avis"`).
    pub fn major_brand(&self) -> &[u8; 4] {
        &self.major_brand
    }
2456
    /// Get the compatible brands from the `ftyp` box, in file order.
    pub fn compatible_brands(&self) -> &[[u8; 4]] {
        &self.compatible_brands
    }
2461
    /// Parse AV1 metadata from the primary item.
    ///
    /// Resolves the primary item's bytes and parses them as an AV1 bitstream.
    pub fn primary_metadata(&self) -> Result<AV1Metadata> {
        let data = self.primary_data()?;
        AV1Metadata::parse_av1_bitstream(&data)
    }
2467
    /// Parse AV1 metadata from the alpha item, if present.
    ///
    /// Returns `None` when there is no alpha item; `Some(Err(..))` when the
    /// item cannot be resolved or its bitstream cannot be parsed.
    pub fn alpha_metadata(&self) -> Option<Result<AV1Metadata>> {
        self.alpha.as_ref().map(|item| {
            let data = self.resolve_item(item)?;
            AV1Metadata::parse_av1_bitstream(&data)
        })
    }
2475
2476    // ========================================
2477    // Conversion
2478    // ========================================
2479
2480    /// Convert to [`AvifData`] (eagerly loads all frames and tiles).
2481    ///
2482    /// Provided for migration from the eager API. Prefer using `AvifParser`
2483    /// methods directly.
2484    #[cfg(feature = "eager")]
2485    #[deprecated(since = "1.5.0", note = "Use AvifParser methods directly instead of converting to AvifData")]
2486    #[allow(deprecated)]
2487    pub fn to_avif_data(&self) -> Result<AvifData> {
2488        let primary_data = self.primary_data()?;
2489        let mut primary_item = TryVec::new();
2490        primary_item.extend_from_slice(&primary_data)?;
2491
2492        let alpha_item = match self.alpha_data() {
2493            Some(Ok(data)) => {
2494                let mut v = TryVec::new();
2495                v.extend_from_slice(&data)?;
2496                Some(v)
2497            }
2498            Some(Err(e)) => return Err(e),
2499            None => None,
2500        };
2501
2502        let mut grid_tiles = TryVec::new();
2503        for i in 0..self.grid_tile_count() {
2504            let data = self.tile_data(i)?;
2505            let mut v = TryVec::new();
2506            v.extend_from_slice(&data)?;
2507            grid_tiles.push(v)?;
2508        }
2509
2510        let animation = if let Some(info) = self.animation_info() {
2511            let mut frames = TryVec::new();
2512            for i in 0..info.frame_count {
2513                let frame_ref = self.frame(i)?;
2514                let mut data = TryVec::new();
2515                data.extend_from_slice(&frame_ref.data)?;
2516                frames.push(AnimationFrame { data, duration_ms: frame_ref.duration_ms })?;
2517            }
2518            Some(AnimationConfig {
2519                loop_count: info.loop_count,
2520                frames,
2521            })
2522        } else {
2523            None
2524        };
2525
2526        Ok(AvifData {
2527            primary_item,
2528            alpha_item,
2529            premultiplied_alpha: self.premultiplied_alpha,
2530            grid_config: self.grid_config.clone(),
2531            grid_tiles,
2532            animation,
2533            av1_config: self.av1_config.clone(),
2534            color_info: self.color_info.clone(),
2535            rotation: self.rotation,
2536            mirror: self.mirror,
2537            clean_aperture: self.clean_aperture,
2538            pixel_aspect_ratio: self.pixel_aspect_ratio,
2539            content_light_level: self.content_light_level,
2540            mastering_display: self.mastering_display,
2541            content_colour_volume: self.content_colour_volume,
2542            ambient_viewing: self.ambient_viewing,
2543            operating_point: self.operating_point,
2544            layer_selector: self.layer_selector,
2545            layered_image_indexing: self.layered_image_indexing,
2546            exif: self.exif().and_then(|r| r.ok()).map(|c| {
2547                let mut v = TryVec::new();
2548                let _ = v.extend_from_slice(&c);
2549                v
2550            }),
2551            xmp: self.xmp().and_then(|r| r.ok()).map(|c| {
2552                let mut v = TryVec::new();
2553                let _ = v.extend_from_slice(&c);
2554                v
2555            }),
2556            gain_map_metadata: self.gain_map_metadata.clone(),
2557            gain_map_item: self.gain_map_data().and_then(|r| r.ok()).map(|c| {
2558                let mut v = TryVec::new();
2559                let _ = v.extend_from_slice(&c);
2560                v
2561            }),
2562            gain_map_color_info: self.gain_map_color_info.clone(),
2563            depth_item: self.depth_map_data().and_then(|r| r.ok()).map(|c| {
2564                let mut v = TryVec::new();
2565                let _ = v.extend_from_slice(&c);
2566                v
2567            }),
2568            depth_width: self.depth_width,
2569            depth_height: self.depth_height,
2570            depth_av1_config: self.depth_av1_config.clone(),
2571            depth_color_info: self.depth_color_info.clone(),
2572            major_brand: self.major_brand,
2573            compatible_brands: self.compatible_brands.clone(),
2574        })
2575    }
2576}
2577
/// Iterator over animation frames.
///
/// Created by [`AvifParser::frames()`]. Yields [`FrameRef`] on demand.
pub struct FrameIterator<'a> {
    // Parser used to resolve each frame's bytes lazily.
    parser: &'a AvifParser<'a>,
    // Index of the next frame to yield.
    index: usize,
    // Total number of frames available.
    count: usize,
}
2586
2587impl<'a> Iterator for FrameIterator<'a> {
2588    type Item = Result<FrameRef<'a>>;
2589
2590    fn next(&mut self) -> Option<Self::Item> {
2591        if self.index >= self.count {
2592            return None;
2593        }
2594        let result = self.parser.frame(self.index);
2595        self.index += 1;
2596        Some(result)
2597    }
2598
2599    fn size_hint(&self) -> (usize, Option<usize>) {
2600        let remaining = self.count.saturating_sub(self.index);
2601        (remaining, Some(remaining))
2602    }
2603}
2604
impl ExactSizeIterator for FrameIterator<'_> {
    /// Number of frames remaining; saturating so it never underflows even
    /// if `index` somehow ran past `count`.
    fn len(&self) -> usize {
        self.count.saturating_sub(self.index)
    }
}
2610
/// Parsed contents of an AVIF `meta` box, before items are resolved.
struct AvifInternalMeta {
    /// `iref` single-item references (e.g. `dimg`, `auxl`).
    item_references: TryVec<SingleItemTypeReferenceBox>,
    /// Properties from `iprp`, associated to item ids.
    properties: TryVec<AssociatedProperty>,
    /// Item id declared by the `pitm` box.
    primary_item_id: u32,
    /// Item locations from the `iloc` box.
    iloc_items: TryVec<ItemLocationBoxItem>,
    /// Item infos (`infe`) from the `iinf` box.
    item_infos: TryVec<ItemInfoEntry>,
    /// Raw contents of the `idat` box, if present.
    idat: Option<TryVec<u8>>,
    #[allow(dead_code)] // Parsed for future altr group support
    entity_groups: TryVec<EntityGroup>,
}
2621
/// A Media Data Box
/// See ISO 14496-12:2015 § 8.1.1
#[cfg(feature = "eager")]
struct MediaDataBox {
    /// Offset of `data` from the beginning of the file. See `ConstructionMethod::File`
    offset: u64,
    /// The box's payload bytes, buffered eagerly.
    data: TryVec<u8>,
}
2630
#[cfg(feature = "eager")]
impl MediaDataBox {
    /// Does the first byte of `extent` fall inside this `MediaDataBox`?
    ///
    /// Extents are assumed not to cross box boundaries; if one does,
    /// `read_extent` will report the error.
    fn contains_extent(&self, extent: &ExtentRange) -> bool {
        extent
            .start()
            .checked_sub(self.offset)
            .is_some_and(|rel| rel < self.data.len().to_u64())
    }

    /// Does `extent` cover this `MediaDataBox` exactly?
    fn matches_extent(&self, extent: &ExtentRange) -> bool {
        if self.offset != extent.start() {
            return false;
        }
        match extent {
            ExtentRange::ToEnd(_) => true,
            ExtentRange::WithLength(range) => self
                .offset
                .checked_add(self.data.len().to_u64())
                .is_some_and(|end| end == range.end),
        }
    }

    /// Append the bytes described by `extent` to `buf`, or return an error
    /// if the range is not fully contained within this `MediaDataBox`.
    fn read_extent(&self, extent: &ExtentRange, buf: &mut TryVec<u8>) -> Result<()> {
        // Position of the extent relative to the start of our payload.
        let rel_start = extent
            .start()
            .checked_sub(self.offset)
            .ok_or(Error::InvalidData("mdat does not contain extent"))?;
        let slice = match extent {
            ExtentRange::WithLength(range) => {
                let len = range
                    .end
                    .checked_sub(range.start)
                    .ok_or(Error::InvalidData("range start > end"))?;
                let rel_end = rel_start
                    .checked_add(len)
                    .ok_or(Error::InvalidData("extent end overflow"))?;
                self.data.get(rel_start.try_into()?..rel_end.try_into()?)
            }
            ExtentRange::ToEnd(_) => self.data.get(rel_start.try_into()?..),
        };
        let slice = slice.ok_or(Error::InvalidData("extent crosses box boundary"))?;
        buf.extend_from_slice(slice)?;
        Ok(())
    }
}
2689
/// Used for 'infe' boxes within 'iinf' boxes
/// See ISO 14496-12:2015 § 8.11.6
/// Only versions {2, 3} are supported
#[derive(Debug)]
struct ItemInfoEntry {
    /// Identifier this entry describes; matched against `iloc`/`iref` ids.
    item_id: u32,
    /// Four-character item type, e.g. `av01`, `grid`, `Exif`.
    item_type: FourCC,
}
2698
/// See ISO 14496-12:2015 § 8.11.12
#[derive(Debug)]
struct SingleItemTypeReferenceBox {
    /// Reference type, e.g. `dimg` (derived image) or `auxl` (auxiliary).
    item_type: FourCC,
    /// The referencing item.
    from_item_id: u32,
    /// The referenced item.
    to_item_id: u32,
    /// Index of this reference within the list of references of the same type from the same item
    /// (0-based). This is the dimgIdx for grid tiles.
    reference_index: u16,
}
2709
/// Potential sizes (in bytes) of variable-sized fields of the 'iloc' box
/// See ISO 14496-12:2015 § 8.11.3
#[derive(Debug)]
enum IlocFieldSize {
    Zero,
    Four,
    Eight,
}
2718
impl IlocFieldSize {
    /// Width of the field in bits (0, 32, or 64), as consumed by the
    /// bit reader when parsing `iloc` entries.
    const fn to_bits(&self) -> u8 {
        match self {
            Self::Zero => 0,
            Self::Four => 32,
            Self::Eight => 64,
        }
    }
}
2728
2729impl TryFrom<u8> for IlocFieldSize {
2730    type Error = Error;
2731
2732    fn try_from(value: u8) -> Result<Self> {
2733        match value {
2734            0 => Ok(Self::Zero),
2735            4 => Ok(Self::Four),
2736            8 => Ok(Self::Eight),
2737            _ => Err(Error::InvalidData("value must be in the set {0, 4, 8}")),
2738        }
2739    }
2740}
2741
/// Supported versions of the 'iloc' box; see ISO 14496-12:2015 § 8.11.3.
#[derive(PartialEq)]
enum IlocVersion {
    Zero,
    One,
    Two,
}
2748
2749impl TryFrom<u8> for IlocVersion {
2750    type Error = Error;
2751
2752    fn try_from(value: u8) -> Result<Self> {
2753        match value {
2754            0 => Ok(Self::Zero),
2755            1 => Ok(Self::One),
2756            2 => Ok(Self::Two),
2757            _ => Err(Error::Unsupported("unsupported version in 'iloc' box")),
2758        }
2759    }
2760}
2761
/// Used for 'iloc' boxes
/// See ISO 14496-12:2015 § 8.11.3
/// `base_offset` is omitted since it is integrated into the ranges in `extents`
/// `data_reference_index` is omitted, since only 0 (i.e., this file) is supported
#[derive(Debug)]
struct ItemLocationBoxItem {
    /// Identifier matched against `infe`/`iref` item ids.
    item_id: u32,
    /// How the extents are to be interpreted (file offsets vs. idat offsets).
    construction_method: ConstructionMethod,
    /// Unused for `ConstructionMethod::Idat`
    extents: TryVec<ItemLocationBoxExtent>,
}
2773
/// `construction_method` of an 'iloc' entry; see ISO 14496-12:2015 § 8.11.3.
#[derive(Clone, Copy, Debug, PartialEq)]
enum ConstructionMethod {
    /// Extents are offsets from the beginning of the file.
    File,
    /// Extents refer to data inside the `idat` box.
    Idat,
    #[allow(dead_code)] // TODO: see https://github.com/mozilla/mp4parse-rust/issues/196
    Item,
}
2781
/// `extent_index` is omitted since it's only used for `ConstructionMethod::Item` which
/// is currently not implemented.
#[derive(Clone, Debug)]
struct ItemLocationBoxExtent {
    /// Byte range of this extent (base offset already folded in).
    extent_range: ExtentRange,
}
2788
/// A byte range with either an explicit end or extending to end-of-data.
#[derive(Clone, Debug)]
enum ExtentRange {
    /// A bounded `start..end` range.
    WithLength(Range<u64>),
    /// An open-ended `start..` range (extent_length of 0 in 'iloc').
    ToEnd(RangeFrom<u64>),
}
2794
impl ExtentRange {
    /// Absolute offset at which this extent begins, for either variant.
    const fn start(&self) -> u64 {
        match self {
            Self::WithLength(r) => r.start,
            Self::ToEnd(r) => r.start,
        }
    }
}
2803
/// See ISO 14496-12:2015 § 4.2
struct BMFFBox<'a, T> {
    /// The already-parsed header (type, size, offset, optional uuid).
    head: BoxHeader,
    /// Reader limited to the content bytes of this box.
    content: Take<&'a mut T>,
}
2809
2810impl<T: Read> BMFFBox<'_, T> {
2811    fn read_into_try_vec(&mut self) -> std::io::Result<TryVec<u8>> {
2812        let limit = self.content.limit();
2813        // For size=0 boxes, size is set to u64::MAX, but after subtracting offset
2814        // (8 or 16 bytes), the limit will be slightly less. Check for values very
2815        // close to u64::MAX to detect these cases.
2816        // Cap pre-allocation to 256 MB — the actual read_to_end will
2817        // grow as needed if the box really is larger, and return early
2818        // if the underlying reader has less data than claimed.
2819        const MAX_PREALLOC: u64 = 256 * 1024 * 1024;
2820        let mut vec = if limit >= u64::MAX - BoxHeader::MIN_LARGE_SIZE {
2821            // Unknown size (size=0 box), read without pre-allocation
2822            std::vec::Vec::new()
2823        } else {
2824            let mut v = std::vec::Vec::new();
2825            v.try_reserve_exact(limit.min(MAX_PREALLOC) as usize)
2826                .map_err(|_| std::io::ErrorKind::OutOfMemory)?;
2827            v
2828        };
2829        self.content.read_to_end(&mut vec)?; // The default impl
2830        Ok(vec.into())
2831    }
2832}
2833
#[test]
fn box_read_to_end() {
    // A box limited to 5 bytes over a 10-byte source must read exactly 5.
    let tmp = &mut b"1234567890".as_slice();
    let mut src = BMFFBox {
        head: BoxHeader { name: BoxType::FileTypeBox, size: 5, offset: 0, uuid: None },
        content: <_ as Read>::take(tmp, 5),
    };
    let buf = src.read_into_try_vec().unwrap();
    assert_eq!(buf.len(), 5);
    assert_eq!(buf, b"12345".as_ref());
}
2845
#[test]
fn box_read_to_end_large_claim() {
    // A box claiming huge size but backed by only 10 bytes should still succeed —
    // read_to_end returns what's actually available, pre-allocation is capped.
    let tmp = &mut b"1234567890".as_slice();
    let mut src = BMFFBox {
        head: BoxHeader { name: BoxType::FileTypeBox, size: 5, offset: 0, uuid: None },
        content: <_ as Read>::take(tmp, u64::MAX / 2),
    };
    let buf = src.read_into_try_vec().unwrap();
    assert_eq!(buf.len(), 10);
}
2858
/// Iterator over consecutive boxes read from a byte source.
struct BoxIter<'a, T> {
    // Source the box headers and contents are read from.
    src: &'a mut T,
    /// Upper bound on bytes remaining in the source.
    ///
    /// Used to clamp claimed box sizes so that a malformed header
    /// (e.g. claiming 4 GB when only 26 bytes remain) does not cause
    /// multi-gigabyte allocations based on [`BMFFBox::bytes_left`].
    max_remaining: u64,
}
2868
2869impl<T: Read> BoxIter<'_, T> {
2870    /// Create a BoxIter without a known data bound (used by streaming readers).
2871    #[cfg(feature = "eager")]
2872    fn new(src: &mut T) -> BoxIter<'_, T> {
2873        BoxIter { src, max_remaining: u64::MAX }
2874    }
2875
2876    fn with_max_remaining(src: &mut T, max_remaining: u64) -> BoxIter<'_, T> {
2877        BoxIter { src, max_remaining }
2878    }
2879
2880    fn next_box(&mut self) -> Result<Option<BMFFBox<'_, T>>> {
2881        let r = read_box_header(self.src);
2882        match r {
2883            Ok(h) => {
2884                let claimed = h.size - h.offset;
2885                // Clamp the Take limit so that allocations based on
2886                // bytes_left() cannot exceed the actual data available.
2887                let clamped = claimed.min(self.max_remaining);
2888                // Decrease our remaining budget by the clamped content
2889                // size plus the header bytes already consumed.
2890                self.max_remaining = self.max_remaining.saturating_sub(clamped.saturating_add(h.offset));
2891                Ok(Some(BMFFBox {
2892                    head: h,
2893                    content: self.src.take(clamped),
2894                }))
2895            }
2896            Err(Error::UnexpectedEOF) => Ok(None),
2897            Err(e) => Err(e),
2898        }
2899    }
2900}
2901
impl<T: Read> Read for BMFFBox<'_, T> {
    /// Delegate reads to the size-limited inner `Take`.
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.content.read(buf)
    }
}
2907
impl<T: Offset> Offset for BMFFBox<'_, T> {
    /// Current absolute offset as reported by the underlying reader.
    fn offset(&self) -> u64 {
        self.content.get_ref().offset()
    }
}
2913
impl<T: Read> BMFFBox<'_, T> {
    /// Number of content bytes not yet consumed from this box.
    fn bytes_left(&self) -> u64 {
        self.content.limit()
    }

    /// The parsed header of this box.
    const fn get_header(&self) -> &BoxHeader {
        &self.head
    }

    /// Iterate over this box's children, bounding their claimed sizes by
    /// the bytes remaining in this box.
    fn box_iter(&mut self) -> BoxIter<'_, Self> {
        BoxIter::with_max_remaining(self, self.bytes_left())
    }
}
2927
2928impl<T> Drop for BMFFBox<'_, T> {
2929    fn drop(&mut self) {
2930        if self.content.limit() > 0 {
2931            let name: FourCC = From::from(self.head.name);
2932            debug!("Dropping {} bytes in '{}'", self.content.limit(), name);
2933        }
2934    }
2935}
2936
2937/// Read and parse a box header.
2938///
2939/// Call this first to determine the type of a particular mp4 box
2940/// and its length. Used internally for dispatching to specific
2941/// parsers for the internal content, or to get the length to
2942/// skip unknown or uninteresting boxes.
2943///
2944/// See ISO 14496-12:2015 § 4.2
2945fn read_box_header<T: ReadBytesExt>(src: &mut T) -> Result<BoxHeader> {
2946    let size32 = be_u32(src)?;
2947    let name = BoxType::from(be_u32(src)?);
2948    let size = match size32 {
2949        // valid only for top-level box and indicates it's the last box in the file.  usually mdat.
2950        0 => {
2951            // Size=0 means box extends to EOF (ISOBMFF spec allows this for last box)
2952            u64::MAX
2953        },
2954        1 => {
2955            let size64 = be_u64(src)?;
2956            if size64 < BoxHeader::MIN_LARGE_SIZE {
2957                return Err(Error::InvalidData("malformed wide size"));
2958            }
2959            size64
2960        },
2961        _ => {
2962            if u64::from(size32) < BoxHeader::MIN_SIZE {
2963                return Err(Error::InvalidData("malformed size"));
2964            }
2965            u64::from(size32)
2966        },
2967    };
2968    let mut offset = match size32 {
2969        1 => BoxHeader::MIN_LARGE_SIZE,
2970        _ => BoxHeader::MIN_SIZE,
2971    };
2972    let uuid = if name == BoxType::UuidBox {
2973        if size >= offset + 16 {
2974            let mut buffer = [0u8; 16];
2975            let count = src.read(&mut buffer)?;
2976            offset += count.to_u64();
2977            if count == 16 {
2978                Some(buffer)
2979            } else {
2980                debug!("malformed uuid (short read), skipping");
2981                None
2982            }
2983        } else {
2984            debug!("malformed uuid, skipping");
2985            None
2986        }
2987    } else {
2988        None
2989    };
2990    if offset > size {
2991        return Err(Error::InvalidData("box header offset exceeds size"));
2992    }
2993    Ok(BoxHeader { name, size, offset, uuid })
2994}
2995
2996/// Parse the extra header fields for a full box.
2997fn read_fullbox_extra<T: ReadBytesExt>(src: &mut T) -> Result<(u8, u32)> {
2998    let version = src.read_u8()?;
2999    let flags_a = src.read_u8()?;
3000    let flags_b = src.read_u8()?;
3001    let flags_c = src.read_u8()?;
3002    Ok((
3003        version,
3004        u32::from(flags_a) << 16 | u32::from(flags_b) << 8 | u32::from(flags_c),
3005    ))
3006}
3007
3008// Parse the extra fields for a full box whose flag fields must be zero.
3009fn read_fullbox_version_no_flags<T: ReadBytesExt>(src: &mut T, options: &ParseOptions) -> Result<u8> {
3010    let (version, flags) = read_fullbox_extra(src)?;
3011
3012    if flags != 0 && !options.lenient {
3013        return Err(Error::Unsupported("expected flags to be 0"));
3014    }
3015
3016    Ok(version)
3017}
3018
3019/// Skip over the entire contents of a box.
3020fn skip_box_content<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<()> {
3021    // Skip the contents of unknown chunks.
3022    let to_skip = {
3023        let header = src.get_header();
3024        debug!("{header:?} (skipped)");
3025        header
3026            .size
3027            .checked_sub(header.offset)
3028            .ok_or(Error::InvalidData("header offset > size"))?
3029    };
3030    if to_skip != src.bytes_left() {
3031        return Err(Error::InvalidData("box content size mismatch"));
3032    }
3033    skip(src, to_skip)
3034}
3035
3036/// Skip over the remain data of a box.
3037fn skip_box_remain<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<()> {
3038    let remain = {
3039        let header = src.get_header();
3040        let len = src.bytes_left();
3041        debug!("remain {len} (skipped) in {header:?}");
3042        len
3043    };
3044    skip(src, remain)
3045}
3046
/// Tracks resource usage against the limits in a [`DecodeConfig`].
struct ResourceTracker<'a> {
    // Limits to enforce (memory, megapixels, frame and tile counts).
    config: &'a DecodeConfig,
    /// Bytes currently reserved for buffered media data.
    #[cfg(feature = "eager")]
    current_memory: u64,
    /// High-water mark of `current_memory`, compared against the limit.
    #[cfg(feature = "eager")]
    peak_memory: u64,
}
3054
3055impl<'a> ResourceTracker<'a> {
3056    fn new(config: &'a DecodeConfig) -> Self {
3057        Self {
3058            config,
3059            #[cfg(feature = "eager")]
3060            current_memory: 0,
3061            #[cfg(feature = "eager")]
3062            peak_memory: 0,
3063        }
3064    }
3065
3066    #[cfg(feature = "eager")]
3067    fn reserve(&mut self, bytes: u64) -> Result<()> {
3068        self.current_memory = self.current_memory.saturating_add(bytes);
3069        self.peak_memory = self.peak_memory.max(self.current_memory);
3070
3071        if let Some(limit) = self.config.peak_memory_limit
3072            && self.peak_memory > limit {
3073                return Err(Error::ResourceLimitExceeded("peak memory limit exceeded"));
3074            }
3075
3076        Ok(())
3077    }
3078
3079    #[cfg(feature = "eager")]
3080    fn release(&mut self, bytes: u64) {
3081        self.current_memory = self.current_memory.saturating_sub(bytes);
3082    }
3083
3084    #[cfg(feature = "eager")]
3085    fn validate_total_megapixels(&self, width: u32, height: u32) -> Result<()> {
3086        if let Some(limit) = self.config.total_megapixels_limit {
3087            let megapixels = (width as u64)
3088                .checked_mul(height as u64)
3089                .ok_or(Error::InvalidData("dimension overflow"))?
3090                / 1_000_000;
3091
3092            if megapixels > limit as u64 {
3093                return Err(Error::ResourceLimitExceeded("total megapixels limit exceeded"));
3094            }
3095        }
3096
3097        Ok(())
3098    }
3099
3100    fn validate_animation_frames(&self, count: u32) -> Result<()> {
3101        if let Some(limit) = self.config.max_animation_frames
3102            && count > limit {
3103                return Err(Error::ResourceLimitExceeded("animation frame count limit exceeded"));
3104            }
3105
3106        Ok(())
3107    }
3108
3109    fn validate_grid_tiles(&self, count: u32) -> Result<()> {
3110        if let Some(limit) = self.config.max_grid_tiles
3111            && count > limit {
3112                return Err(Error::ResourceLimitExceeded("grid tile count limit exceeded"));
3113            }
3114
3115        Ok(())
3116    }
3117}
3118
3119/// Read the contents of an AVIF file with resource limits and cancellation support
3120///
3121/// This is the primary parsing function with full control over resource limits
3122/// and cooperative cancellation via the [`Stop`] trait.
3123///
3124/// # Arguments
3125///
3126/// * `f` - Reader for the AVIF file
3127/// * `config` - Resource limits and parsing options
3128/// * `stop` - Cancellation token (use [`Unstoppable`] if not needed)
3129#[cfg(feature = "eager")]
3130#[deprecated(since = "1.5.0", note = "Use `AvifParser::from_reader_with_config()` instead")]
3131#[allow(deprecated)]
3132pub fn read_avif_with_config<T: Read>(
3133    f: &mut T,
3134    config: &DecodeConfig,
3135    stop: &dyn Stop,
3136) -> Result<AvifData> {
3137    let mut tracker = ResourceTracker::new(config);
3138    let mut f = OffsetReader::new(f);
3139
3140    let mut iter = BoxIter::new(&mut f);
3141
3142    // 'ftyp' box must occur first; see ISO 14496-12:2015 § 4.3.1
3143    let (major_brand, compatible_brands) = if let Some(mut b) = iter.next_box()? {
3144        if b.head.name == BoxType::FileTypeBox {
3145            let ftyp = read_ftyp(&mut b)?;
3146            // Accept both 'avif' (single-frame) and 'avis' (animated) brands
3147            if ftyp.major_brand != b"avif" && ftyp.major_brand != b"avis" {
3148                warn!("major_brand: {}", ftyp.major_brand);
3149                return Err(Error::InvalidData("ftyp must be 'avif' or 'avis'"));
3150            }
3151            let major = ftyp.major_brand.value;
3152            let compat = ftyp.compatible_brands.iter().map(|b| b.value).collect();
3153            (major, compat)
3154        } else {
3155            return Err(Error::InvalidData("'ftyp' box must occur first"));
3156        }
3157    } else {
3158        return Err(Error::InvalidData("'ftyp' box must occur first"));
3159    };
3160
3161    let mut meta = None;
3162    let mut mdats = TryVec::new();
3163    let mut animation_data: Option<ParsedAnimationData> = None;
3164
3165    let parse_opts = ParseOptions { lenient: config.lenient };
3166
3167    while let Some(mut b) = iter.next_box()? {
3168        stop.check()?;
3169
3170        match b.head.name {
3171            BoxType::MetadataBox => {
3172                if meta.is_some() {
3173                    return Err(Error::InvalidData("There should be zero or one meta boxes per ISO 14496-12:2015 § 8.11.1.1"));
3174                }
3175                meta = Some(read_avif_meta(&mut b, &parse_opts)?);
3176            },
3177            BoxType::MovieBox => {
3178                let tracks = read_moov(&mut b)?;
3179                if !tracks.is_empty() {
3180                    animation_data = Some(associate_tracks(tracks)?);
3181                }
3182            },
3183            BoxType::MediaDataBox => {
3184                if b.bytes_left() > 0 {
3185                    let offset = b.offset();
3186                    let size = b.bytes_left();
3187                    tracker.reserve(size)?;
3188                    let data = b.read_into_try_vec()?;
3189                    tracker.release(size);
3190                    mdats.push(MediaDataBox { offset, data })?;
3191                }
3192            },
3193            _ => skip_box_content(&mut b)?,
3194        }
3195
3196        check_parser_state(&b.head, &b.content)?;
3197    }
3198
3199    // meta is required for still images; pure sequences can have only moov+mdat
3200    if meta.is_none() && animation_data.is_none() {
3201        return Err(Error::InvalidData("missing meta"));
3202    }
3203    let Some(meta) = meta else {
3204        // Pure sequence: return minimal AvifData with no items
3205        return Ok(AvifData {
3206            ..Default::default()
3207        });
3208    };
3209
3210    // Check if primary item is a grid (tiled image)
3211    let is_grid = meta
3212        .item_infos
3213        .iter()
3214        .find(|x| x.item_id == meta.primary_item_id)
3215        .is_some_and(|info| {
3216            let is_g = info.item_type == b"grid";
3217            if is_g {
3218                log::debug!("Grid image detected: primary_item_id={}", meta.primary_item_id);
3219            }
3220            is_g
3221        });
3222
3223    // Extract grid configuration if this is a grid image
3224    let mut grid_config = if is_grid {
3225        meta.properties
3226            .iter()
3227            .find(|prop| {
3228                prop.item_id == meta.primary_item_id
3229                    && matches!(prop.property, ItemProperty::ImageGrid(_))
3230            })
3231            .and_then(|prop| match &prop.property {
3232                ItemProperty::ImageGrid(config) => {
3233                    log::debug!("Grid: found explicit ImageGrid property: {:?}", config);
3234                    Some(config.clone())
3235                },
3236                _ => None,
3237            })
3238    } else {
3239        None
3240    };
3241
3242    // Find tile item IDs if this is a grid
3243    let tile_item_ids: TryVec<u32> = if is_grid {
3244        // Collect tiles with their reference index
3245        let mut tiles_with_index: TryVec<(u32, u16)> = TryVec::new();
3246        for iref in meta.item_references.iter() {
3247            // Grid items reference tiles via "dimg" (derived image) type
3248            if iref.from_item_id == meta.primary_item_id && iref.item_type == b"dimg" {
3249                tiles_with_index.push((iref.to_item_id, iref.reference_index))?;
3250            }
3251        }
3252
3253        // Validate tile count
3254        tracker.validate_grid_tiles(tiles_with_index.len() as u32)?;
3255
3256        // Sort tiles by reference_index to get correct grid order
3257        tiles_with_index.sort_by_key(|&(_, idx)| idx);
3258
3259        // Extract just the IDs in sorted order
3260        let mut ids = TryVec::new();
3261        for (tile_id, _) in tiles_with_index.iter() {
3262            ids.push(*tile_id)?;
3263        }
3264
3265        // No logging here - too verbose for production
3266
3267        // If no ImageGrid property found, calculate grid layout from ispe dimensions
3268        if grid_config.is_none() && !ids.is_empty() {
3269            // Try to calculate grid dimensions from ispe properties
3270            let grid_dims = meta.properties.iter()
3271                .find(|p| p.item_id == meta.primary_item_id)
3272                .and_then(|p| match &p.property {
3273                    ItemProperty::ImageSpatialExtents(e) => Some(e),
3274                    _ => None,
3275                });
3276
3277            let tile_dims = ids.first().and_then(|&tile_id| {
3278                meta.properties.iter()
3279                    .find(|p| p.item_id == tile_id)
3280                    .and_then(|p| match &p.property {
3281                        ItemProperty::ImageSpatialExtents(e) => Some(e),
3282                        _ => None,
3283                    })
3284            });
3285
3286            if let (Some(grid), Some(tile)) = (grid_dims, tile_dims) {
3287                // Validate grid output dimensions
3288                tracker.validate_total_megapixels(grid.width, grid.height)?;
3289
3290                // Validate tile dimensions are non-zero (already validated in read_ispe, but defensive)
3291                if tile.width == 0 || tile.height == 0 {
3292                    log::warn!("Grid: tile has zero dimensions, using fallback");
3293                } else if grid.width % tile.width == 0 && grid.height % tile.height == 0 {
3294                    // Calculate grid layout: grid_dims ÷ tile_dims
3295                    let columns = grid.width / tile.width;
3296                    let rows = grid.height / tile.height;
3297
3298                    // Validate grid dimensions fit in u8 (max 255×255 grid)
3299                    if columns > 255 || rows > 255 {
3300                        log::warn!("Grid: calculated dimensions {}×{} exceed 255, using fallback", rows, columns);
3301                    } else {
3302                        log::debug!("Grid: calculated {}×{} layout from ispe dimensions", rows, columns);
3303                        grid_config = Some(GridConfig {
3304                            rows: rows as u8,
3305                            columns: columns as u8,
3306                            output_width: grid.width,
3307                            output_height: grid.height,
3308                        });
3309                    }
3310                } else {
3311                    log::warn!("Grid: dimension mismatch - grid {}×{} not evenly divisible by tile {}×{}, using fallback",
3312                              grid.width, grid.height, tile.width, tile.height);
3313                }
3314            }
3315
3316            // Fallback: if calculation failed or ispe not available, use N×1 inference
3317            if grid_config.is_none() {
3318                log::debug!("Grid: using fallback {}×1 layout inference", ids.len());
3319                grid_config = Some(GridConfig {
3320                    rows: ids.len() as u8,  // Changed: vertical stack
3321                    columns: 1,              // Changed: single column
3322                    output_width: 0,  // Will be calculated from tiles
3323                    output_height: 0, // Will be calculated from tiles
3324                });
3325            }
3326        }
3327
3328        ids
3329    } else {
3330        TryVec::new()
3331    };
3332
3333    let alpha_item_id = meta
3334        .item_references
3335        .iter()
3336        // Auxiliary image for the primary image
3337        .filter(|iref| {
3338            iref.to_item_id == meta.primary_item_id
3339                && iref.from_item_id != meta.primary_item_id
3340                && iref.item_type == b"auxl"
3341        })
3342        .map(|iref| iref.from_item_id)
3343        // which has the alpha property
3344        .find(|&item_id| {
3345            meta.properties.iter().any(|prop| {
3346                prop.item_id == item_id
3347                    && match &prop.property {
3348                        ItemProperty::AuxiliaryType(urn) => {
3349                            urn.type_subtype().0 == b"urn:mpeg:mpegB:cicp:systems:auxiliary:alpha"
3350                        }
3351                        _ => false,
3352                    }
3353            })
3354        });
3355
3356    // Extract properties for the primary item
3357    macro_rules! find_prop {
3358        ($variant:ident) => {
3359            meta.properties.iter().find_map(|p| {
3360                if p.item_id == meta.primary_item_id {
3361                    match &p.property {
3362                        ItemProperty::$variant(c) => Some(c.clone()),
3363                        _ => None,
3364                    }
3365                } else {
3366                    None
3367                }
3368            })
3369        };
3370    }
3371
3372    let av1_config = find_prop!(AV1Config);
3373    let color_info = find_prop!(ColorInformation);
3374    let rotation = find_prop!(Rotation);
3375    let mirror = find_prop!(Mirror);
3376    let clean_aperture = find_prop!(CleanAperture);
3377    let pixel_aspect_ratio = find_prop!(PixelAspectRatio);
3378    let content_light_level = find_prop!(ContentLightLevel);
3379    let mastering_display = find_prop!(MasteringDisplayColourVolume);
3380    let content_colour_volume = find_prop!(ContentColourVolume);
3381    let ambient_viewing = find_prop!(AmbientViewingEnvironment);
3382    let operating_point = find_prop!(OperatingPointSelector);
3383    let layer_selector = find_prop!(LayerSelector);
3384    let layered_image_indexing = find_prop!(AV1LayeredImageIndexing);
3385
3386    let mut context = AvifData {
3387        premultiplied_alpha: alpha_item_id.is_some_and(|alpha_item_id| {
3388            meta.item_references.iter().any(|iref| {
3389                iref.from_item_id == meta.primary_item_id
3390                    && iref.to_item_id == alpha_item_id
3391                    && iref.item_type == b"prem"
3392            })
3393        }),
3394        av1_config,
3395        color_info,
3396        rotation,
3397        mirror,
3398        clean_aperture,
3399        pixel_aspect_ratio,
3400        content_light_level,
3401        mastering_display,
3402        content_colour_volume,
3403        ambient_viewing,
3404        operating_point,
3405        layer_selector,
3406        layered_image_indexing,
3407        major_brand,
3408        compatible_brands,
3409        ..Default::default()
3410    };
3411
3412    // Helper to extract item data from either mdat or idat
3413    let mut extract_item_data = |loc: &ItemLocationBoxItem, buf: &mut TryVec<u8>| -> Result<()> {
3414        match loc.construction_method {
3415            ConstructionMethod::File => {
3416                for extent in loc.extents.iter() {
3417                    let mut found = false;
3418                    for mdat in mdats.iter_mut() {
3419                        if mdat.matches_extent(&extent.extent_range) {
3420                            buf.append(&mut mdat.data)?;
3421                            found = true;
3422                            break;
3423                        } else if mdat.contains_extent(&extent.extent_range) {
3424                            mdat.read_extent(&extent.extent_range, buf)?;
3425                            found = true;
3426                            break;
3427                        }
3428                    }
3429                    if !found {
3430                        return Err(Error::InvalidData("iloc contains an extent that is not in mdat"));
3431                    }
3432                }
3433                Ok(())
3434            },
3435            ConstructionMethod::Idat => {
3436                let idat_data = meta.idat.as_ref().ok_or(Error::InvalidData("idat box missing but construction_method is Idat"))?;
3437                for extent in loc.extents.iter() {
3438                    match &extent.extent_range {
3439                        ExtentRange::WithLength(range) => {
3440                            let start = usize::try_from(range.start).map_err(|_| Error::InvalidData("extent start too large"))?;
3441                            let end = usize::try_from(range.end).map_err(|_| Error::InvalidData("extent end too large"))?;
3442                            if end > idat_data.len() {
3443                                return Err(Error::InvalidData("extent exceeds idat size"));
3444                            }
3445                            buf.extend_from_slice(&idat_data[start..end]).map_err(|_| Error::OutOfMemory)?;
3446                        },
3447                        ExtentRange::ToEnd(range) => {
3448                            let start = usize::try_from(range.start).map_err(|_| Error::InvalidData("extent start too large"))?;
3449                            if start >= idat_data.len() {
3450                                return Err(Error::InvalidData("extent start exceeds idat size"));
3451                            }
3452                            buf.extend_from_slice(&idat_data[start..]).map_err(|_| Error::OutOfMemory)?;
3453                        },
3454                    }
3455                }
3456                Ok(())
3457            },
3458            ConstructionMethod::Item => {
3459                Err(Error::Unsupported("construction_method 'item' not supported"))
3460            },
3461        }
3462    };
3463
3464    // load data of relevant items
3465    // For grid images, we need to load tiles in the order specified by iref
3466    if is_grid {
3467        // Extract each tile in order
3468        for (idx, &tile_id) in tile_item_ids.iter().enumerate() {
3469            if idx % 16 == 0 {
3470                stop.check()?;
3471            }
3472
3473            let mut tile_data = TryVec::new();
3474
3475            if let Some(loc) = meta.iloc_items.iter().find(|loc| loc.item_id == tile_id) {
3476                extract_item_data(loc, &mut tile_data)?;
3477            } else {
3478                return Err(Error::InvalidData("grid tile not found in iloc"));
3479            }
3480
3481            context.grid_tiles.push(tile_data)?;
3482        }
3483
3484        // Set grid_config in context
3485        context.grid_config = grid_config;
3486    } else {
3487        // Standard single-frame AVIF: load primary_item and optional alpha_item
3488        for loc in meta.iloc_items.iter() {
3489            let item_data = if loc.item_id == meta.primary_item_id {
3490                &mut context.primary_item
3491            } else if Some(loc.item_id) == alpha_item_id {
3492                context.alpha_item.get_or_insert_with(TryVec::new)
3493            } else {
3494                continue;
3495            };
3496
3497            extract_item_data(loc, item_data)?;
3498        }
3499    }
3500
3501    // Extract EXIF and XMP items linked via cdsc references to the primary item
3502    for iref in meta.item_references.iter() {
3503        if iref.to_item_id != meta.primary_item_id || iref.item_type != b"cdsc" {
3504            continue;
3505        }
3506        let desc_item_id = iref.from_item_id;
3507        let Some(info) = meta.item_infos.iter().find(|i| i.item_id == desc_item_id) else {
3508            continue;
3509        };
3510        if info.item_type == b"Exif" {
3511            if let Some(loc) = meta.iloc_items.iter().find(|l| l.item_id == desc_item_id) {
3512                let mut raw = TryVec::new();
3513                extract_item_data(loc, &mut raw)?;
3514                // AVIF EXIF items start with a 4-byte big-endian offset to the TIFF header
3515                if raw.len() > 4 {
3516                    let offset = u32::from_be_bytes([raw[0], raw[1], raw[2], raw[3]]) as usize;
3517                    let start = 4 + offset;
3518                    if start < raw.len() {
3519                        let mut exif = TryVec::new();
3520                        exif.extend_from_slice(&raw[start..])?;
3521                        context.exif = Some(exif);
3522                    }
3523                }
3524            }
3525        } else if info.item_type == b"mime"
3526            && let Some(loc) = meta.iloc_items.iter().find(|l| l.item_id == desc_item_id)
3527        {
3528            let mut xmp = TryVec::new();
3529            extract_item_data(loc, &mut xmp)?;
3530            context.xmp = Some(xmp);
3531        }
3532    }
3533
3534    // Extract gain map (tmap derived image item)
3535    if let Some(tmap_info) = meta.item_infos.iter().find(|info| info.item_type == b"tmap") {
3536        let tmap_id = tmap_info.item_id;
3537
3538        let mut inputs: TryVec<(u32, u16)> = TryVec::new();
3539        for iref in meta.item_references.iter() {
3540            if iref.from_item_id == tmap_id && iref.item_type == b"dimg" {
3541                inputs.push((iref.to_item_id, iref.reference_index))?;
3542            }
3543        }
3544        inputs.sort_by_key(|&(_, idx)| idx);
3545
3546        if inputs.len() >= 2 && inputs[0].0 == meta.primary_item_id {
3547            let gmap_item_id = inputs[1].0;
3548
3549            // Read tmap item payload
3550            if let Some(loc) = meta.iloc_items.iter().find(|l| l.item_id == tmap_id) {
3551                let mut tmap_data = TryVec::new();
3552                extract_item_data(loc, &mut tmap_data)?;
3553                if let Ok(metadata) = parse_tone_map_image(&tmap_data) {
3554                    context.gain_map_metadata = Some(metadata);
3555                }
3556            }
3557
3558            // Read gain map image data
3559            if let Some(loc) = meta.iloc_items.iter().find(|l| l.item_id == gmap_item_id) {
3560                let mut gmap_data = TryVec::new();
3561                extract_item_data(loc, &mut gmap_data)?;
3562                context.gain_map_item = Some(gmap_data);
3563            }
3564
3565            // Get alternate color info from tmap item's properties
3566            context.gain_map_color_info = meta.properties.iter().find_map(|p| {
3567                if p.item_id == tmap_id {
3568                    match &p.property {
3569                        ItemProperty::ColorInformation(c) => Some(c.clone()),
3570                        _ => None,
3571                    }
3572                } else {
3573                    None
3574                }
3575            });
3576        }
3577    }
3578
3579    // Extract depth auxiliary image
3580    {
3581        let depth_item_id = meta
3582            .item_references
3583            .iter()
3584            .filter(|iref| {
3585                iref.to_item_id == meta.primary_item_id
3586                    && iref.from_item_id != meta.primary_item_id
3587                    && iref.item_type == b"auxl"
3588            })
3589            .map(|iref| iref.from_item_id)
3590            .find(|&item_id| {
3591                if alpha_item_id == Some(item_id) {
3592                    return false;
3593                }
3594                meta.properties.iter().any(|prop| {
3595                    prop.item_id == item_id
3596                        && match &prop.property {
3597                            ItemProperty::AuxiliaryType(urn) => {
3598                                is_depth_auxiliary_urn(urn.type_subtype().0)
3599                            }
3600                            _ => false,
3601                        }
3602                })
3603            });
3604
3605        if let Some(depth_id) = depth_item_id {
3606            if let Some(loc) = meta.iloc_items.iter().find(|l| l.item_id == depth_id) {
3607                let mut depth_data = TryVec::new();
3608                extract_item_data(loc, &mut depth_data)?;
3609                context.depth_item = Some(depth_data);
3610            }
3611            // Get dimensions from ispe
3612            if let Some((w, h)) = meta.properties.iter().find_map(|p| {
3613                if p.item_id == depth_id {
3614                    match &p.property {
3615                        ItemProperty::ImageSpatialExtents(e) => Some((e.width, e.height)),
3616                        _ => None,
3617                    }
3618                } else {
3619                    None
3620                }
3621            }) {
3622                context.depth_width = w;
3623                context.depth_height = h;
3624            }
3625            // Get av1C
3626            context.depth_av1_config = meta.properties.iter().find_map(|p| {
3627                if p.item_id == depth_id {
3628                    match &p.property {
3629                        ItemProperty::AV1Config(c) => Some(c.clone()),
3630                        _ => None,
3631                    }
3632                } else {
3633                    None
3634                }
3635            });
3636            // Get colr
3637            context.depth_color_info = meta.properties.iter().find_map(|p| {
3638                if p.item_id == depth_id {
3639                    match &p.property {
3640                        ItemProperty::ColorInformation(c) => Some(c.clone()),
3641                        _ => None,
3642                    }
3643                } else {
3644                    None
3645                }
3646            });
3647        }
3648    }
3649
3650    // Extract animation frames if this is an animated AVIF
3651    if let Some(anim) = animation_data {
3652        let frame_count = anim.color_sample_table.sample_sizes.len() as u32;
3653        tracker.validate_animation_frames(frame_count)?;
3654
3655        log::debug!("Animation: extracting frames (media_timescale={})", anim.color_timescale);
3656        match extract_animation_frames(&anim.color_sample_table, anim.color_timescale, &mut mdats) {
3657            Ok(frames) => {
3658                if !frames.is_empty() {
3659                    log::debug!("Animation: extracted {} frames", frames.len());
3660                    context.animation = Some(AnimationConfig {
3661                        loop_count: anim.loop_count,
3662                        frames,
3663                    });
3664                }
3665            }
3666            Err(e) => {
3667                log::warn!("Animation: failed to extract frames: {}", e);
3668            }
3669        }
3670    }
3671
3672    Ok(context)
3673}
3674
/// Read the contents of an AVIF file with custom parsing options
///
/// Uses unlimited resource limits for backwards compatibility.
///
/// # Arguments
///
/// * `f` - Reader for the AVIF file
/// * `options` - Parsing options (e.g., lenient mode)
#[cfg(feature = "eager")]
#[deprecated(since = "1.5.0", note = "Use `AvifParser::from_reader_with_config()` with `DecodeConfig::lenient()` instead")]
#[allow(deprecated)]
pub fn read_avif_with_options<T: Read>(f: &mut T, options: &ParseOptions) -> Result<AvifData> {
    // Legacy entry point: translate the old ParseOptions into an unlimited
    // DecodeConfig and delegate to the config-based reader.
    read_avif_with_config(
        f,
        &DecodeConfig::unlimited().lenient(options.lenient),
        &Unstoppable,
    )
}
3690
/// Read the contents of an AVIF file
///
/// Parses the container and returns accumulated metadata in an [`AvifData`]
/// struct. Strict validation and unlimited resource limits are used by default.
///
/// For resource limits, use [`read_avif_with_config`].
/// For lenient parsing, use [`read_avif_with_options`].
#[cfg(feature = "eager")]
#[deprecated(since = "1.5.0", note = "Use `AvifParser::from_reader()` instead")]
#[allow(deprecated)]
pub fn read_avif<T: Read>(f: &mut T) -> Result<AvifData> {
    // Delegate to the options-based variant with default (strict) options.
    let options = ParseOptions::default();
    read_avif_with_options(f, &options)
}
3704
/// An entity group from a GroupsListBox (`grpl`).
///
/// See ISO 14496-12:2024 § 8.15.3.
#[allow(dead_code)] // Parsed for future altr group support
struct EntityGroup {
    /// Grouping type, taken from the EntityToGroupBox's own box type
    /// (see `read_grpl`), e.g. `altr`.
    group_type: FourCC,
    /// The `group_id` field of the EntityToGroupBox.
    group_id: u32,
    /// The entity IDs listed in the group, in file order.
    entity_ids: TryVec<u32>,
}
3714
3715/// Parse a GroupsListBox (`grpl`).
3716///
3717/// Each child box is an EntityToGroupBox with a grouping type given by its box type.
3718/// See ISO 14496-12:2024 § 8.15.3.
3719fn read_grpl<T: Read + Offset>(src: &mut BMFFBox<'_, T>) -> Result<TryVec<EntityGroup>> {
3720    let mut groups = TryVec::new();
3721    let mut iter = src.box_iter();
3722    while let Some(mut b) = iter.next_box()? {
3723        let group_type = FourCC::from(u32::from(b.head.name));
3724        // Read version and flags (not validated per spec flexibility)
3725        let _version = b.read_u8()?;
3726        let mut flags_buf = [0u8; 3];
3727        b.read_exact(&mut flags_buf)?;
3728
3729        let group_id = be_u32(&mut b)?;
3730        let num_entities = be_u32(&mut b)?;
3731        // Each entity_id is 4 bytes
3732        if (num_entities as u64) * 4 > b.bytes_left() {
3733            return Err(Error::InvalidData(
3734                "grpl num_entities exceeds remaining box bytes",
3735            ));
3736        }
3737
3738        let mut entity_ids = TryVec::new();
3739        for _ in 0..num_entities {
3740            entity_ids.push(be_u32(&mut b)?)?;
3741        }
3742
3743        groups.push(EntityGroup {
3744            group_type,
3745            group_id,
3746            entity_ids,
3747        })?;
3748
3749        skip_box_remain(&mut b)?;
3750        check_parser_state(&b.head, &b.content)?;
3751    }
3752    Ok(groups)
3753}
3754
3755/// Parse a ToneMapImage (`tmap`) item payload into gain map metadata.
3756///
3757/// See ISO 21496-1:2025 for the payload format.
3758fn parse_tone_map_image(data: &[u8]) -> Result<GainMapMetadata> {
3759    let mut cursor = std::io::Cursor::new(data);
3760
3761    // version (u8) — must be 0
3762    let version = cursor.read_u8()?;
3763    if version != 0 {
3764        return Err(Error::Unsupported("tmap version"));
3765    }
3766
3767    // minimum_version (u16 BE) — must be 0
3768    let minimum_version = be_u16(&mut cursor)?;
3769    if minimum_version > 0 {
3770        return Err(Error::Unsupported("tmap minimum version"));
3771    }
3772
3773    // writer_version (u16 BE) — informational, must be >= minimum_version
3774    let writer_version = be_u16(&mut cursor)?;
3775    if writer_version < minimum_version {
3776        return Err(Error::InvalidData("tmap writer_version < minimum_version"));
3777    }
3778
3779    // Flags byte: is_multichannel (bit 7), use_base_colour_space (bit 6),
3780    // reserved (bit 5,4), common_denominator (bit 3), backward_direction (bit 2), reserved (bits 0-1)
3781    let flags = cursor.read_u8()?;
3782    let is_multichannel = (flags & 0x80) != 0;
3783    let use_base_colour_space = (flags & 0x40) != 0;
3784    let backward_direction = (flags & 0x04) != 0;
3785
3786    // base_hdr_headroom and alternate_hdr_headroom
3787    let base_hdr_headroom_n = be_u32(&mut cursor)?;
3788    let base_hdr_headroom_d = be_u32(&mut cursor)?;
3789    let alternate_hdr_headroom_n = be_u32(&mut cursor)?;
3790    let alternate_hdr_headroom_d = be_u32(&mut cursor)?;
3791
3792    let channel_count = if is_multichannel { 3 } else { 1 };
3793    let mut channels = [GainMapChannel {
3794        gain_map_min_n: 0, gain_map_min_d: 0,
3795        gain_map_max_n: 0, gain_map_max_d: 0,
3796        gamma_n: 0, gamma_d: 0,
3797        base_offset_n: 0, base_offset_d: 0,
3798        alternate_offset_n: 0, alternate_offset_d: 0,
3799    }; 3];
3800
3801    for ch in channels.iter_mut().take(channel_count) {
3802        ch.gain_map_min_n = be_i32(&mut cursor)?;
3803        ch.gain_map_min_d = be_u32(&mut cursor)?;
3804        ch.gain_map_max_n = be_i32(&mut cursor)?;
3805        ch.gain_map_max_d = be_u32(&mut cursor)?;
3806        ch.gamma_n = be_u32(&mut cursor)?;
3807        ch.gamma_d = be_u32(&mut cursor)?;
3808        ch.base_offset_n = be_i32(&mut cursor)?;
3809        ch.base_offset_d = be_u32(&mut cursor)?;
3810        ch.alternate_offset_n = be_i32(&mut cursor)?;
3811        ch.alternate_offset_d = be_u32(&mut cursor)?;
3812    }
3813
3814    // Copy channel 0 to channels 1 and 2 if single-channel
3815    if !is_multichannel {
3816        channels[1] = channels[0];
3817        channels[2] = channels[0];
3818    }
3819
3820    Ok(GainMapMetadata {
3821        is_multichannel,
3822        use_base_colour_space,
3823        backward_direction,
3824        base_hdr_headroom_n,
3825        base_hdr_headroom_d,
3826        alternate_hdr_headroom_n,
3827        alternate_hdr_headroom_d,
3828        channels,
3829    })
3830}
3831
3832/// Parse a metadata box in the context of an AVIF
3833/// Currently requires the primary item to be an av01 item type and generates
3834/// an error otherwise.
3835/// See ISO 14496-12:2015 § 8.11.1
3836fn read_avif_meta<T: Read + Offset>(src: &mut BMFFBox<'_, T>, options: &ParseOptions) -> Result<AvifInternalMeta> {
3837    let version = read_fullbox_version_no_flags(src, options)?;
3838
3839    if version != 0 {
3840        return Err(Error::Unsupported("unsupported meta version"));
3841    }
3842
3843    let mut primary_item_id = None;
3844    let mut item_infos = None;
3845    let mut iloc_items = None;
3846    let mut item_references = TryVec::new();
3847    let mut properties = TryVec::new();
3848    let mut idat = None;
3849    let mut entity_groups = TryVec::new();
3850
3851    let mut iter = src.box_iter();
3852    while let Some(mut b) = iter.next_box()? {
3853        match b.head.name {
3854            BoxType::ItemInfoBox => {
3855                if item_infos.is_some() {
3856                    return Err(Error::InvalidData("There should be zero or one iinf boxes per ISO 14496-12:2015 § 8.11.6.1"));
3857                }
3858                item_infos = Some(read_iinf(&mut b, options)?);
3859            },
3860            BoxType::ItemLocationBox => {
3861                if iloc_items.is_some() {
3862                    return Err(Error::InvalidData("There should be zero or one iloc boxes per ISO 14496-12:2015 § 8.11.3.1"));
3863                }
3864                iloc_items = Some(read_iloc(&mut b, options)?);
3865            },
3866            BoxType::PrimaryItemBox => {
3867                if primary_item_id.is_some() {
3868                    return Err(Error::InvalidData("There should be zero or one iloc boxes per ISO 14496-12:2015 § 8.11.4.1"));
3869                }
3870                primary_item_id = Some(read_pitm(&mut b, options)?);
3871            },
3872            BoxType::ImageReferenceBox => {
3873                item_references.append(&mut read_iref(&mut b, options)?)?;
3874            },
3875            BoxType::ImagePropertiesBox => {
3876                properties = read_iprp(&mut b, options)?;
3877            },
3878            BoxType::ItemDataBox => {
3879                if idat.is_some() {
3880                    return Err(Error::InvalidData("There should be zero or one idat boxes"));
3881                }
3882                idat = Some(b.read_into_try_vec()?);
3883            },
3884            BoxType::GroupsListBox => {
3885                entity_groups.append(&mut read_grpl(&mut b)?)?;
3886            },
3887            BoxType::HandlerBox => {
3888                let hdlr = read_hdlr(&mut b)?;
3889                if hdlr.handler_type != b"pict" {
3890                    warn!("hdlr handler_type: {}", hdlr.handler_type);
3891                    return Err(Error::InvalidData("meta handler_type must be 'pict' for AVIF"));
3892                }
3893            },
3894            _ => skip_box_content(&mut b)?,
3895        }
3896
3897        check_parser_state(&b.head, &b.content)?;
3898    }
3899
3900    let primary_item_id = primary_item_id.ok_or(Error::InvalidData("Required pitm box not present in meta box"))?;
3901
3902    let item_infos = item_infos.ok_or(Error::InvalidData("iinf missing"))?;
3903
3904    if let Some(item_info) = item_infos.iter().find(|x| x.item_id == primary_item_id) {
3905        // Allow both "av01" (standard single-frame) and "grid" (tiled) types
3906        if item_info.item_type != b"av01" && item_info.item_type != b"grid" {
3907            warn!("primary_item_id type: {}", item_info.item_type);
3908            return Err(Error::InvalidData("primary_item_id type is not av01 or grid"));
3909        }
3910    } else {
3911        return Err(Error::InvalidData("primary_item_id not present in iinf box"));
3912    }
3913
3914    Ok(AvifInternalMeta {
3915        properties,
3916        item_references,
3917        primary_item_id,
3918        iloc_items: iloc_items.ok_or(Error::InvalidData("iloc missing"))?,
3919        item_infos,
3920        idat,
3921        entity_groups,
3922    })
3923}
3924
3925/// Parse a Handler Reference Box
3926/// See ISO 14496-12:2015 § 8.4.3
3927fn read_hdlr<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<HandlerBox> {
3928    let (_version, _flags) = read_fullbox_extra(src)?;
3929    // pre_defined (4 bytes)
3930    skip(src, 4)?;
3931    // handler_type (4 bytes)
3932    let handler_type = be_u32(src)?;
3933    // reserved (3 × 4 bytes) + name (variable) — skip the rest
3934    skip_box_remain(src)?;
3935    Ok(HandlerBox {
3936        handler_type: FourCC::from(handler_type),
3937    })
3938}
3939
3940/// Parse a Primary Item Box
3941/// See ISO 14496-12:2015 § 8.11.4
3942fn read_pitm<T: Read>(src: &mut BMFFBox<'_, T>, options: &ParseOptions) -> Result<u32> {
3943    let version = read_fullbox_version_no_flags(src, options)?;
3944
3945    let item_id = match version {
3946        0 => be_u16(src)?.into(),
3947        1 => be_u32(src)?,
3948        _ => return Err(Error::Unsupported("unsupported pitm version")),
3949    };
3950
3951    Ok(item_id)
3952}
3953
3954/// Parse an Item Information Box
3955/// See ISO 14496-12:2015 § 8.11.6
3956fn read_iinf<T: Read>(src: &mut BMFFBox<'_, T>, options: &ParseOptions) -> Result<TryVec<ItemInfoEntry>> {
3957    let version = read_fullbox_version_no_flags(src, options)?;
3958
3959    match version {
3960        0 | 1 => (),
3961        _ => return Err(Error::Unsupported("unsupported iinf version")),
3962    }
3963
3964    let entry_count = if version == 0 {
3965        be_u16(src)?.to_usize()
3966    } else {
3967        be_u32(src)?.to_usize()
3968    };
3969    // Cap pre-allocation: entry_count is untrusted, actual items come from box_iter
3970    let mut item_infos = TryVec::with_capacity(entry_count.min(4096))?;
3971
3972    let mut iter = src.box_iter();
3973    while let Some(mut b) = iter.next_box()? {
3974        if b.head.name != BoxType::ItemInfoEntry {
3975            return Err(Error::InvalidData("iinf box should contain only infe boxes"));
3976        }
3977
3978        item_infos.push(read_infe(&mut b)?)?;
3979
3980        check_parser_state(&b.head, &b.content)?;
3981    }
3982
3983    Ok(item_infos)
3984}
3985
3986/// Parse an Item Info Entry
3987/// See ISO 14496-12:2015 § 8.11.6.2
3988fn read_infe<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<ItemInfoEntry> {
3989    // According to the standard, it seems the flags field should be 0, but
3990    // at least one sample AVIF image has a nonzero value.
3991    let (version, _) = read_fullbox_extra(src)?;
3992
3993    // mif1 brand (see ISO 23008-12:2017 § 10.2.1) only requires v2 and 3
3994    let item_id = match version {
3995        2 => be_u16(src)?.into(),
3996        3 => be_u32(src)?,
3997        _ => return Err(Error::Unsupported("unsupported version in 'infe' box")),
3998    };
3999
4000    let item_protection_index = be_u16(src)?;
4001
4002    if item_protection_index != 0 {
4003        return Err(Error::Unsupported("protected items (infe.item_protection_index != 0) are not supported"));
4004    }
4005
4006    let item_type = FourCC::from(be_u32(src)?);
4007    debug!("infe item_id {item_id} item_type: {item_type}");
4008
4009    // There are some additional fields here, but they're not of interest to us
4010    skip_box_remain(src)?;
4011
4012    Ok(ItemInfoEntry { item_id, item_type })
4013}
4014
/// Parse an Item Reference Box (`iref`).
/// See ISO 14496-12:2015 § 8.11.12.
///
/// Each child box's type is the reference type; each child maps one
/// `from_item_id` to a list of `to_item_id`s. The result is flattened into
/// one `SingleItemTypeReferenceBox` per (from, to) pair, preserving the
/// in-box order via `reference_index`.
fn read_iref<T: Read>(src: &mut BMFFBox<'_, T>, options: &ParseOptions) -> Result<TryVec<SingleItemTypeReferenceBox>> {
    let mut item_references = TryVec::new();
    let version = read_fullbox_version_no_flags(src, options)?;
    if version > 1 {
        return Err(Error::Unsupported("iref version"));
    }

    let mut iter = src.box_iter();
    while let Some(mut b) = iter.next_box()? {
        // Item ids are 16-bit in version 0 and 32-bit in version 1.
        let from_item_id = if version == 0 {
            be_u16(&mut b)?.into()
        } else {
            be_u32(&mut b)?
        };
        let reference_count = be_u16(&mut b)?;
        // Each to_item_id is 2 bytes (version 0) or 4 bytes (version 1)
        let bytes_per_ref: u64 = if version == 0 { 2 } else { 4 };
        // Reject counts that cannot fit in the remaining payload before
        // looping, so a bogus count fails fast instead of on a short read.
        if (reference_count as u64) * bytes_per_ref > b.bytes_left() {
            return Err(Error::InvalidData(
                "iref reference_count exceeds remaining box bytes",
            ));
        }
        for reference_index in 0..reference_count {
            let to_item_id = if version == 0 {
                be_u16(&mut b)?.into()
            } else {
                be_u32(&mut b)?
            };
            // A self-reference is rejected as invalid.
            if from_item_id == to_item_id {
                return Err(Error::InvalidData("from_item_id and to_item_id must be different"));
            }
            item_references.push(SingleItemTypeReferenceBox {
                item_type: b.head.name.into(),
                from_item_id,
                to_item_id,
                reference_index,
            })?;
        }
        check_parser_state(&b.head, &b.content)?;
    }
    Ok(item_references)
}
4057
/// Properties that MUST be marked essential when associated with an item.
/// See AVIF § 2.3.2.1.1 (a1op), HEIF § 6.5.11.1 (lsel), MIAF § 7.3.9 (clap, irot, imir).
/// Enforced in `read_iprp` (warning-only when `ParseOptions::lenient` is set).
const MUST_BE_ESSENTIAL: &[&[u8; 4]] = &[b"a1op", b"lsel", b"clap", b"irot", b"imir"];

/// Properties that MUST NOT be marked essential when associated with an item.
/// See AVIF § 2.3.2.3.2 (a1lx).
/// Enforced in `read_iprp` (warning-only when `ParseOptions::lenient` is set).
const MUST_NOT_BE_ESSENTIAL: &[&[u8; 4]] = &[b"a1lx"];
4065
/// Parse an Item Properties Box (`iprp`): an `ipco` property container plus
/// `ipma` association boxes, resolved into one `AssociatedProperty` per
/// (item, supported property) pair.
///
/// Essential-flag rules from `MUST_BE_ESSENTIAL` / `MUST_NOT_BE_ESSENTIAL`
/// are hard errors unless `options.lenient` downgrades them to warnings.
fn read_iprp<T: Read>(src: &mut BMFFBox<'_, T>, options: &ParseOptions) -> Result<TryVec<AssociatedProperty>> {
    let mut iter = src.box_iter();
    let mut properties = TryVec::new();
    let mut associations = TryVec::new();

    while let Some(mut b) = iter.next_box()? {
        match b.head.name {
            BoxType::ItemPropertyContainerBox => {
                properties = read_ipco(&mut b, options)?;
            },
            BoxType::ItemPropertyAssociationBox => {
                associations = read_ipma(&mut b)?;
            },
            _ => return Err(Error::InvalidData("unexpected ipco child")),
        }
    }

    let mut associated = TryVec::new();
    for a in associations {
        // ipma indices are 1-based; convert to a 0-based index into `properties`.
        let index = match a.property_index {
            0 => {
                // property_index 0 means no association; essential must also be 0
                if a.essential {
                    return Err(Error::InvalidData(
                        "ipma property_index 0 must not be marked essential",
                    ));
                }
                continue;
            }
            x => x as usize - 1,
        };

        // An index beyond the parsed property list is silently skipped.
        let Some(entry) = properties.get(index) else {
            continue;
        };

        let is_supported = entry.property != ItemProperty::Unsupported;
        let fourcc_bytes = &entry.fourcc.value;

        if is_supported {
            // Validate essential flag for known property types
            if a.essential && MUST_NOT_BE_ESSENTIAL.contains(&fourcc_bytes) {
                warn!("item {} has {} marked essential (spec forbids it)", a.item_id, entry.fourcc);
                if !options.lenient {
                    return Err(Error::InvalidData(
                        "property must not be marked essential",
                    ));
                }
            }
            if !a.essential && MUST_BE_ESSENTIAL.contains(&fourcc_bytes) {
                warn!("item {} has {} not marked essential (spec requires it)", a.item_id, entry.fourcc);
                if !options.lenient {
                    return Err(Error::InvalidData(
                        "property must be marked essential",
                    ));
                }
            }

            associated.push(AssociatedProperty {
                item_id: a.item_id,
                property: entry.property.try_clone()?,
            })?;
        } else if a.essential {
            // Unknown property marked essential — this item cannot be correctly processed
            warn!(
                "item {} has unsupported property {} marked essential; item will be unusable",
                a.item_id, entry.fourcc
            );
            if !options.lenient {
                return Err(Error::Unsupported(
                    "unsupported property marked as essential",
                ));
            }
        }
        // Unknown non-essential properties are silently skipped (they're optional)
    }
    Ok(associated)
}
4144
/// Image spatial extents (`ispe`): the item's dimensions in pixels.
/// Both fields are validated non-zero by `read_ispe`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct ImageSpatialExtents {
    pub(crate) width: u32,
    pub(crate) height: u32,
}
4151
/// A single parsed item property from the `ipco` container.
///
/// `Unsupported` stands in for boxes the parser does not understand; it must
/// still occupy a slot so the 1-based `ipma` indices stay aligned
/// (see `read_ipco`).
#[derive(Debug, PartialEq)]
pub(crate) enum ItemProperty {
    Channels(ArrayVec<u8, 16>),
    AuxiliaryType(AuxiliaryTypeProperty),
    ImageSpatialExtents(ImageSpatialExtents),
    ImageGrid(GridConfig),
    AV1Config(AV1Config),
    ColorInformation(ColorInformation),
    Rotation(ImageRotation),
    Mirror(ImageMirror),
    CleanAperture(CleanAperture),
    PixelAspectRatio(PixelAspectRatio),
    ContentLightLevel(ContentLightLevel),
    MasteringDisplayColourVolume(MasteringDisplayColourVolume),
    ContentColourVolume(ContentColourVolume),
    AmbientViewingEnvironment(AmbientViewingEnvironment),
    OperatingPointSelector(OperatingPointSelector),
    LayerSelector(LayerSelector),
    AV1LayeredImageIndexing(AV1LayeredImageIndexing),
    Unsupported,
}
4173
impl TryClone for ItemProperty {
    /// Clone with fallible allocation: heap-backed variants delegate to
    /// `try_clone`, `Copy` payloads are dereferenced, and `Channels` clones
    /// its inline `ArrayVec` (no heap allocation involved).
    fn try_clone(&self) -> Result<Self, TryReserveError> {
        Ok(match self {
            Self::Channels(val) => Self::Channels(val.clone()),
            Self::AuxiliaryType(val) => Self::AuxiliaryType(val.try_clone()?),
            Self::ImageSpatialExtents(val) => Self::ImageSpatialExtents(*val),
            Self::ImageGrid(val) => Self::ImageGrid(val.clone()),
            Self::AV1Config(val) => Self::AV1Config(val.clone()),
            Self::ColorInformation(val) => Self::ColorInformation(val.clone()),
            Self::Rotation(val) => Self::Rotation(*val),
            Self::Mirror(val) => Self::Mirror(*val),
            Self::CleanAperture(val) => Self::CleanAperture(*val),
            Self::PixelAspectRatio(val) => Self::PixelAspectRatio(*val),
            Self::ContentLightLevel(val) => Self::ContentLightLevel(*val),
            Self::MasteringDisplayColourVolume(val) => Self::MasteringDisplayColourVolume(*val),
            Self::ContentColourVolume(val) => Self::ContentColourVolume(*val),
            Self::AmbientViewingEnvironment(val) => Self::AmbientViewingEnvironment(*val),
            Self::OperatingPointSelector(val) => Self::OperatingPointSelector(*val),
            Self::LayerSelector(val) => Self::LayerSelector(*val),
            Self::AV1LayeredImageIndexing(val) => Self::AV1LayeredImageIndexing(*val),
            Self::Unsupported => Self::Unsupported,
        })
    }
}
4198
/// One raw `ipma` association entry, before resolution against `ipco`.
struct Association {
    item_id: u32,
    // True when the property is required to correctly process the item.
    essential: bool,
    // 1-based index into the ipco property list; 0 means "no property"
    // (handled specially in read_iprp).
    property_index: u16,
}
4204
/// A property resolved to its owning item, produced by `read_iprp`.
pub(crate) struct AssociatedProperty {
    pub item_id: u32,
    pub property: ItemProperty,
}
4209
4210fn read_ipma<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<TryVec<Association>> {
4211    let (version, flags) = read_fullbox_extra(src)?;
4212
4213    let mut associations = TryVec::new();
4214
4215    let entry_count = be_u32(src)?;
4216    // Each entry has at minimum: item_id (2 or 4 bytes) + association_count (1 byte)
4217    let min_bytes_per_entry: u64 = if version == 0 { 3 } else { 5 };
4218    if (entry_count as u64) * min_bytes_per_entry > src.bytes_left() {
4219        return Err(Error::InvalidData(
4220            "ipma entry_count exceeds remaining box bytes",
4221        ));
4222    }
4223    for _ in 0..entry_count {
4224        let item_id = if version == 0 {
4225            be_u16(src)?.into()
4226        } else {
4227            be_u32(src)?
4228        };
4229        let association_count = src.read_u8()?;
4230        for _ in 0..association_count {
4231            let num_association_bytes = if flags & 1 == 1 { 2 } else { 1 };
4232            let association = &mut [0; 2][..num_association_bytes];
4233            src.read_exact(association)?;
4234            let mut association = BitReader::new(association);
4235            let essential = association.read_bool()?;
4236            let property_index = association.read_u16(association.remaining().try_into()?)?;
4237            associations.push(Association {
4238                item_id,
4239                essential,
4240                property_index,
4241            })?;
4242        }
4243    }
4244    Ok(associations)
4245}
4246
/// A parsed property paired with its box FourCC, kept so `read_iprp` can
/// apply per-type essential-flag rules even for `Unsupported` entries.
struct IndexedProperty {
    fourcc: FourCC,
    property: ItemProperty,
}
4252
/// Parse an Item Property Container box (`ipco`).
///
/// Every child box — understood or not — contributes exactly one entry, so
/// the 1-based indices used by `ipma` line up with this list.
fn read_ipco<T: Read>(src: &mut BMFFBox<'_, T>, options: &ParseOptions) -> Result<TryVec<IndexedProperty>> {
    let mut properties = TryVec::new();

    let mut iter = src.box_iter();
    while let Some(mut b) = iter.next_box()? {
        let fourcc: FourCC = b.head.name.into();
        // Must push for every property to have correct index for them
        let prop = match b.head.name {
            BoxType::PixelInformationBox => ItemProperty::Channels(read_pixi(&mut b, options)?),
            BoxType::AuxiliaryTypeProperty => ItemProperty::AuxiliaryType(read_auxc(&mut b, options)?),
            BoxType::ImageSpatialExtentsBox => ItemProperty::ImageSpatialExtents(read_ispe(&mut b, options)?),
            BoxType::ImageGridBox => ItemProperty::ImageGrid(read_grid(&mut b, options)?),
            BoxType::AV1CodecConfigurationBox => ItemProperty::AV1Config(read_av1c(&mut b)?),
            BoxType::ColorInformationBox => {
                // A colr box that fails to parse (e.g. unknown colour_type)
                // degrades to Unsupported instead of failing the whole file.
                match read_colr(&mut b) {
                    Ok(colr) => ItemProperty::ColorInformation(colr),
                    Err(_) => ItemProperty::Unsupported,
                }
            },
            BoxType::ImageRotationBox => ItemProperty::Rotation(read_irot(&mut b)?),
            BoxType::ImageMirrorBox => ItemProperty::Mirror(read_imir(&mut b)?),
            BoxType::CleanApertureBox => ItemProperty::CleanAperture(read_clap(&mut b)?),
            BoxType::PixelAspectRatioBox => ItemProperty::PixelAspectRatio(read_pasp(&mut b)?),
            BoxType::ContentLightLevelBox => ItemProperty::ContentLightLevel(read_clli(&mut b)?),
            BoxType::MasteringDisplayColourVolumeBox => ItemProperty::MasteringDisplayColourVolume(read_mdcv(&mut b)?),
            BoxType::ContentColourVolumeBox => ItemProperty::ContentColourVolume(read_cclv(&mut b)?),
            BoxType::AmbientViewingEnvironmentBox => ItemProperty::AmbientViewingEnvironment(read_amve(&mut b)?),
            BoxType::OperatingPointSelectorBox => ItemProperty::OperatingPointSelector(read_a1op(&mut b)?),
            BoxType::LayerSelectorBox => ItemProperty::LayerSelector(read_lsel(&mut b)?),
            BoxType::AV1LayeredImageIndexingBox => ItemProperty::AV1LayeredImageIndexing(read_a1lx(&mut b)?),
            _ => {
                // Unknown property: consume its payload but keep its slot.
                skip_box_remain(&mut b)?;
                ItemProperty::Unsupported
            },
        };
        properties.push(IndexedProperty { fourcc, property: prop })?;
    }
    Ok(properties)
}
4292
/// Parse a Pixel Information property box (`pixi`).
///
/// Returns the per-channel bit-depth list, capped at 16 channels (the
/// `ArrayVec` capacity).
fn read_pixi<T: Read>(src: &mut BMFFBox<'_, T>, options: &ParseOptions) -> Result<ArrayVec<u8, 16>> {
    let version = read_fullbox_version_no_flags(src, options)?;
    if version != 0 {
        return Err(Error::Unsupported("pixi version"));
    }

    let num_channels = usize::from(src.read_u8()?);
    let mut channels = ArrayVec::new();
    // Clamp to capacity: if the file claims more than 16 channels, only 16
    // bytes are read here; the surplus presumably trips check_parser_state
    // below in strict mode — NOTE(review): confirm intended behavior.
    let clamped = num_channels.min(channels.capacity());
    channels.extend((0..clamped).map(|_| 0));
    src.read_exact(&mut channels).map_err(|_| Error::InvalidData("invalid num_channels"))?;

    // In lenient mode, skip any extra bytes (e.g., extended_pixi.avif has 6 extra bytes)
    if options.lenient && src.bytes_left() > 0 {
        skip(src, src.bytes_left())?;
    }

    check_parser_state(&src.head, &src.content)?;
    Ok(channels)
}
4313
/// Auxiliary type property (`auxC`): the raw, NUL-separated
/// aux_type/aux_subtype payload. Split on demand by `type_subtype`.
#[derive(Debug, PartialEq)]
struct AuxiliaryTypeProperty {
    aux_data: TryString,
}
4318
4319impl AuxiliaryTypeProperty {
4320    #[must_use]
4321    fn type_subtype(&self) -> (&[u8], &[u8]) {
4322        let split = self.aux_data.iter().position(|&b| b == b'\0')
4323            .map(|pos| self.aux_data.split_at(pos));
4324        if let Some((aux_type, rest)) = split {
4325            (aux_type, &rest[1..])
4326        } else {
4327            (&self.aux_data, &[])
4328        }
4329    }
4330}
4331
impl TryClone for AuxiliaryTypeProperty {
    /// Clone the underlying byte buffer with fallible allocation.
    fn try_clone(&self) -> Result<Self, TryReserveError> {
        Ok(Self {
            aux_data: self.aux_data.try_clone()?,
        })
    }
}
4339
4340fn read_auxc<T: Read>(src: &mut BMFFBox<'_, T>, options: &ParseOptions) -> Result<AuxiliaryTypeProperty> {
4341    let version = read_fullbox_version_no_flags(src, options)?;
4342    if version != 0 {
4343        return Err(Error::Unsupported("auxC version"));
4344    }
4345
4346    let aux_data = src.read_into_try_vec()?;
4347
4348    Ok(AuxiliaryTypeProperty { aux_data })
4349}
4350
/// Check if an auxiliary type URN identifies a depth auxiliary image.
///
/// Recognizes two standard URNs:
/// - `urn:mpeg:mpegB:cicp:systems:auxiliary:depth` (MPEG-B Part 23 / ISO 23091-2)
/// - `urn:mpeg:hevc:2015:auxid:2` (HEVC-style, auxid 2 = depth)
fn is_depth_auxiliary_urn(urn: &[u8]) -> bool {
    const DEPTH_URNS: [&[u8]; 2] = [
        b"urn:mpeg:mpegB:cicp:systems:auxiliary:depth",
        b"urn:mpeg:hevc:2015:auxid:2",
    ];
    DEPTH_URNS.contains(&urn)
}
4360
/// Parse an AV1 Codec Configuration property box
/// See AV1-ISOBMFF § 2.3
///
/// Bit layout read below:
/// - byte0: marker (1 bit) | version (7 bits)
/// - byte1: seq_profile (3 bits) | seq_level_idx (5 bits)
/// - byte2: seq_tier (1) | high_bitdepth (1) | twelve_bit (1) | monochrome (1)
///          | chroma_subsampling_x (1) | chroma_subsampling_y (1)
///          | chroma_sample_position (2)
fn read_av1c<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<AV1Config> {
    // av1C is NOT a FullBox — it has no version/flags
    let byte0 = src.read_u8()?;
    let marker = byte0 >> 7;
    let version = byte0 & 0x7F;

    if marker != 1 {
        return Err(Error::InvalidData("av1C marker must be 1"));
    }
    if version != 1 {
        return Err(Error::Unsupported("av1C version must be 1"));
    }

    let byte1 = src.read_u8()?;
    let profile = byte1 >> 5;
    let level = byte1 & 0x1F;

    let byte2 = src.read_u8()?;
    let tier = byte2 >> 7;
    let high_bitdepth = (byte2 >> 6) & 1;
    let twelve_bit = (byte2 >> 5) & 1;
    let monochrome = (byte2 >> 4) & 1 != 0;
    let chroma_subsampling_x = (byte2 >> 3) & 1;
    let chroma_subsampling_y = (byte2 >> 2) & 1;
    let chroma_sample_position = byte2 & 0x03;

    let byte3 = src.read_u8()?;
    // byte3: 3 bits reserved, 1 bit initial_presentation_delay_present, 4 bits delay/reserved
    // Not needed for image decoding.
    let _ = byte3;

    // Derive the effective bit depth from the two depth flags:
    // high_bitdepth=0 → 8; high_bitdepth=1 → 10, or 12 if twelve_bit is set.
    let bit_depth = if high_bitdepth != 0 {
        if twelve_bit != 0 { 12 } else { 10 }
    } else {
        8
    };

    // Skip any configOBUs (remainder of box)
    skip_box_remain(src)?;

    Ok(AV1Config {
        profile,
        level,
        tier,
        bit_depth,
        monochrome,
        chroma_subsampling_x,
        chroma_subsampling_y,
        chroma_sample_position,
    })
}
4414
/// Parse a Colour Information property box
/// See ISOBMFF § 12.1.5
///
/// Supports `nclx` (CICP code points) and the ICC variants `rICC`/`prof`.
/// Any other colour_type is consumed and reported as Unsupported; the
/// caller (`read_ipco`) degrades that error to `ItemProperty::Unsupported`.
fn read_colr<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<ColorInformation> {
    // colr is NOT a FullBox — no version/flags
    let colour_type = be_u32(src)?;

    match &colour_type.to_be_bytes() {
        b"nclx" => {
            let color_primaries = be_u16(src)?;
            let transfer_characteristics = be_u16(src)?;
            let matrix_coefficients = be_u16(src)?;
            // Only the top bit of this byte is the full_range_flag.
            let full_range_byte = src.read_u8()?;
            let full_range = (full_range_byte >> 7) != 0;
            // Skip any remaining bytes
            skip_box_remain(src)?;
            Ok(ColorInformation::Nclx {
                color_primaries,
                transfer_characteristics,
                matrix_coefficients,
                full_range,
            })
        }
        b"rICC" | b"prof" => {
            // The remainder of the box is the raw ICC profile blob.
            let icc_data = src.read_into_try_vec()?;
            Ok(ColorInformation::IccProfile(icc_data.to_vec()))
        }
        _ => {
            skip_box_remain(src)?;
            Err(Error::Unsupported("unsupported colr colour_type"))
        }
    }
}
4447
4448/// Parse an Image Rotation property box.
4449/// See ISOBMFF § 12.1.4. NOT a FullBox.
4450fn read_irot<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<ImageRotation> {
4451    let byte = src.read_u8()?;
4452    let angle_code = byte & 0x03;
4453    let angle = match angle_code {
4454        0 => 0,
4455        1 => 90,
4456        2 => 180,
4457        _ => 270, // angle_code & 0x03 can only be 0..=3
4458    };
4459    skip_box_remain(src)?;
4460    Ok(ImageRotation { angle })
4461}
4462
4463/// Parse an Image Mirror property box.
4464/// See ISOBMFF § 12.1.4. NOT a FullBox.
4465fn read_imir<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<ImageMirror> {
4466    let byte = src.read_u8()?;
4467    let axis = byte & 0x01;
4468    skip_box_remain(src)?;
4469    Ok(ImageMirror { axis })
4470}
4471
4472/// Parse a Clean Aperture property box.
4473/// See ISOBMFF § 12.1.4. NOT a FullBox.
4474fn read_clap<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<CleanAperture> {
4475    let width_n = be_u32(src)?;
4476    let width_d = be_u32(src)?;
4477    let height_n = be_u32(src)?;
4478    let height_d = be_u32(src)?;
4479    let horiz_off_n = be_i32(src)?;
4480    let horiz_off_d = be_u32(src)?;
4481    let vert_off_n = be_i32(src)?;
4482    let vert_off_d = be_u32(src)?;
4483    // Validate denominators are non-zero
4484    if width_d == 0 || height_d == 0 || horiz_off_d == 0 || vert_off_d == 0 {
4485        return Err(Error::InvalidData("clap denominator cannot be zero"));
4486    }
4487    skip_box_remain(src)?;
4488    Ok(CleanAperture {
4489        width_n, width_d,
4490        height_n, height_d,
4491        horiz_off_n, horiz_off_d,
4492        vert_off_n, vert_off_d,
4493    })
4494}
4495
4496/// Parse a Pixel Aspect Ratio property box.
4497/// See ISOBMFF § 12.1.4. NOT a FullBox.
4498fn read_pasp<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<PixelAspectRatio> {
4499    let h_spacing = be_u32(src)?;
4500    let v_spacing = be_u32(src)?;
4501    skip_box_remain(src)?;
4502    Ok(PixelAspectRatio { h_spacing, v_spacing })
4503}
4504
4505/// Parse a Content Light Level Info property box.
4506/// See ISOBMFF § 12.1.5 / ITU-T H.274. NOT a FullBox.
4507fn read_clli<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<ContentLightLevel> {
4508    let max_content_light_level = be_u16(src)?;
4509    let max_pic_average_light_level = be_u16(src)?;
4510    skip_box_remain(src)?;
4511    Ok(ContentLightLevel {
4512        max_content_light_level,
4513        max_pic_average_light_level,
4514    })
4515}
4516
4517/// Parse a Mastering Display Colour Volume property box.
4518/// See ISOBMFF § 12.1.5 / SMPTE ST 2086. NOT a FullBox.
4519fn read_mdcv<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<MasteringDisplayColourVolume> {
4520    // 3 primaries, each (x, y) as u16
4521    let primaries = [
4522        (be_u16(src)?, be_u16(src)?),
4523        (be_u16(src)?, be_u16(src)?),
4524        (be_u16(src)?, be_u16(src)?),
4525    ];
4526    let white_point = (be_u16(src)?, be_u16(src)?);
4527    let max_luminance = be_u32(src)?;
4528    let min_luminance = be_u32(src)?;
4529    skip_box_remain(src)?;
4530    Ok(MasteringDisplayColourVolume {
4531        primaries,
4532        white_point,
4533        max_luminance,
4534        min_luminance,
4535    })
4536}
4537
/// Parse a Content Colour Volume property box.
/// See ISOBMFF § 12.1.5 / H.265 D.2.40. NOT a FullBox.
///
/// Each optional field group is guarded by a presence bit in the first byte.
fn read_cclv<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<ContentColourVolume> {
    // Presence bits read here: 0x20 = primaries, 0x10 = min luminance,
    // 0x08 = max luminance, 0x04 = avg luminance; other bits are ignored.
    let flags = src.read_u8()?;
    let primaries_present = flags & 0x20 != 0;
    let min_lum_present = flags & 0x10 != 0;
    let max_lum_present = flags & 0x08 != 0;
    let avg_lum_present = flags & 0x04 != 0;

    // Three (x, y) primaries as signed 32-bit pairs, when present.
    let primaries = if primaries_present {
        Some([
            (be_i32(src)?, be_i32(src)?),
            (be_i32(src)?, be_i32(src)?),
            (be_i32(src)?, be_i32(src)?),
        ])
    } else {
        None
    };

    let min_luminance = if min_lum_present { Some(be_u32(src)?) } else { None };
    let max_luminance = if max_lum_present { Some(be_u32(src)?) } else { None };
    let avg_luminance = if avg_lum_present { Some(be_u32(src)?) } else { None };

    skip_box_remain(src)?;
    Ok(ContentColourVolume {
        primaries,
        min_luminance,
        max_luminance,
        avg_luminance,
    })
}
4569
4570/// Parse an Ambient Viewing Environment property box.
4571/// See ISOBMFF § 12.1.5 / H.265 D.2.39. NOT a FullBox.
4572fn read_amve<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<AmbientViewingEnvironment> {
4573    let ambient_illuminance = be_u32(src)?;
4574    let ambient_light_x = be_u16(src)?;
4575    let ambient_light_y = be_u16(src)?;
4576    skip_box_remain(src)?;
4577    Ok(AmbientViewingEnvironment {
4578        ambient_illuminance,
4579        ambient_light_x,
4580        ambient_light_y,
4581    })
4582}
4583
4584/// Parse an Operating Point Selector property box.
4585/// See AVIF § 4.3.4. NOT a FullBox.
4586fn read_a1op<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<OperatingPointSelector> {
4587    let op_index = src.read_u8()?;
4588    if op_index > 31 {
4589        return Err(Error::InvalidData("a1op op_index must be 0..31"));
4590    }
4591    skip_box_remain(src)?;
4592    Ok(OperatingPointSelector { op_index })
4593}
4594
4595/// Parse a Layer Selector property box.
4596/// See HEIF (ISO 23008-12). NOT a FullBox.
4597fn read_lsel<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<LayerSelector> {
4598    let layer_id = be_u16(src)?;
4599    skip_box_remain(src)?;
4600    Ok(LayerSelector { layer_id })
4601}
4602
4603/// Parse an AV1 Layered Image Indexing property box.
4604/// See AVIF § 4.3.6. NOT a FullBox.
4605fn read_a1lx<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<AV1LayeredImageIndexing> {
4606    let flags = src.read_u8()?;
4607    let large_size = flags & 0x01 != 0;
4608    let layer_sizes = if large_size {
4609        [be_u32(src)?, be_u32(src)?, be_u32(src)?]
4610    } else {
4611        [u32::from(be_u16(src)?), u32::from(be_u16(src)?), u32::from(be_u16(src)?)]
4612    };
4613    skip_box_remain(src)?;
4614    Ok(AV1LayeredImageIndexing { layer_sizes })
4615}
4616
4617/// Parse an Image Spatial Extents property box
4618/// See ISO/IEC 23008-12:2017 § 6.5.3
4619fn read_ispe<T: Read>(src: &mut BMFFBox<'_, T>, options: &ParseOptions) -> Result<ImageSpatialExtents> {
4620    let _version = read_fullbox_version_no_flags(src, options)?;
4621    // Version is always 0 for ispe
4622
4623    let width = be_u32(src)?;
4624    let height = be_u32(src)?;
4625
4626    // Validate dimensions are non-zero (0×0 images are invalid)
4627    if width == 0 || height == 0 {
4628        return Err(Error::InvalidData("ispe dimensions cannot be zero"));
4629    }
4630
4631    Ok(ImageSpatialExtents { width, height })
4632}
4633
4634/// Parse a Movie Header box (mvhd)
4635/// See ISO/IEC 14496-12:2015 § 8.2.2
4636fn read_mvhd<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<MovieHeader> {
4637    let version = src.read_u8()?;
4638    let _flags = [src.read_u8()?, src.read_u8()?, src.read_u8()?];
4639
4640    let (timescale, duration) = if version == 1 {
4641        let _creation_time = be_u64(src)?;
4642        let _modification_time = be_u64(src)?;
4643        let timescale = be_u32(src)?;
4644        let duration = be_u64(src)?;
4645        (timescale, duration)
4646    } else {
4647        let _creation_time = be_u32(src)?;
4648        let _modification_time = be_u32(src)?;
4649        let timescale = be_u32(src)?;
4650        let duration = be_u32(src)?;
4651        (timescale, duration as u64)
4652    };
4653
4654    // Skip rest of mvhd (rate, volume, matrix, etc.)
4655    skip_box_remain(src)?;
4656
4657    Ok(MovieHeader { _timescale: timescale, _duration: duration })
4658}
4659
4660/// Parse a Media Header box (mdhd)
4661/// See ISO/IEC 14496-12:2015 § 8.4.2
4662fn read_mdhd<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<MediaHeader> {
4663    let version = src.read_u8()?;
4664    let _flags = [src.read_u8()?, src.read_u8()?, src.read_u8()?];
4665
4666    let (timescale, duration) = if version == 1 {
4667        let _creation_time = be_u64(src)?;
4668        let _modification_time = be_u64(src)?;
4669        let timescale = be_u32(src)?;
4670        let duration = be_u64(src)?;
4671        (timescale, duration)
4672    } else {
4673        let _creation_time = be_u32(src)?;
4674        let _modification_time = be_u32(src)?;
4675        let timescale = be_u32(src)?;
4676        let duration = be_u32(src)?;
4677        (timescale, duration as u64)
4678    };
4679
4680    // Skip language and pre_defined
4681    skip_box_remain(src)?;
4682
4683    Ok(MediaHeader { timescale, _duration: duration })
4684}
4685
4686/// Parse Time To Sample box (stts)
4687/// See ISO/IEC 14496-12:2015 § 8.6.1.2
4688fn read_stts<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<TryVec<TimeToSampleEntry>> {
4689    let _version = src.read_u8()?;
4690    let _flags = [src.read_u8()?, src.read_u8()?, src.read_u8()?];
4691    let entry_count = be_u32(src)?;
4692    // Each entry: sample_count (4) + sample_delta (4) = 8 bytes
4693    if (entry_count as u64) * 8 > src.bytes_left() {
4694        return Err(Error::InvalidData(
4695            "stts entry_count exceeds remaining box bytes",
4696        ));
4697    }
4698
4699    let mut entries = TryVec::new();
4700    for _ in 0..entry_count {
4701        entries.push(TimeToSampleEntry {
4702            sample_count: be_u32(src)?,
4703            sample_delta: be_u32(src)?,
4704        })?;
4705    }
4706
4707    Ok(entries)
4708}
4709
4710/// Parse Sample To Chunk box (stsc)
4711/// See ISO/IEC 14496-12:2015 § 8.7.4
4712fn read_stsc<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<TryVec<SampleToChunkEntry>> {
4713    let _version = src.read_u8()?;
4714    let _flags = [src.read_u8()?, src.read_u8()?, src.read_u8()?];
4715    let entry_count = be_u32(src)?;
4716    // Each entry: first_chunk (4) + samples_per_chunk (4) + sample_desc_index (4) = 12 bytes
4717    if (entry_count as u64) * 12 > src.bytes_left() {
4718        return Err(Error::InvalidData(
4719            "stsc entry_count exceeds remaining box bytes",
4720        ));
4721    }
4722
4723    let mut entries = TryVec::new();
4724    for _ in 0..entry_count {
4725        entries.push(SampleToChunkEntry {
4726            first_chunk: be_u32(src)?,
4727            samples_per_chunk: be_u32(src)?,
4728            _sample_description_index: be_u32(src)?,
4729        })?;
4730    }
4731
4732    Ok(entries)
4733}
4734
/// Parse Sample Size box (stsz)
/// See ISO/IEC 14496-12:2015 § 8.7.3
///
/// When `sample_size` is nonzero it applies to every sample; otherwise a
/// per-sample table of `sample_count` u32 sizes follows.
fn read_stsz<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<TryVec<u32>> {
    let _version = src.read_u8()?;
    let _flags = [src.read_u8()?, src.read_u8()?, src.read_u8()?];
    let sample_size = be_u32(src)?;
    let sample_count = be_u32(src)?;

    // Cap sample_count to avoid multi-GB allocations from malformed data.
    // 64M entries * 4 bytes = 256 MB, a generous upper bound for real AVIF files.
    const MAX_SAMPLE_COUNT: u32 = 64 * 1024 * 1024;
    if sample_count > MAX_SAMPLE_COUNT {
        return Err(Error::InvalidData("stsz sample_count exceeds maximum"));
    }

    let mut sizes = TryVec::new();
    if sample_size == 0 {
        // Variable sizes: each entry is 4 bytes
        if (sample_count as u64) * 4 > src.bytes_left() {
            return Err(Error::InvalidData(
                "stsz sample_count exceeds remaining box bytes",
            ));
        }
        // Variable sizes - read each one
        for _ in 0..sample_count {
            sizes.push(be_u32(src)?)?;
        }
    } else {
        // Constant size for all samples
        // NOTE(review): in this branch sample_count is not backed by payload
        // bytes, so a tiny box can still request up to MAX_SAMPLE_COUNT
        // entries (~256 MB). Allocation failure is handled by TryVec, but a
        // tighter bound may be worth considering.
        for _ in 0..sample_count {
            sizes.push(sample_size)?;
        }
    }

    Ok(sizes)
}
4771
4772/// Parse Chunk Offset box (stco or co64)
4773/// See ISO/IEC 14496-12:2015 § 8.7.5
4774fn read_chunk_offsets<T: Read>(src: &mut BMFFBox<'_, T>, is_64bit: bool) -> Result<TryVec<u64>> {
4775    let _version = src.read_u8()?;
4776    let _flags = [src.read_u8()?, src.read_u8()?, src.read_u8()?];
4777    let entry_count = be_u32(src)?;
4778    let bytes_per_entry: u64 = if is_64bit { 8 } else { 4 };
4779    if (entry_count as u64) * bytes_per_entry > src.bytes_left() {
4780        return Err(Error::InvalidData(
4781            "chunk offset entry_count exceeds remaining box bytes",
4782        ));
4783    }
4784
4785    let mut offsets = TryVec::new();
4786    for _ in 0..entry_count {
4787        let offset = if is_64bit {
4788            be_u64(src)?
4789        } else {
4790            be_u32(src)? as u64
4791        };
4792        offsets.push(offset)?;
4793    }
4794
4795    Ok(offsets)
4796}
4797
/// Parse Sample Description box (stsd) to extract codec config from VisualSampleEntry.
/// See ISO/IEC 14496-12:2015 § 8.5.2
///
/// For AVIF sequences, the VisualSampleEntry is `av01` which contains sub-boxes
/// like `av1C` (codec config) and `colr` (color info), similar to ipco properties.
///
/// Returns a `TrackCodecConfig` whose fields remain unset when no usable
/// `av01` entry is found; missing entries are not treated as an error here.
fn read_stsd<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<TrackCodecConfig> {
    let _version = src.read_u8()?;
    let _flags = [src.read_u8()?, src.read_u8()?, src.read_u8()?];
    let entry_count = be_u32(src)?;

    let mut config = TrackCodecConfig::default();

    // Parse first entry only (AVIF tracks have one sample description)
    let mut iter = src.box_iter();
    for _ in 0..entry_count {
        // Tolerate a declared entry_count larger than the boxes actually present.
        let Some(mut entry_box) = iter.next_box()? else {
            break;
        };

        // Check if this is an av01 VisualSampleEntry
        if entry_box.head.name != BoxType::AV1SampleEntry {
            skip_box_remain(&mut entry_box)?;
            continue;
        }

        // Skip VisualSampleEntry fixed fields (78 bytes total):
        //   reserved[6] + data_ref_index[2] + pre_defined[2] + reserved[2] +
        //   pre_defined[12] + width[2] + height[2] + horiz_res[4] + vert_res[4] +
        //   reserved[4] + frame_count[2] + compressorname[32] + depth[2] + pre_defined[2]
        const VISUAL_SAMPLE_ENTRY_SIZE: u64 = 78;
        // A truncated entry cannot hold the fixed fields; skip it rather than fail.
        if entry_box.bytes_left() < VISUAL_SAMPLE_ENTRY_SIZE {
            skip_box_remain(&mut entry_box)?;
            continue;
        }
        skip(&mut entry_box, VISUAL_SAMPLE_ENTRY_SIZE)?;

        // Parse sub-boxes within the VisualSampleEntry for av1C and colr
        let mut sub_iter = entry_box.box_iter();
        while let Some(mut sub_box) = sub_iter.next_box()? {
            match sub_box.head.name {
                BoxType::AV1CodecConfigurationBox => {
                    config.av1_config = Some(read_av1c(&mut sub_box)?);
                }
                BoxType::ColorInformationBox => {
                    // A malformed colr is tolerated: discard it (draining any
                    // leftover bytes) and keep parsing the remaining sub-boxes.
                    if let Ok(colr) = read_colr(&mut sub_box) {
                        config.color_info = Some(colr);
                    } else {
                        skip_box_remain(&mut sub_box)?;
                    }
                }
                _ => {
                    skip_box_remain(&mut sub_box)?;
                }
            }
        }

        // Only need the first av01 entry
        if config.av1_config.is_some() {
            break;
        }
    }

    Ok(config)
}
4862
4863/// Parse Sample Table box (stbl)
4864/// See ISO/IEC 14496-12:2015 § 8.5
4865fn read_stbl<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<(SampleTable, TrackCodecConfig)> {
4866    let mut time_to_sample = TryVec::new();
4867    let mut sample_to_chunk = TryVec::new();
4868    let mut sample_sizes = TryVec::new();
4869    let mut chunk_offsets = TryVec::new();
4870    let mut codec_config = TrackCodecConfig::default();
4871
4872    let mut iter = src.box_iter();
4873    while let Some(mut b) = iter.next_box()? {
4874        match b.head.name {
4875            BoxType::SampleDescriptionBox => {
4876                codec_config = read_stsd(&mut b)?;
4877            }
4878            BoxType::TimeToSampleBox => {
4879                time_to_sample = read_stts(&mut b)?;
4880            }
4881            BoxType::SampleToChunkBox => {
4882                sample_to_chunk = read_stsc(&mut b)?;
4883            }
4884            BoxType::SampleSizeBox => {
4885                sample_sizes = read_stsz(&mut b)?;
4886            }
4887            BoxType::ChunkOffsetBox => {
4888                chunk_offsets = read_chunk_offsets(&mut b, false)?;
4889            }
4890            BoxType::ChunkLargeOffsetBox => {
4891                chunk_offsets = read_chunk_offsets(&mut b, true)?;
4892            }
4893            _ => {
4894                skip_box_remain(&mut b)?;
4895            }
4896        }
4897    }
4898
4899    // Precompute per-sample byte offsets from sample_to_chunk + chunk_offsets + sample_sizes.
4900    // This flattens the ISOBMFF indirection into a simple array for O(1) frame lookup.
4901    let mut sample_offsets = TryVec::new();
4902    let mut sample_idx = 0usize;
4903    for (i, entry) in sample_to_chunk.iter().enumerate() {
4904        let next_first_chunk = sample_to_chunk
4905            .get(i + 1)
4906            .map(|e| e.first_chunk)
4907            .unwrap_or(u32::MAX);
4908
4909        for chunk_no in entry.first_chunk..next_first_chunk {
4910            if chunk_no == 0 {
4911                break;
4912            }
4913            let co_idx = (chunk_no - 1) as usize;
4914            let chunk_offset = match chunk_offsets.get(co_idx) {
4915                Some(&o) => o,
4916                None => break,
4917            };
4918
4919            let mut offset = chunk_offset;
4920            for _ in 0..entry.samples_per_chunk {
4921                if sample_idx >= sample_sizes.len() {
4922                    break;
4923                }
4924                sample_offsets.push(offset)?;
4925                offset += *sample_sizes.get(sample_idx)
4926                    .ok_or(Error::InvalidData("sample index mismatch"))? as u64;
4927                sample_idx += 1;
4928            }
4929        }
4930    }
4931
4932    Ok((SampleTable {
4933        time_to_sample,
4934        sample_sizes,
4935        sample_offsets,
4936    }, codec_config))
4937}
4938
4939/// Parse Track Header box (tkhd)
4940/// See ISO/IEC 14496-12:2015 § 8.3.2
4941fn read_tkhd<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<u32> {
4942    let version = src.read_u8()?;
4943    let _flags = [src.read_u8()?, src.read_u8()?, src.read_u8()?];
4944
4945    let track_id = if version == 1 {
4946        let _creation_time = be_u64(src)?;
4947        let _modification_time = be_u64(src)?;
4948        let track_id = be_u32(src)?;
4949        let _reserved = be_u32(src)?;
4950        let _duration = be_u64(src)?;
4951        track_id
4952    } else {
4953        let _creation_time = be_u32(src)?;
4954        let _modification_time = be_u32(src)?;
4955        let track_id = be_u32(src)?;
4956        let _reserved = be_u32(src)?;
4957        let _duration = be_u32(src)?;
4958        track_id
4959    };
4960
4961    // Skip rest (reserved, layer, alternate_group, volume, matrix, width, height)
4962    skip_box_remain(src)?;
4963    Ok(track_id)
4964}
4965
4966/// Parse Track Reference box (tref)
4967/// See ISO/IEC 14496-12:2015 § 8.3.3
4968///
4969/// Contains sub-boxes typed by FourCC (e.g., `auxl`, `cdsc`), each with a list of track IDs.
4970fn read_tref<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<TryVec<TrackReference>> {
4971    let mut refs = TryVec::new();
4972    let mut iter = src.box_iter();
4973    while let Some(mut b) = iter.next_box()? {
4974        let reference_type = FourCC::from(u32::from(b.head.name));
4975        let bytes_left = b.bytes_left();
4976        if bytes_left < 4 || bytes_left % 4 != 0 {
4977            skip_box_remain(&mut b)?;
4978            continue;
4979        }
4980        let count = bytes_left / 4;
4981        let mut track_ids = TryVec::new();
4982        for _ in 0..count {
4983            track_ids.push(be_u32(&mut b)?)?;
4984        }
4985        refs.push(TrackReference { reference_type, track_ids })?;
4986    }
4987    Ok(refs)
4988}
4989
4990/// Parse Edit List box (elst) to extract loop count from flags.
4991/// See ISO/IEC 14496-12:2015 § 8.6.6
4992///
4993/// Returns the loop count: flags bit 0 set = infinite looping (0), otherwise 1.
4994fn read_elst<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<u32> {
4995    let (version, flags) = read_fullbox_extra(src)?;
4996
4997    let entry_count = be_u32(src)?;
4998    // Skip all entries
4999    let entry_size: u64 = if version == 1 { 20 } else { 12 };
5000    skip(src, (entry_count as u64).checked_mul(entry_size)
5001        .ok_or(Error::InvalidData("edit list entry count overflow"))?)?;
5002    skip_box_remain(src)?;
5003
5004    // Bit 0 of flags: repeat (1 = infinite loop → loop_count=0, 0 = play once → loop_count=1)
5005    if flags & 1 != 0 {
5006        Ok(0) // infinite
5007    } else {
5008        Ok(1) // play once
5009    }
5010}
5011
5012/// Parse animation from moov box.
5013/// Returns all parsed tracks.
5014fn read_moov<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<TryVec<ParsedTrack>> {
5015    let mut tracks = TryVec::new();
5016
5017    let mut iter = src.box_iter();
5018    while let Some(mut b) = iter.next_box()? {
5019        match b.head.name {
5020            BoxType::MovieHeaderBox => {
5021                let _mvhd = read_mvhd(&mut b)?;
5022            }
5023            BoxType::TrackBox => {
5024                if let Some(track) = read_trak(&mut b)? {
5025                    tracks.push(track)?;
5026                }
5027            }
5028            _ => {
5029                skip_box_remain(&mut b)?;
5030            }
5031        }
5032    }
5033
5034    Ok(tracks)
5035}
5036
5037/// Parse track box (trak).
5038/// Returns a ParsedTrack if this track has a valid sample table.
5039fn read_trak<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<Option<ParsedTrack>> {
5040    let mut track_id = 0u32;
5041    let mut references = TryVec::new();
5042    let mut loop_count = 1u32; // default: play once
5043    let mut mdia_result: Option<(FourCC, u32, SampleTable, TrackCodecConfig)> = None;
5044
5045    let mut iter = src.box_iter();
5046    while let Some(mut b) = iter.next_box()? {
5047        match b.head.name {
5048            BoxType::TrackHeaderBox => {
5049                track_id = read_tkhd(&mut b)?;
5050            }
5051            BoxType::TrackReferenceBox => {
5052                references = read_tref(&mut b)?;
5053            }
5054            BoxType::EditBox => {
5055                // Parse edts to find elst
5056                let mut edts_iter = b.box_iter();
5057                while let Some(mut eb) = edts_iter.next_box()? {
5058                    if eb.head.name == BoxType::EditListBox {
5059                        loop_count = read_elst(&mut eb)?;
5060                    } else {
5061                        skip_box_remain(&mut eb)?;
5062                    }
5063                }
5064            }
5065            BoxType::MediaBox => {
5066                mdia_result = read_mdia(&mut b)?;
5067            }
5068            _ => {
5069                skip_box_remain(&mut b)?;
5070            }
5071        }
5072    }
5073
5074    if let Some((handler_type, media_timescale, sample_table, codec_config)) = mdia_result {
5075        Ok(Some(ParsedTrack {
5076            track_id,
5077            handler_type,
5078            media_timescale,
5079            sample_table,
5080            references,
5081            loop_count,
5082            codec_config,
5083        }))
5084    } else {
5085        Ok(None)
5086    }
5087}
5088
5089/// Parse media box (mdia).
5090/// Returns (handler_type, media_timescale, sample_table, codec_config) if valid.
5091fn read_mdia<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<Option<(FourCC, u32, SampleTable, TrackCodecConfig)>> {
5092    let mut media_timescale = 1000; // default
5093    let mut handler_type = FourCC::default();
5094    let mut stbl_result: Option<(SampleTable, TrackCodecConfig)> = None;
5095
5096    let mut iter = src.box_iter();
5097    while let Some(mut b) = iter.next_box()? {
5098        match b.head.name {
5099            BoxType::MediaHeaderBox => {
5100                let mdhd = read_mdhd(&mut b)?;
5101                media_timescale = mdhd.timescale;
5102            }
5103            BoxType::HandlerBox => {
5104                let hdlr = read_hdlr(&mut b)?;
5105                handler_type = hdlr.handler_type;
5106            }
5107            BoxType::MediaInformationBox => {
5108                stbl_result = read_minf(&mut b)?;
5109            }
5110            _ => {
5111                skip_box_remain(&mut b)?;
5112            }
5113        }
5114    }
5115
5116    if let Some((stbl, codec_config)) = stbl_result {
5117        Ok(Some((handler_type, media_timescale, stbl, codec_config)))
5118    } else {
5119        Ok(None)
5120    }
5121}
5122
5123/// Associate parsed tracks into color + optional alpha animation data.
5124///
5125/// - Color track: first with handler `pict` (fallback: first track with a sample table)
5126/// - Alpha track: handler `auxv` with `tref/auxl` referencing color's track_id
5127/// - Audio tracks (handler `soun`) are skipped
5128fn associate_tracks(tracks: TryVec<ParsedTrack>) -> Result<ParsedAnimationData> {
5129    // Find color track: first with handler_type == "pict"
5130    let color_idx = tracks
5131        .iter()
5132        .position(|t| t.handler_type == b"pict")
5133        .or_else(|| {
5134            // Fallback: first track that isn't audio
5135            tracks.iter().position(|t| t.handler_type != b"soun")
5136        })
5137        .ok_or(Error::InvalidData("no color track found in moov"))?;
5138
5139    let color_track = tracks.get(color_idx)
5140        .ok_or(Error::InvalidData("color track index out of bounds"))?;
5141    let color_track_id = color_track.track_id;
5142
5143    // Find alpha track: handler_type == "auxv" with tref/auxl referencing color track
5144    let alpha_idx = tracks.iter().position(|t| {
5145        t.handler_type == b"auxv"
5146            && t.references.iter().any(|r| {
5147                r.reference_type == b"auxl"
5148                    && r.track_ids.iter().any(|&id| id == color_track_id)
5149            })
5150    });
5151
5152    if let Some(ai) = alpha_idx {
5153        let alpha_track = tracks.get(ai)
5154            .ok_or(Error::InvalidData("alpha track index out of bounds"))?;
5155        let color_track = tracks.get(color_idx)
5156            .ok_or(Error::InvalidData("color track index out of bounds"))?;
5157        let alpha_frames = alpha_track.sample_table.sample_sizes.len();
5158        let color_frames = color_track.sample_table.sample_sizes.len();
5159        if alpha_frames != color_frames {
5160            warn!(
5161                "alpha track has {} frames but color track has {} frames",
5162                alpha_frames, color_frames
5163            );
5164        }
5165    }
5166
5167    // Destructure — we need to consume the vec
5168    // Convert to a std vec so we can remove by index
5169    let mut tracks_vec: std::vec::Vec<ParsedTrack> = tracks.into_iter().collect();
5170
5171    // Remove alpha first if it has a higher index to avoid shifting
5172    let (color_track, alpha_track) = if let Some(ai) = alpha_idx {
5173        if ai > color_idx {
5174            let alpha = tracks_vec.remove(ai);
5175            let color = tracks_vec.remove(color_idx);
5176            (color, Some(alpha))
5177        } else {
5178            let color = tracks_vec.remove(color_idx);
5179            let alpha = tracks_vec.remove(ai);
5180            (color, Some(alpha))
5181        }
5182    } else {
5183        let color = tracks_vec.remove(color_idx);
5184        (color, None)
5185    };
5186
5187    let (alpha_timescale, alpha_sample_table) = match alpha_track {
5188        Some(t) => (Some(t.media_timescale), Some(t.sample_table)),
5189        None => (None, None),
5190    };
5191
5192    Ok(ParsedAnimationData {
5193        color_timescale: color_track.media_timescale,
5194        color_codec_config: color_track.codec_config,
5195        color_sample_table: color_track.sample_table,
5196        alpha_timescale,
5197        alpha_sample_table,
5198        loop_count: color_track.loop_count,
5199    })
5200}
5201
5202/// Parse media information box (minf)
5203fn read_minf<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<Option<(SampleTable, TrackCodecConfig)>> {
5204    let mut iter = src.box_iter();
5205    while let Some(mut b) = iter.next_box()? {
5206        if b.head.name == BoxType::SampleTableBox {
5207            return Ok(Some(read_stbl(&mut b)?));
5208        } else {
5209            skip_box_remain(&mut b)?;
5210        }
5211    }
5212    Ok(None)
5213}
5214
/// Extract animation frames using sample table
///
/// For each sample, computes its duration from the time-to-sample table and
/// copies its bytes out of whichever mdat contains the sample's extent.
/// Frames whose data cannot be located are emitted empty with a warning.
#[cfg(feature = "eager")]
#[allow(deprecated)]
fn extract_animation_frames(
    sample_table: &SampleTable,
    media_timescale: u32,
    mdats: &mut [MediaDataBox],
) -> Result<TryVec<AnimationFrame>> {
    let mut frames = TryVec::new();

    // Calculate frame durations from time-to-sample
    let mut frame_durations = TryVec::new();
    for entry in &sample_table.time_to_sample {
        for _ in 0..entry.sample_count {
            let duration_ms = if media_timescale > 0 {
                ((entry.sample_delta as u64) * 1000) / (media_timescale as u64)
            } else {
                0
            };
            // Clamp overly large durations rather than failing the parse.
            frame_durations.push(u32::try_from(duration_ms).unwrap_or(u32::MAX))?;
        }
    }

    // Extract each frame using precomputed sample offsets
    for i in 0..sample_table.sample_sizes.len() {
        let sample_offset = *sample_table.sample_offsets.get(i)
            .ok_or(Error::InvalidData("sample offset index out of bounds"))?;
        let sample_size = *sample_table.sample_sizes.get(i)
            .ok_or(Error::InvalidData("sample size index out of bounds"))?;
        let duration_ms = frame_durations.get(i).copied().unwrap_or(0);

        // Checked arithmetic: a malformed offset near u64::MAX would
        // otherwise overflow (panicking in debug builds, producing a wrapped
        // extent range in release builds).
        let end = sample_offset
            .checked_add(u64::from(sample_size))
            .ok_or(Error::InvalidData("sample extent overflow"))?;
        // Build the extent once; it is reused across the mdat scan below.
        let range = ExtentRange::WithLength(Range {
            start: sample_offset,
            end,
        });

        let mut frame_data = TryVec::new();
        let mut found = false;

        for mdat in mdats.iter_mut() {
            if mdat.contains_extent(&range) {
                mdat.read_extent(&range, &mut frame_data)?;
                found = true;
                break;
            }
        }

        if !found {
            log::warn!("Animation frame {} not found in mdat", i);
        }

        frames.push(AnimationFrame {
            data: frame_data,
            duration_ms,
        })?;
    }

    Ok(frames)
}
5274
/// Parse an ImageGrid property box
/// See ISO/IEC 23008-12:2017 § 6.6.2.3
///
/// The grid item declares a rows x columns tiling whose reconstructed image
/// is output_width x output_height pixels.
///
/// NOTE(review): the spec names these byte fields `rows_minus_one` /
/// `columns_minus_one`; the raw values are stored here without the +1 —
/// confirm that consumers of `GridConfig` apply the adjustment.
fn read_grid<T: Read>(src: &mut BMFFBox<'_, T>, options: &ParseOptions) -> Result<GridConfig> {
    let version = read_fullbox_version_no_flags(src, options)?;
    if version > 0 {
        return Err(Error::Unsupported("grid version > 0"));
    }

    // Only bit 0 of the flags byte is defined: it selects the field width below.
    let flags_byte = src.read_u8()?;
    let rows = src.read_u8()?;
    let columns = src.read_u8()?;

    // flags & 1 determines field size: 0 = 16-bit, 1 = 32-bit
    let (output_width, output_height) = if flags_byte & 1 == 0 {
        // 16-bit fields
        (u32::from(be_u16(src)?), u32::from(be_u16(src)?))
    } else {
        // 32-bit fields
        (be_u32(src)?, be_u32(src)?)
    };

    Ok(GridConfig {
        rows,
        columns,
        output_width,
        output_height,
    })
}
5303
/// Parse an item location box inside a meta box
/// See ISO 14496-12:2015 § 8.11.3
///
/// Returns one `ItemLocationBoxItem` per item; each carries at least one
/// extent (absolute byte ranges into the file or into idat, depending on
/// `construction_method`). The entire payload must be consumed exactly or
/// the box is rejected.
fn read_iloc<T: Read>(src: &mut BMFFBox<'_, T>, options: &ParseOptions) -> Result<TryVec<ItemLocationBoxItem>> {
    let version: IlocVersion = read_fullbox_version_no_flags(src, options)?.try_into()?;

    // Field widths are expressed in 4-bit nibbles, so buffer the whole
    // payload and parse it at the bit level.
    let iloc = src.read_into_try_vec()?;
    let mut iloc = BitReader::new(&iloc);

    // Each *_size nibble selects a field width of 0, 4, or 8 bytes.
    let offset_size: IlocFieldSize = iloc.read_u8(4)?.try_into()?;
    let length_size: IlocFieldSize = iloc.read_u8(4)?.try_into()?;
    let base_offset_size: IlocFieldSize = iloc.read_u8(4)?.try_into()?;

    // index_size only exists in versions 1 and 2; version 0 reserves the nibble.
    let index_size: Option<IlocFieldSize> = match version {
        IlocVersion::One | IlocVersion::Two => Some(iloc.read_u8(4)?.try_into()?),
        IlocVersion::Zero => {
            let _reserved = iloc.read_u8(4)?;
            None
        },
    };

    // item_count widened from 16 to 32 bits in version 2.
    let item_count = match version {
        IlocVersion::Zero | IlocVersion::One => iloc.read_u32(16)?,
        IlocVersion::Two => iloc.read_u32(32)?,
    };

    // Cap pre-allocation: item_count is untrusted, actual data is bounded by bitstream
    let mut items = TryVec::with_capacity(item_count.to_usize().min(4096))?;

    for _ in 0..item_count {
        // item_id widened from 16 to 32 bits in version 2.
        let item_id = match version {
            IlocVersion::Zero | IlocVersion::One => iloc.read_u32(16)?,
            IlocVersion::Two => iloc.read_u32(32)?,
        };

        // The spec isn't entirely clear how an `iloc` should be interpreted for version 0,
        // which has no `construction_method` field. It does say:
        // "For maximum compatibility, version 0 of this box should be used in preference to
        //  version 1 with `construction_method==0`, or version 2 when possible."
        // We take this to imply version 0 can be interpreted as using file offsets.
        let construction_method = match version {
            IlocVersion::Zero => ConstructionMethod::File,
            IlocVersion::One | IlocVersion::Two => {
                let _reserved = iloc.read_u16(12)?;
                match iloc.read_u16(4)? {
                    0 => ConstructionMethod::File,
                    1 => ConstructionMethod::Idat,
                    2 => return Err(Error::Unsupported("construction_method 'item_offset' is not supported")),
                    _ => return Err(Error::InvalidData("construction_method is taken from the set 0, 1 or 2 per ISO 14496-12:2015 § 8.11.3.3")),
                }
            },
        };

        let data_reference_index = iloc.read_u16(16)?;

        // A nonzero index points into dref (external files), which this
        // parser does not resolve.
        if data_reference_index != 0 {
            return Err(Error::Unsupported("external file references (iloc.data_reference_index != 0) are not supported"));
        }

        // A base_offset_size of zero yields base_offset == 0 (read_u64(0) -> 0).
        let base_offset = iloc.read_u64(base_offset_size.to_bits())?;
        let extent_count = iloc.read_u16(16)?;

        if extent_count < 1 {
            return Err(Error::InvalidData("extent_count must have a value 1 or greater per ISO 14496-12:2015 § 8.11.3.3"));
        }

        // extent_count is at most u16::MAX, so this pre-allocation is bounded.
        let mut extents = TryVec::with_capacity(extent_count.to_usize())?;

        for _ in 0..extent_count {
            // Parsed but currently ignored, see `ItemLocationBoxExtent`
            let _extent_index = match &index_size {
                None | Some(IlocFieldSize::Zero) => None,
                Some(index_size) => {
                    debug_assert!(version == IlocVersion::One || version == IlocVersion::Two);
                    Some(iloc.read_u64(index_size.to_bits())?)
                },
            };

            // Per ISO 14496-12:2015 § 8.11.3.1:
            // "If the offset is not identified (the field has a length of zero), then the
            //  beginning of the source (offset 0) is implied"
            // This behavior will follow from BitReader::read_u64(0) -> 0.
            let extent_offset = iloc.read_u64(offset_size.to_bits())?;
            let extent_length = iloc.read_u64(length_size.to_bits())?;

            // "If the length is not specified, or specified as zero, then the entire length of
            //  the source is implied" (ibid)
            let start = base_offset
                .checked_add(extent_offset)
                .ok_or(Error::InvalidData("offset calculation overflow"))?;
            let extent_range = if extent_length == 0 {
                ExtentRange::ToEnd(RangeFrom { start })
            } else {
                let end = start
                    .checked_add(extent_length)
                    .ok_or(Error::InvalidData("end calculation overflow"))?;
                ExtentRange::WithLength(Range { start, end })
            };

            extents.push(ItemLocationBoxExtent { extent_range })?;
        }

        items.push(ItemLocationBoxItem { item_id, construction_method, extents })?;
    }

    // Trailing bits mean the declared field widths and counts disagree with
    // the payload size — reject rather than guess.
    if iloc.remaining() == 0 {
        Ok(items)
    } else {
        Err(Error::InvalidData("invalid iloc size"))
    }
}
5414
5415/// Parse an ftyp box.
5416/// See ISO 14496-12:2015 § 4.3
5417fn read_ftyp<T: Read>(src: &mut BMFFBox<'_, T>) -> Result<FileTypeBox> {
5418    let major = be_u32(src)?;
5419    let minor = be_u32(src)?;
5420    let bytes_left = src.bytes_left();
5421    if !bytes_left.is_multiple_of(4) {
5422        return Err(Error::InvalidData("invalid ftyp size"));
5423    }
5424    // Is a brand_count of zero valid?
5425    let brand_count = bytes_left / 4;
5426    let mut brands = TryVec::with_capacity(brand_count.try_into()?)?;
5427    for _ in 0..brand_count {
5428        brands.push(be_u32(src)?.into())?;
5429    }
5430    Ok(FileTypeBox {
5431        major_brand: From::from(major),
5432        minor_version: minor,
5433        compatible_brands: brands,
5434    })
5435}
5436
5437#[cfg_attr(debug_assertions, track_caller)]
5438fn check_parser_state<T>(header: &BoxHeader, left: &Take<T>) -> Result<(), Error> {
5439    let limit = left.limit();
5440    // Allow fully consumed boxes, or size=0 boxes (where original size was u64::MAX)
5441    if limit == 0 || header.size == u64::MAX {
5442        Ok(())
5443    } else {
5444        Err(Error::InvalidData("unread box content or bad parser sync"))
5445    }
5446}
5447
/// Skip a number of bytes that we don't care to parse.
///
/// Implemented as `io::copy` into `io::sink`, so no buffer is allocated.
/// NOTE(review): `io::copy` stops early without error if the reader hits EOF,
/// so fewer than `bytes` bytes may be skipped silently. `check_parser_state`
/// explicitly tolerates unread bytes for to-EOF (size == 0) boxes, which
/// presumably relies on this leniency — confirm before tightening.
fn skip<T: Read>(src: &mut T, bytes: u64) -> Result<()> {
    std::io::copy(&mut src.take(bytes), &mut std::io::sink())?;
    Ok(())
}
5453
5454fn be_u16<T: ReadBytesExt>(src: &mut T) -> Result<u16> {
5455    src.read_u16::<byteorder::BigEndian>().map_err(From::from)
5456}
5457
5458fn be_u32<T: ReadBytesExt>(src: &mut T) -> Result<u32> {
5459    src.read_u32::<byteorder::BigEndian>().map_err(From::from)
5460}
5461
5462fn be_i32<T: ReadBytesExt>(src: &mut T) -> Result<i32> {
5463    src.read_i32::<byteorder::BigEndian>().map_err(From::from)
5464}
5465
5466fn be_u64<T: ReadBytesExt>(src: &mut T) -> Result<u64> {
5467    src.read_u64::<byteorder::BigEndian>().map_err(From::from)
5468}