Skip to main content

nv_frame/
frame.rs

1//! Core frame types: [`FrameEnvelope`], [`PixelFormat`], [`Residency`], [`DataAccess`],
2//! and pixel data.
3
4use std::any::Any;
5use std::borrow::Cow;
6use std::sync::{Arc, OnceLock};
7
8use nv_core::{FeedId, MonotonicTs, TypedMetadata, WallTs};
9
/// Pixel format of a decoded frame's samples.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PixelFormat {
    /// Packed 24-bit RGB (3 bytes per pixel).
    Rgb8,
    /// Packed 24-bit BGR (3 bytes per pixel).
    Bgr8,
    /// Packed 32-bit RGBA (4 bytes per pixel).
    Rgba8,
    /// Planar NV12: Y plane followed by interleaved UV (12 bits per pixel).
    Nv12,
    /// Planar I420: separate Y, U, and V planes (12 bits per pixel).
    I420,
    /// Single-channel 8-bit grayscale (1 byte per pixel).
    Gray8,
}

impl PixelFormat {
    /// Bytes per pixel for packed formats.
    ///
    /// Planar layouts (NV12, I420) have no single per-pixel byte count,
    /// so they yield `None`.
    #[must_use]
    pub fn bytes_per_pixel(&self) -> Option<u32> {
        let bpp = match self {
            Self::Gray8 => 1,
            Self::Rgb8 | Self::Bgr8 => 3,
            Self::Rgba8 => 4,
            Self::Nv12 | Self::I420 => return None,
        };
        Some(bpp)
    }
}
42
/// Physical storage location of a frame's pixel data.
///
/// `Residency` answers "where do the bytes live?" — it says nothing about
/// access semantics. A host-resident frame keeps its pixels in
/// CPU-accessible memory; a device-resident frame keeps them on an
/// accelerator (GPU, NPU, DMA‑buf, etc.) where host-readable bytes are
/// not directly available.
///
/// # Access pattern
///
/// In mixed CPU/GPU pipelines, dispatch on [`FrameEnvelope::data_access()`]:
///
/// ```ignore
/// match frame.data_access() {
///     DataAccess::HostReadable => {
///         let bytes = frame.host_data().unwrap();
///     }
///     DataAccess::MappableToHost => {
///         let pixels = frame.require_host_data()?;
///     }
///     DataAccess::Opaque => {
///         let handle = frame.accelerated_handle::<MyBuffer>().unwrap();
///     }
///     _ => {}
/// }
/// ```
///
/// Pure-CPU stages can instead rely on a single call to
/// [`FrameEnvelope::require_host_data()`], which covers all three cases.
///
/// # Relationship to [`DataAccess`]
///
/// `Residency` is the coarse **where**; [`DataAccess`] is the finer
/// **what host-access exists**:
///
/// | `Residency` | `DataAccess` | Meaning |
/// |---|---|---|
/// | `Host` | `HostReadable` | CPU bytes directly available |
/// | `Device` | `MappableToHost` | Device buffer, host-downloadable |
/// | `Device` | `Opaque` | Device buffer, no host path |
///
/// Prefer [`FrameEnvelope::data_access()`] whenever the finer-grained
/// classification matters.
///
/// # Adapter crates
///
/// No vendor or backend is named here on purpose. Concrete buffer types
/// (CUDA buffers, OpenCL images, DMA‑buf fds, …) are defined by adapter
/// crates and travel behind the type-erased handle returned by
/// [`FrameEnvelope::accelerated_handle()`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Residency {
    /// Pixel bytes sit in CPU-accessible memory;
    /// [`FrameEnvelope::host_data()`] returns `Some(&[u8])`.
    Host,
    /// Pixel data lives on an accelerator device.
    ///
    /// Host-readable bytes may or may not be obtainable — consult
    /// [`FrameEnvelope::data_access()`]. Use
    /// [`FrameEnvelope::accelerated_handle()`] to recover the
    /// type-erased device handle.
    Device,
}
106
/// What host-access is available for a frame's pixel data.
///
/// The finer-grained companion to [`Residency`]: where `Residency` tells
/// you **where** the data lives, `DataAccess` tells you **how** a CPU
/// consumer can (or cannot) obtain host bytes.
///
/// Retrieve via [`FrameEnvelope::data_access()`].
///
/// # Canonical usage
///
/// ```ignore
/// match frame.data_access() {
///     DataAccess::HostReadable => {
///         let bytes = frame.host_data().unwrap();
///     }
///     DataAccess::MappableToHost => {
///         let pixels = frame.require_host_data()?;
///     }
///     DataAccess::Opaque => {
///         let handle = frame.accelerated_handle::<MyAccelBuf>();
///     }
///     _ => { /* forward-compatible: handle additional access classes */ }
/// }
/// ```
#[non_exhaustive]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum DataAccess {
    /// Host-readable bytes are directly available (zero-copy borrow).
    HostReadable,
    /// Device-resident, but a host-materialization path exists.
    ///
    /// The first call to [`FrameEnvelope::require_host_data()`] runs the
    /// backend materializer and caches the result in the frame's
    /// `Arc`-shared inner state; later calls return a zero-copy borrow
    /// of the cached bytes.
    MappableToHost,
    /// Opaque accelerated data; no host view is guaranteed.
    ///
    /// [`FrameEnvelope::require_host_data()`] returns
    /// [`FrameAccessError::NotHostAccessible`].
    Opaque,
}
149
/// Errors from frame data access operations.
///
/// Returned by [`FrameEnvelope::require_host_data()`] when host bytes
/// cannot be obtained.
///
/// `Clone` is derived because failed materializations are cached in the
/// frame's shared state; the stored error is cloned and re-returned on
/// every subsequent access attempt.
#[derive(Debug, Clone, thiserror::Error)]
pub enum FrameAccessError {
    /// The frame is device-resident with no host materialization path.
    #[error("frame is not host-accessible (opaque device-resident data)")]
    NotHostAccessible,

    /// A host-materialization path exists but the transfer failed.
    #[error("host materialization failed: {detail}")]
    MaterializationFailed {
        /// Backend-provided failure description.
        detail: String,
    },
}
167
/// A function that materializes device-resident pixel data to host memory.
///
/// Adapter crates provide this when constructing device frames that support
/// host fallback (e.g., GPU buffers that can be mapped or downloaded).
///
/// Called by [`FrameEnvelope::require_host_data()`] on
/// [`DataAccess::MappableToHost`] frames. The result is automatically
/// cached per frame — only the first call invokes the closure.
///
/// The `Send + Sync` bounds are required because the closure is stored in
/// the frame's `Arc`-shared inner state, which may be accessed from any
/// thread holding a clone of the frame.
pub type HostMaterializeFn = Box<dyn Fn() -> Result<HostBytes, FrameAccessError> + Send + Sync>;
177
/// Host-readable bytes produced by materializing device-resident data.
///
/// Adapter crates return this from their [`HostMaterializeFn`] closures.
/// Two construction paths exist:
///
/// - [`HostBytes::from_vec`] — an owned allocation, e.g. the result of a
///   device-to-host download.
/// - [`HostBytes::from_mapped`] — a zero-copy mapped view, kept alive by
///   an opaque guard object (e.g., a memory-mapped device buffer).
pub struct HostBytes {
    repr: HostBytesRepr,
}

/// Internal storage: either an owned buffer or a raw view whose lifetime
/// is tied to `_guard`.
enum HostBytesRepr {
    Owned(Vec<u8>),
    Mapped {
        ptr: *const u8,
        len: usize,
        _guard: Box<dyn Any + Send + Sync>,
    },
}

// SAFETY: `Mapped` stores an immutable `*const u8` into a buffer kept
// alive by `_guard` (a `Send + Sync` trait-object box). The
// [`HostBytes::from_mapped`] contract requires the pointer to stay valid
// and the pointee unmutated while the guard exists — the same invariants
// `PixelData::Mapped` relies on.
unsafe impl Send for HostBytes {}
unsafe impl Sync for HostBytes {}

impl HostBytes {
    /// Create host bytes from an owned allocation.
    #[must_use]
    pub fn from_vec(data: Vec<u8>) -> Self {
        let repr = HostBytesRepr::Owned(data);
        Self { repr }
    }

    /// Create host bytes from a zero-copy mapped view.
    ///
    /// # Safety
    ///
    /// - `ptr` must point to at least `len` readable bytes.
    /// - `guard` must keep the underlying buffer alive.
    /// - The data at `ptr` must not be mutated while `guard` exists.
    #[must_use]
    pub unsafe fn from_mapped(
        ptr: *const u8,
        len: usize,
        guard: Box<dyn Any + Send + Sync>,
    ) -> Self {
        let repr = HostBytesRepr::Mapped {
            ptr,
            len,
            _guard: guard,
        };
        Self { repr }
    }
}

impl AsRef<[u8]> for HostBytes {
    fn as_ref(&self) -> &[u8] {
        match &self.repr {
            HostBytesRepr::Owned(bytes) => &bytes[..],
            // SAFETY: upheld by the `from_mapped` constructor contract —
            // the guard keeps the pointee alive and immutable.
            HostBytesRepr::Mapped { ptr, len, .. } => unsafe {
                std::slice::from_raw_parts(*ptr, *len)
            },
        }
    }
}

impl std::ops::Deref for HostBytes {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        self.as_ref()
    }
}

impl std::fmt::Debug for HostBytes {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self.repr {
            HostBytesRepr::Owned(bytes) => {
                write!(f, "HostBytes::Owned({} bytes)", bytes.len())
            }
            HostBytesRepr::Mapped { len, .. } => write!(f, "HostBytes::Mapped({len} bytes)"),
        }
    }
}
266
/// Opaque guard that keeps a zero-copy memory mapping alive.
///
/// When this guard drops, the underlying buffer (e.g., a GStreamer mapped buffer)
/// is released. The concrete type is set by `nv-media` and erased here to avoid
/// exposing GStreamer types.
///
/// Consumed by [`FrameEnvelope::new_mapped()`] and held inside
/// `PixelData::Mapped` for the lifetime of the frame and all its clones.
pub(crate) type PinGuard = Box<dyn Any + Send + Sync>;
273
/// How pixel data is stored.
///
/// Crate-internal: external code reaches pixel storage only through the
/// [`FrameEnvelope`] accessors (`host_data`, `require_host_data`,
/// `accelerated_handle`).
pub(crate) enum PixelData {
    /// Zero-copy: pointer into a memory-mapped buffer (e.g., GStreamer).
    /// The `_guard` holds the mapping alive; dropping it releases the buffer.
    Mapped {
        ptr: *const u8,
        len: usize,
        _guard: PinGuard,
    },
    /// Owned copy — used for synthetic/test frames or when zero-copy isn't available.
    Owned(Vec<u8>),
    /// Device-resident data (GPU, NPU, DMA-buf, etc.).
    ///
    /// The handle is type-erased; backend adapter crates define the
    /// concrete type and downcast via [`FrameEnvelope::accelerated_handle()`].
    Device {
        handle: Arc<dyn Any + Send + Sync>,
        /// Optional host-materialization callback.
        ///
        /// When present, [`FrameEnvelope::require_host_data()`] can
        /// download device data to a host `Vec<u8>`.
        materialize: Option<HostMaterializeFn>,
    },
}

// SAFETY: `Mapped` variant holds an immutable pointer (`*const u8`) into a
// buffer whose lifetime is managed by `_guard` (a trait-object box that is
// `Send + Sync`). The contract on `new_mapped()` requires:
//   1. The pointer remains valid as long as the guard lives.
//   2. The data at the pointer is never mutated while any `FrameEnvelope`
//      clone referencing it exists.
// These invariants ensure the pointee is `Sync`-safe (immutable shared data)
// and `Send`-safe (the guard is Send).
//
// `Device` variant holds an `Arc<dyn Any + Send + Sync>` which is
// inherently `Send + Sync`.
unsafe impl Send for PixelData {}
unsafe impl Sync for PixelData {}
312
/// Internal frame data behind the `Arc`.
///
/// Shared (read-only) by every clone of a [`FrameEnvelope`]; each field is
/// exposed through a matching public accessor on the envelope.
pub(crate) struct FrameInner {
    /// Feed this frame originated from.
    pub feed_id: FeedId,
    /// Monotonic frame counter within the feed session.
    pub seq: u64,
    /// Monotonic timestamp (nanoseconds since feed start).
    pub ts: MonotonicTs,
    /// Wall-clock timestamp (output/provenance only).
    pub wall_ts: WallTs,
    /// Frame width in pixels.
    pub width: u32,
    /// Frame height in pixels.
    pub height: u32,
    /// Pixel format of the decoded frame.
    pub format: PixelFormat,
    /// Row stride in bytes.
    pub stride: u32,
    /// Pixel storage: owned, mapped, or device-resident.
    pub data: PixelData,
    /// Per-frame metadata.
    pub metadata: TypedMetadata,
    /// Lazily cached host bytes for [`DataAccess::MappableToHost`] frames.
    /// Never touched by host-resident frames (the fast path bypasses it).
    ///
    /// # Memory note
    ///
    /// The cache lives inside the `Arc`-shared inner state, so it persists
    /// as long as **any** clone of this [`FrameEnvelope`] is alive. In
    /// fan-out topologies (e.g., multiple subscribers receiving clones of
    /// the same frame) the materialized bytes remain allocated until the
    /// last clone drops. Operators should account for this when sizing
    /// queue depths and subscriber counts on feeds with large device frames.
    pub host_cache: OnceLock<Result<HostBytes, FrameAccessError>>,
}
338
/// An immutable, ref-counted video frame.
///
/// `Clone` is cheap — it bumps the `Arc` reference count. Pixel data is
/// never copied between stages after construction.
///
/// Construct via [`FrameEnvelope::new_owned`] for test/synthetic frames,
/// or via the zero-copy bridge in `nv-media` for live streams.
#[derive(Clone)]
pub struct FrameEnvelope {
    // All frame state (identity, geometry, pixels, metadata, host-byte
    // cache) is shared between clones through this single Arc.
    pub(crate) inner: Arc<FrameInner>,
}
350
351impl FrameEnvelope {
352    /// Create a frame with owned pixel data.
353    ///
354    /// This copies the data into the frame. Use the zero-copy bridge
355    /// in `nv-media` for production paths.
356    #[must_use]
357    #[allow(clippy::too_many_arguments)]
358    pub fn new_owned(
359        feed_id: FeedId,
360        seq: u64,
361        ts: MonotonicTs,
362        wall_ts: WallTs,
363        width: u32,
364        height: u32,
365        format: PixelFormat,
366        stride: u32,
367        data: Vec<u8>,
368        metadata: TypedMetadata,
369    ) -> Self {
370        Self {
371            inner: Arc::new(FrameInner {
372                feed_id,
373                seq,
374                ts,
375                wall_ts,
376                width,
377                height,
378                format,
379                stride,
380                data: PixelData::Owned(data),
381                metadata,
382                host_cache: OnceLock::new(),
383            }),
384        }
385    }
386
387    /// Create a frame backed by a device-resident accelerated buffer.
388    ///
389    /// The handle is type-erased and stored behind an `Arc`. Backend
390    /// adapter crates create the concrete handle type; stages that
391    /// understand it recover it via
392    /// [`accelerated_handle()`](Self::accelerated_handle).
393    ///
394    /// ## Host materialization
395    ///
396    /// If the backend can download device data to host memory, pass a
397    /// `Some(materialize)` closure. This promotes the frame's
398    /// [`DataAccess`] from [`Opaque`](DataAccess::Opaque) to
399    /// [`MappableToHost`](DataAccess::MappableToHost), enabling CPU
400    /// consumers to call [`require_host_data()`](Self::require_host_data).
401    ///
402    /// Pass `None` when host fallback is unavailable or undesirable.
403    ///
404    /// ## Geometry fields
405    ///
406    /// `width`, `height`, `format`, and `stride` still describe the logical
407    /// image layout — they are meaningful metadata even when the raw bytes
408    /// are not host-accessible.
409    ///
410    /// ## Intended uses for the opaque handle
411    ///
412    /// - Accelerated decode buffers (hardware-decoded video surfaces)
413    /// - GPU tensors destined for inference
414    /// - Accelerator-native frame storage (DMA-buf, OpenCL images, …)
415    ///
416    /// The handle must **not** be used for general stage metadata or
417    /// cross-stage messaging. Use [`TypedMetadata`] for those purposes.
418    #[must_use]
419    #[allow(clippy::too_many_arguments)]
420    pub fn new_device(
421        feed_id: FeedId,
422        seq: u64,
423        ts: MonotonicTs,
424        wall_ts: WallTs,
425        width: u32,
426        height: u32,
427        format: PixelFormat,
428        stride: u32,
429        handle: Arc<dyn Any + Send + Sync>,
430        materialize: Option<HostMaterializeFn>,
431        metadata: TypedMetadata,
432    ) -> Self {
433        Self {
434            inner: Arc::new(FrameInner {
435                feed_id,
436                seq,
437                ts,
438                wall_ts,
439                width,
440                height,
441                format,
442                stride,
443                data: PixelData::Device {
444                    handle,
445                    materialize,
446                },
447                metadata,
448                host_cache: OnceLock::new(),
449            }),
450        }
451    }
452
453    /// Create a frame with a zero-copy mapped buffer.
454    ///
455    /// # Safety
456    ///
457    /// - `ptr` must point to `len` readable bytes.
458    /// - The `guard` must keep the underlying buffer alive for the lifetime
459    ///   of this frame (and all its clones).
460    /// - The data at `ptr` must not be mutated while any clone of this frame exists.
461    #[must_use]
462    #[allow(clippy::too_many_arguments)]
463    pub unsafe fn new_mapped(
464        feed_id: FeedId,
465        seq: u64,
466        ts: MonotonicTs,
467        wall_ts: WallTs,
468        width: u32,
469        height: u32,
470        format: PixelFormat,
471        stride: u32,
472        ptr: *const u8,
473        len: usize,
474        guard: PinGuard,
475        metadata: TypedMetadata,
476    ) -> Self {
477        Self {
478            inner: Arc::new(FrameInner {
479                feed_id,
480                seq,
481                ts,
482                wall_ts,
483                width,
484                height,
485                format,
486                stride,
487                data: PixelData::Mapped {
488                    ptr,
489                    len,
490                    _guard: guard,
491                },
492                metadata,
493                host_cache: OnceLock::new(),
494            }),
495        }
496    }
497
498    /// The feed this frame originated from.
499    #[must_use]
500    pub fn feed_id(&self) -> FeedId {
501        self.inner.feed_id
502    }
503
504    /// Monotonic frame counter within this feed session.
505    #[must_use]
506    pub fn seq(&self) -> u64 {
507        self.inner.seq
508    }
509
510    /// Monotonic timestamp (nanoseconds since feed start).
511    #[must_use]
512    pub fn ts(&self) -> MonotonicTs {
513        self.inner.ts
514    }
515
516    /// Wall-clock timestamp (for output/provenance only).
517    #[must_use]
518    pub fn wall_ts(&self) -> WallTs {
519        self.inner.wall_ts
520    }
521
522    /// Frame width in pixels.
523    #[must_use]
524    pub fn width(&self) -> u32 {
525        self.inner.width
526    }
527
528    /// Frame height in pixels.
529    #[must_use]
530    pub fn height(&self) -> u32 {
531        self.inner.height
532    }
533
534    /// Pixel format of the decoded frame.
535    #[must_use]
536    pub fn format(&self) -> PixelFormat {
537        self.inner.format
538    }
539
540    /// Row stride in bytes.
541    #[must_use]
542    pub fn stride(&self) -> u32 {
543        self.inner.stride
544    }
545
546    /// Where this frame's pixel data physically resides.
547    ///
548    /// For a finer-grained classification that distinguishes mappable
549    /// device frames from opaque ones, see [`data_access()`](Self::data_access).
550    ///
551    /// ```ignore
552    /// match frame.residency() {
553    ///     Residency::Host   => { /* use host_data() */ }
554    ///     Residency::Device => { /* use accelerated_handle::<T>() */ }
555    /// }
556    /// ```
557    #[must_use]
558    pub fn residency(&self) -> Residency {
559        match &self.inner.data {
560            PixelData::Owned(_) | PixelData::Mapped { .. } => Residency::Host,
561            PixelData::Device { .. } => Residency::Device,
562        }
563    }
564
565    /// What host-access is available for this frame's pixel data.
566    ///
567    /// This is the finer-grained companion to [`residency()`](Self::residency):
568    ///
569    /// | Return value | Meaning |
570    /// |---|---|
571    /// | [`HostReadable`](DataAccess::HostReadable) | [`host_data()`](Self::host_data) returns `Some`. |
572    /// | [`MappableToHost`](DataAccess::MappableToHost) | [`require_host_data()`](Self::require_host_data) will materialize. |
573    /// | [`Opaque`](DataAccess::Opaque) | No host path available. |
574    #[must_use]
575    pub fn data_access(&self) -> DataAccess {
576        match &self.inner.data {
577            PixelData::Owned(_) | PixelData::Mapped { .. } => DataAccess::HostReadable,
578            PixelData::Device {
579                materialize: Some(_),
580                ..
581            } => DataAccess::MappableToHost,
582            PixelData::Device {
583                materialize: None, ..
584            } => DataAccess::Opaque,
585        }
586    }
587
588    /// Whether host-readable pixel bytes are directly available.
589    ///
590    /// Equivalent to `self.data_access() == DataAccess::HostReadable`.
591    /// Note: returns `false` for [`DataAccess::MappableToHost`] frames —
592    /// use [`require_host_data()`](Self::require_host_data) to obtain
593    /// host bytes from those frames.
594    #[must_use]
595    pub fn is_host_readable(&self) -> bool {
596        !matches!(&self.inner.data, PixelData::Device { .. })
597    }
598
599    /// Host-readable pixel bytes, if available.
600    ///
601    /// Returns `Some(&[u8])` for host-resident frames ([`Residency::Host`]),
602    /// `None` for device-resident frames ([`Residency::Device`]).
603    ///
604    /// This is the **zero-cost** accessor for the hot path when frames are
605    /// known to be host-resident. For a fallback-aware accessor that can
606    /// materialize device data, see [`require_host_data()`](Self::require_host_data).
607    #[must_use]
608    pub fn host_data(&self) -> Option<&[u8]> {
609        match &self.inner.data {
610            PixelData::Owned(v) => Some(v.as_slice()),
611            // SAFETY: the PinGuard keeps the buffer alive and immutable.
612            PixelData::Mapped { ptr, len, .. } => {
613                Some(unsafe { std::slice::from_raw_parts(*ptr, *len) })
614            }
615            PixelData::Device { .. } => None,
616        }
617    }
618
619    /// Obtain host-readable bytes, materializing from device if needed.
620    ///
621    /// This is the **primary ergonomic accessor** for CPU consumers that
622    /// may receive either host-resident or device-resident frames.
623    ///
624    /// | Frame kind | First call | Subsequent calls |
625    /// |---|---|---|
626    /// | Host-resident | `Cow::Borrowed` (zero-copy) | Same |
627    /// | Device + materializer | `Cow::Borrowed` (materialize → cache) | `Cow::Borrowed` (cached) |
628    /// | Device, opaque | `Err(NotHostAccessible)` | Same |
629    ///
630    /// # Memoization
631    ///
632    /// For [`DataAccess::MappableToHost`] frames, the first call invokes
633    /// the backend's [`HostMaterializeFn`] and caches the result in the
634    /// frame's `Arc`-shared inner state. All subsequent calls (including
635    /// from clones of this frame) return a zero-copy borrow of the cache.
636    ///
637    /// **Failures are also cached**: if the materializer returns an error,
638    /// that error is retained and cloned on subsequent calls. Frame data
639    /// is immutable, so a transfer that fails for a given frame will not
640    /// succeed on retry.
641    ///
642    /// # Performance
643    ///
644    /// - **Host-resident frames**: the `OnceLock` cache is never touched;
645    ///   this returns a direct borrow with zero overhead.
646    /// - **First materialization**: may allocate and/or block (backend-dependent).
647    /// - **Cached access**: zero-copy borrow from the cached [`HostBytes`].
648    ///
649    /// # Example: CPU-only stage
650    ///
651    /// ```ignore
652    /// let pixels = frame.require_host_data()
653    ///     .map_err(|e| StageError::ProcessingFailed {
654    ///         stage_id: MY_STAGE,
655    ///         detail: e.to_string(),
656    ///     })?;
657    /// process_cpu(&pixels);
658    /// ```
659    pub fn require_host_data(&self) -> Result<Cow<'_, [u8]>, FrameAccessError> {
660        match &self.inner.data {
661            PixelData::Owned(v) => Ok(Cow::Borrowed(v.as_slice())),
662            // SAFETY: the PinGuard keeps the buffer alive and immutable.
663            PixelData::Mapped { ptr, len, .. } => Ok(Cow::Borrowed(unsafe {
664                std::slice::from_raw_parts(*ptr, *len)
665            })),
666            PixelData::Device {
667                materialize: Some(f),
668                ..
669            } => {
670                let cached = self.inner.host_cache.get_or_init(|| {
671                    match std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)) {
672                        Ok(result) => result,
673                        Err(payload) => {
674                            let detail = match payload.downcast_ref::<&str>() {
675                                Some(s) => (*s).to_owned(),
676                                None => match payload.downcast_ref::<String>() {
677                                    Some(s) => s.clone(),
678                                    None => "unknown panic in host materializer".to_owned(),
679                                },
680                            };
681                            Err(FrameAccessError::MaterializationFailed { detail })
682                        }
683                    }
684                });
685                match cached {
686                    Ok(bytes) => Ok(Cow::Borrowed(bytes.as_ref())),
687                    Err(e) => Err(e.clone()),
688                }
689            }
690            PixelData::Device {
691                materialize: None, ..
692            } => Err(FrameAccessError::NotHostAccessible),
693        }
694    }
695
696    /// Downcast the opaque accelerated handle to a concrete type `T`.
697    ///
698    /// Returns `Some(&T)` if the frame is device-resident and the handle
699    /// is of type `T`. Returns `None` for host-resident frames or if the
700    /// concrete type does not match.
701    ///
702    /// # Intended uses
703    ///
704    /// The accelerated handle is intended **only** for:
705    ///
706    /// - Backend adapter crates bridging accelerated decode buffers
707    /// - GPU tensors destined for inference
708    /// - Accelerator-native frame storage (DMA-buf, OpenCL images, …)
709    ///
710    /// It must **not** be used for general stage metadata, arbitrary
711    /// payload storage, or cross-stage messaging. Use
712    /// [`TypedMetadata`] for those purposes.
713    ///
714    /// # Example
715    ///
716    /// ```ignore
717    /// match frame.data_access() {
718    ///     DataAccess::HostReadable => {
719    ///         let bytes = frame.host_data().unwrap();
720    ///         run_on_cpu(bytes);
721    ///     }
722    ///     DataAccess::MappableToHost | DataAccess::Opaque => {
723    ///         let buf = frame.accelerated_handle::<MyAccelBuffer>()
724    ///             .expect("expected MyAccelBuffer");
725    ///         run_on_device(buf);
726    ///     }
727    ///     _ => {}
728    /// }
729    /// ```
730    #[must_use]
731    pub fn accelerated_handle<T: Send + Sync + 'static>(&self) -> Option<&T> {
732        match &self.inner.data {
733            PixelData::Device { handle, .. } => handle.downcast_ref::<T>(),
734            _ => None,
735        }
736    }
737
    /// Reference to per-frame metadata.
    ///
    /// The metadata lives in the `Arc`-shared inner state, so all clones
    /// of this frame observe the same (immutable) metadata.
    #[must_use]
    pub fn metadata(&self) -> &TypedMetadata {
        &self.inner.metadata
    }
743}
744
745impl std::fmt::Debug for FrameEnvelope {
746    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
747        f.debug_struct("FrameEnvelope")
748            .field("feed_id", &self.inner.feed_id)
749            .field("seq", &self.inner.seq)
750            .field("ts", &self.inner.ts)
751            .field("dims", &(self.inner.width, self.inner.height))
752            .field("format", &self.inner.format)
753            .field("residency", &self.residency())
754            .field("data_access", &self.data_access())
755            .finish()
756    }
757}
758
759#[cfg(test)]
760mod tests {
761    use super::*;
762
    /// Builds a 320x240 RGB8 frame with owned, zeroed pixel data
    /// (stride 960 = 320 px * 3 bytes per pixel).
    fn test_frame() -> FrameEnvelope {
        FrameEnvelope::new_owned(
            FeedId::new(1),
            0,
            MonotonicTs::ZERO,
            WallTs::from_micros(0),
            320,
            240,
            PixelFormat::Rgb8,
            960,
            vec![0u8; 320 * 240 * 3],
            TypedMetadata::new(),
        )
    }

    /// Clones must share the underlying pixel allocation (an `Arc` bump),
    /// verified here by pointer identity of the two `host_data()` views.
    #[test]
    fn clone_is_cheap() {
        let f1 = test_frame();
        let f2 = f1.clone();
        let d1 = f1.host_data().unwrap();
        let d2 = f2.host_data().unwrap();
        assert!(std::ptr::eq(d1.as_ptr(), d2.as_ptr()));
    }

    /// Basic getters reflect the construction arguments.
    #[test]
    fn accessors() {
        let f = test_frame();
        assert_eq!(f.width(), 320);
        assert_eq!(f.height(), 240);
        assert_eq!(f.format(), PixelFormat::Rgb8);
        assert_eq!(f.host_data().unwrap().len(), 320 * 240 * 3);
    }
795
796    // -- Send/Sync invariant tests --
797
    /// Compile-time assertion: `PixelData` is `Send`.
    /// A regression here fails `rustc` itself, not a runtime test.
    const _: () = {
        const fn assert_send<T: Send>() {}
        assert_send::<PixelData>();
    };

    /// Compile-time assertion: `PixelData` is `Sync`.
    const _: () = {
        const fn assert_sync<T: Sync>() {}
        assert_sync::<PixelData>();
    };

    /// Compile-time assertion: `FrameEnvelope` is `Send + Sync`.
    const _: () = {
        const fn assert_send_sync<T: Send + Sync>() {}
        assert_send_sync::<FrameEnvelope>();
    };
815
    /// Runtime test: owned frames can be sent across threads.
    #[test]
    fn frame_is_send_across_threads() {
        let f = test_frame();
        let handle = std::thread::spawn(move || {
            assert_eq!(f.width(), 320);
            f
        });
        // The frame is moved into the thread and handed back out:
        // Send is exercised in both directions.
        let f = handle.join().unwrap();
        assert_eq!(f.height(), 240);
    }

    /// Runtime test: cloned frames can be shared across threads (Sync).
    #[test]
    fn frame_is_sync_across_threads() {
        let f = Arc::new(test_frame());
        let f2 = Arc::clone(&f);
        let handle = std::thread::spawn(move || {
            assert_eq!(f2.host_data().unwrap().len(), 320 * 240 * 3);
        });
        // Both threads read the same frame concurrently.
        assert_eq!(f.width(), 320);
        handle.join().unwrap();
    }

    /// Runtime test: mapped frames maintain invariants across Send boundary.
    #[test]
    fn mapped_frame_send_invariant() {
        // The Vec is boxed up as the guard; moving a Vec does not move its
        // heap allocation, so `ptr` remains valid.
        let data = vec![1u8, 2, 3, 4, 5, 6];
        let ptr = data.as_ptr();
        let len = data.len();
        let guard: PinGuard = Box::new(data);

        let f = unsafe {
            FrameEnvelope::new_mapped(
                FeedId::new(1),
                0,
                MonotonicTs::ZERO,
                WallTs::from_micros(0),
                2,
                1,
                PixelFormat::Rgb8,
                6,
                ptr,
                len,
                guard,
                TypedMetadata::new(),
            )
        };
        let handle = std::thread::spawn(move || {
            assert_eq!(f.host_data().unwrap(), &[1, 2, 3, 4, 5, 6]);
        });
        handle.join().unwrap();
    }
869
870    // -- Residency / accelerated-handle tests --
871
    /// A mock accelerated buffer used only in tests.
    #[derive(Debug, Clone, PartialEq)]
    struct MockGpuBuffer {
        device_id: u32,
        mem_handle: u64,
    }

    /// Builds an opaque (no materializer) 1920x1080 NV12 device frame
    /// wrapping the given mock buffer.
    fn device_frame(surface: MockGpuBuffer) -> FrameEnvelope {
        FrameEnvelope::new_device(
            FeedId::new(2),
            10,
            MonotonicTs::from_nanos(5_000_000),
            WallTs::from_micros(100),
            1920,
            1080,
            PixelFormat::Nv12,
            1920,
            Arc::new(surface),
            None,
            TypedMetadata::new(),
        )
    }

    /// Owned frames are host-resident and expose their bytes directly.
    #[test]
    fn owned_frame_is_host_resident() {
        let f = test_frame();
        assert_eq!(f.residency(), Residency::Host);
        assert!(f.is_host_readable());
        assert!(f.host_data().is_some());
        assert_eq!(f.host_data().unwrap().len(), 320 * 240 * 3);
    }

    /// Mapped frames count as host-resident and expose the mapped bytes.
    #[test]
    fn mapped_frame_is_host_resident() {
        let data = vec![42u8; 6];
        let ptr = data.as_ptr();
        let len = data.len();
        let guard: PinGuard = Box::new(data);

        let f = unsafe {
            FrameEnvelope::new_mapped(
                FeedId::new(1),
                0,
                MonotonicTs::ZERO,
                WallTs::from_micros(0),
                2,
                1,
                PixelFormat::Rgb8,
                6,
                ptr,
                len,
                guard,
                TypedMetadata::new(),
            )
        };
        assert_eq!(f.residency(), Residency::Host);
        assert!(f.is_host_readable());
        assert_eq!(f.host_data(), Some([42u8; 6].as_slice()));
    }

    /// Opaque device frames report `Device` residency and no host bytes.
    #[test]
    fn device_frame_residency() {
        let f = device_frame(MockGpuBuffer {
            device_id: 0,
            mem_handle: 0xDEAD,
        });
        assert_eq!(f.residency(), Residency::Device);
        assert!(!f.is_host_readable());
        assert!(f.host_data().is_none());
    }
942
943    #[test]
944    fn accelerated_handle_downcast() {
945        let surface = MockGpuBuffer {
946            device_id: 3,
947            mem_handle: 0xBEEF,
948        };
949        let f = device_frame(surface.clone());
950        let recovered = f.accelerated_handle::<MockGpuBuffer>().unwrap();
951        assert_eq!(recovered, &surface);
952    }
953
954    #[test]
955    fn accelerated_handle_wrong_type_returns_none() {
956        let f = device_frame(MockGpuBuffer {
957            device_id: 0,
958            mem_handle: 0,
959        });
960        assert!(f.accelerated_handle::<String>().is_none());
961    }
962
963    #[test]
964    fn host_frame_accelerated_handle_returns_none() {
965        let f = test_frame();
966        assert!(f.accelerated_handle::<MockGpuBuffer>().is_none());
967    }
968
969    #[test]
970    fn device_frame_host_data_returns_none() {
971        let f = device_frame(MockGpuBuffer {
972            device_id: 0,
973            mem_handle: 0,
974        });
975        assert!(f.host_data().is_none());
976    }
977
978    #[test]
979    fn device_frame_preserves_metadata_and_geometry() {
980        #[derive(Clone, Debug, PartialEq)]
981        struct Tag(u32);
982
983        let mut meta = TypedMetadata::new();
984        meta.insert(Tag(99));
985
986        let f = FrameEnvelope::new_device(
987            FeedId::new(5),
988            42,
989            MonotonicTs::from_nanos(1_000),
990            WallTs::from_micros(500),
991            3840,
992            2160,
993            PixelFormat::Rgb8,
994            3840 * 3,
995            Arc::new(MockGpuBuffer {
996                device_id: 1,
997                mem_handle: 0xCAFE,
998            }),
999            None,
1000            meta,
1001        );
1002
1003        assert_eq!(f.feed_id(), FeedId::new(5));
1004        assert_eq!(f.seq(), 42);
1005        assert_eq!(f.width(), 3840);
1006        assert_eq!(f.height(), 2160);
1007        assert_eq!(f.format(), PixelFormat::Rgb8);
1008        assert_eq!(f.stride(), 3840 * 3);
1009        assert_eq!(f.metadata().get::<Tag>(), Some(&Tag(99)));
1010    }
1011
1012    #[test]
1013    fn device_frame_clone_shares_handle() {
1014        let f1 = device_frame(MockGpuBuffer {
1015            device_id: 0,
1016            mem_handle: 0xAA,
1017        });
1018        let f2 = f1.clone();
1019        // Both clones see the same handle.
1020        let s1 = f1.accelerated_handle::<MockGpuBuffer>().unwrap();
1021        let s2 = f2.accelerated_handle::<MockGpuBuffer>().unwrap();
1022        assert!(std::ptr::eq(s1, s2));
1023    }
1024
1025    #[test]
1026    fn device_frame_is_send_sync() {
1027        let f = device_frame(MockGpuBuffer {
1028            device_id: 0,
1029            mem_handle: 0,
1030        });
1031        let handle = std::thread::spawn(move || {
1032            assert_eq!(f.residency(), Residency::Device);
1033            f.accelerated_handle::<MockGpuBuffer>().unwrap().device_id
1034        });
1035        assert_eq!(handle.join().unwrap(), 0);
1036    }
1037
1038    /// Verify the canonical residency-branching pattern compiles and works.
1039    #[test]
1040    fn residency_branch_pattern() {
1041        let host = test_frame();
1042        let device = device_frame(MockGpuBuffer {
1043            device_id: 1,
1044            mem_handle: 0xFF,
1045        });
1046
1047        for f in [host, device] {
1048            match f.residency() {
1049                Residency::Host => {
1050                    let bytes = f.host_data().expect("host-resident");
1051                    assert!(!bytes.is_empty());
1052                }
1053                Residency::Device => {
1054                    let buf = f
1055                        .accelerated_handle::<MockGpuBuffer>()
1056                        .expect("expected MockGpuBuffer");
1057                    assert_eq!(buf.device_id, 1);
1058                }
1059            }
1060        }
1061    }
1062
1063    #[test]
1064    fn debug_includes_residency() {
1065        let f = test_frame();
1066        let dbg = format!("{f:?}");
1067        assert!(dbg.contains("Host"));
1068
1069        let f2 = device_frame(MockGpuBuffer {
1070            device_id: 0,
1071            mem_handle: 0,
1072        });
1073        let dbg2 = format!("{f2:?}");
1074        assert!(dbg2.contains("Device"));
1075    }
1076
1077    // -- DataAccess tests --
1078
1079    #[test]
1080    fn host_frame_data_access() {
1081        let f = test_frame();
1082        assert_eq!(f.data_access(), DataAccess::HostReadable);
1083    }
1084
1085    #[test]
1086    fn opaque_device_frame_data_access() {
1087        let f = device_frame(MockGpuBuffer {
1088            device_id: 0,
1089            mem_handle: 0,
1090        });
1091        assert_eq!(f.data_access(), DataAccess::Opaque);
1092    }
1093
1094    #[test]
1095    fn mappable_device_frame_data_access() {
1096        let data = vec![10u8, 20, 30];
1097        let f = FrameEnvelope::new_device(
1098            FeedId::new(3),
1099            0,
1100            MonotonicTs::ZERO,
1101            WallTs::from_micros(0),
1102            1,
1103            1,
1104            PixelFormat::Rgb8,
1105            3,
1106            Arc::new(MockGpuBuffer {
1107                device_id: 1,
1108                mem_handle: 0xAA,
1109            }),
1110            Some(Box::new(move || Ok(HostBytes::from_vec(data.clone())))),
1111            TypedMetadata::new(),
1112        );
1113        assert_eq!(f.residency(), Residency::Device);
1114        assert_eq!(f.data_access(), DataAccess::MappableToHost);
1115    }
1116
1117    // -- require_host_data tests --
1118
1119    #[test]
1120    fn require_host_data_host_frame_is_borrowed() {
1121        let f = test_frame();
1122        let cow = f.require_host_data().unwrap();
1123        assert!(matches!(cow, Cow::Borrowed(_)));
1124        assert_eq!(cow.len(), 320 * 240 * 3);
1125    }
1126
1127    #[test]
1128    fn require_host_data_mappable_device_materializes() {
1129        let expected = vec![1u8, 2, 3, 4, 5, 6];
1130        let expected_clone = expected.clone();
1131        let f = FrameEnvelope::new_device(
1132            FeedId::new(4),
1133            0,
1134            MonotonicTs::ZERO,
1135            WallTs::from_micros(0),
1136            2,
1137            1,
1138            PixelFormat::Rgb8,
1139            6,
1140            Arc::new(MockGpuBuffer {
1141                device_id: 0,
1142                mem_handle: 0,
1143            }),
1144            Some(Box::new(move || {
1145                Ok(HostBytes::from_vec(expected_clone.clone()))
1146            })),
1147            TypedMetadata::new(),
1148        );
1149        let cow = f.require_host_data().unwrap();
1150        assert!(matches!(cow, Cow::Borrowed(_)));
1151        assert_eq!(&*cow, &expected[..]);
1152    }
1153
1154    #[test]
1155    fn require_host_data_opaque_device_returns_error() {
1156        let f = device_frame(MockGpuBuffer {
1157            device_id: 0,
1158            mem_handle: 0,
1159        });
1160        let err = f.require_host_data().unwrap_err();
1161        assert!(matches!(err, FrameAccessError::NotHostAccessible));
1162    }
1163
1164    #[test]
1165    fn require_host_data_materialization_failure() {
1166        let f = FrameEnvelope::new_device(
1167            FeedId::new(5),
1168            0,
1169            MonotonicTs::ZERO,
1170            WallTs::from_micros(0),
1171            1,
1172            1,
1173            PixelFormat::Gray8,
1174            1,
1175            Arc::new(MockGpuBuffer {
1176                device_id: 0,
1177                mem_handle: 0,
1178            }),
1179            Some(Box::new(|| {
1180                Err(FrameAccessError::MaterializationFailed {
1181                    detail: "transfer timeout".into(),
1182                })
1183            })),
1184            TypedMetadata::new(),
1185        );
1186        let err = f.require_host_data().unwrap_err();
1187        assert!(matches!(
1188            err,
1189            FrameAccessError::MaterializationFailed { .. }
1190        ));
1191        assert!(err.to_string().contains("transfer timeout"));
1192    }
1193
1194    /// Verify data_access branching pattern compiles and works.
1195    #[test]
1196    fn data_access_branch_pattern() {
1197        let host = test_frame();
1198        let data = vec![42u8; 6];
1199        let mappable = FrameEnvelope::new_device(
1200            FeedId::new(6),
1201            0,
1202            MonotonicTs::ZERO,
1203            WallTs::from_micros(0),
1204            2,
1205            1,
1206            PixelFormat::Rgb8,
1207            6,
1208            Arc::new(MockGpuBuffer {
1209                device_id: 0,
1210                mem_handle: 0,
1211            }),
1212            Some(Box::new(move || Ok(HostBytes::from_vec(data.clone())))),
1213            TypedMetadata::new(),
1214        );
1215        let opaque = device_frame(MockGpuBuffer {
1216            device_id: 0,
1217            mem_handle: 0,
1218        });
1219
1220        for f in [host, mappable, opaque] {
1221            match f.data_access() {
1222                DataAccess::HostReadable => {
1223                    assert!(f.host_data().is_some());
1224                    assert!(f.require_host_data().is_ok());
1225                }
1226                DataAccess::MappableToHost => {
1227                    assert!(f.host_data().is_none());
1228                    assert!(f.require_host_data().is_ok());
1229                }
1230                DataAccess::Opaque => {
1231                    assert!(f.host_data().is_none());
1232                    assert!(f.require_host_data().is_err());
1233                }
1234            }
1235        }
1236    }
1237
1238    // -- HostBytes tests --
1239
1240    /// Compile-time assertion: `HostBytes` is `Send + Sync`.
1241    const _: () = {
1242        const fn assert_send_sync<T: Send + Sync>() {}
1243        assert_send_sync::<HostBytes>();
1244    };
1245
1246    #[test]
1247    fn host_bytes_from_vec() {
1248        let hb = HostBytes::from_vec(vec![1, 2, 3]);
1249        assert_eq!(hb.as_ref(), &[1, 2, 3]);
1250        assert_eq!(&*hb, &[1, 2, 3]);
1251    }
1252
1253    #[test]
1254    fn host_bytes_from_mapped_zero_copy() {
1255        let data = vec![10u8, 20, 30];
1256        let ptr = data.as_ptr();
1257        let len = data.len();
1258        let guard: Box<dyn Any + Send + Sync> = Box::new(data);
1259        let hb = unsafe { HostBytes::from_mapped(ptr, len, guard) };
1260        assert_eq!(hb.as_ref(), &[10, 20, 30]);
1261    }
1262
1263    // -- Memoization tests --
1264
1265    #[test]
1266    fn require_host_data_memoizes_materialization() {
1267        use std::sync::atomic::{AtomicU32, Ordering};
1268
1269        let call_count = Arc::new(AtomicU32::new(0));
1270        let cc = Arc::clone(&call_count);
1271        let f = FrameEnvelope::new_device(
1272            FeedId::new(10),
1273            0,
1274            MonotonicTs::ZERO,
1275            WallTs::from_micros(0),
1276            1,
1277            1,
1278            PixelFormat::Gray8,
1279            1,
1280            Arc::new(MockGpuBuffer {
1281                device_id: 0,
1282                mem_handle: 0,
1283            }),
1284            Some(Box::new(move || {
1285                cc.fetch_add(1, Ordering::Relaxed);
1286                Ok(HostBytes::from_vec(vec![42]))
1287            })),
1288            TypedMetadata::new(),
1289        );
1290
1291        let r1 = f.require_host_data().unwrap();
1292        let r2 = f.require_host_data().unwrap();
1293        assert_eq!(&*r1, &[42u8]);
1294        assert_eq!(&*r2, &[42u8]);
1295        // Both borrows reference the same cached bytes.
1296        assert!(std::ptr::eq(r1.as_ptr(), r2.as_ptr()));
1297        assert_eq!(call_count.load(Ordering::Relaxed), 1);
1298    }
1299
1300    #[test]
1301    fn require_host_data_cache_shared_across_clones() {
1302        use std::sync::atomic::{AtomicU32, Ordering};
1303
1304        let call_count = Arc::new(AtomicU32::new(0));
1305        let cc = Arc::clone(&call_count);
1306        let f1 = FrameEnvelope::new_device(
1307            FeedId::new(11),
1308            0,
1309            MonotonicTs::ZERO,
1310            WallTs::from_micros(0),
1311            1,
1312            1,
1313            PixelFormat::Gray8,
1314            1,
1315            Arc::new(MockGpuBuffer {
1316                device_id: 0,
1317                mem_handle: 0,
1318            }),
1319            Some(Box::new(move || {
1320                cc.fetch_add(1, Ordering::Relaxed);
1321                Ok(HostBytes::from_vec(vec![99]))
1322            })),
1323            TypedMetadata::new(),
1324        );
1325        let f2 = f1.clone();
1326
1327        let r1 = f1.require_host_data().unwrap();
1328        let r2 = f2.require_host_data().unwrap();
1329        assert_eq!(&*r1, &[99u8]);
1330        assert_eq!(&*r2, &[99u8]);
1331        // Materializer invoked exactly once across clones.
1332        assert_eq!(call_count.load(Ordering::Relaxed), 1);
1333    }
1334
1335    #[test]
1336    fn require_host_data_concurrent_access() {
1337        use std::sync::Barrier;
1338        use std::sync::atomic::{AtomicU32, Ordering};
1339
1340        let call_count = Arc::new(AtomicU32::new(0));
1341        let cc = Arc::clone(&call_count);
1342        let f = FrameEnvelope::new_device(
1343            FeedId::new(12),
1344            0,
1345            MonotonicTs::ZERO,
1346            WallTs::from_micros(0),
1347            1,
1348            1,
1349            PixelFormat::Gray8,
1350            1,
1351            Arc::new(MockGpuBuffer {
1352                device_id: 0,
1353                mem_handle: 0,
1354            }),
1355            Some(Box::new(move || {
1356                cc.fetch_add(1, Ordering::Relaxed);
1357                Ok(HostBytes::from_vec(vec![7]))
1358            })),
1359            TypedMetadata::new(),
1360        );
1361
1362        let barrier = Arc::new(Barrier::new(4));
1363        let handles: Vec<_> = (0..4)
1364            .map(|_| {
1365                let f = f.clone();
1366                let b = Arc::clone(&barrier);
1367                std::thread::spawn(move || {
1368                    b.wait();
1369                    let r = f.require_host_data().unwrap();
1370                    assert_eq!(&*r, &[7u8]);
1371                })
1372            })
1373            .collect();
1374
1375        for h in handles {
1376            h.join().unwrap();
1377        }
1378        // OnceLock guarantees at most one init.
1379        assert_eq!(call_count.load(Ordering::Relaxed), 1);
1380    }
1381
1382    #[test]
1383    fn require_host_data_failure_is_cached() {
1384        use std::sync::atomic::{AtomicU32, Ordering};
1385
1386        let call_count = Arc::new(AtomicU32::new(0));
1387        let cc = Arc::clone(&call_count);
1388        let f = FrameEnvelope::new_device(
1389            FeedId::new(13),
1390            0,
1391            MonotonicTs::ZERO,
1392            WallTs::from_micros(0),
1393            1,
1394            1,
1395            PixelFormat::Gray8,
1396            1,
1397            Arc::new(MockGpuBuffer {
1398                device_id: 0,
1399                mem_handle: 0,
1400            }),
1401            Some(Box::new(move || {
1402                cc.fetch_add(1, Ordering::Relaxed);
1403                Err(FrameAccessError::MaterializationFailed {
1404                    detail: "device busy".into(),
1405                })
1406            })),
1407            TypedMetadata::new(),
1408        );
1409
1410        let e1 = f.require_host_data().unwrap_err();
1411        let e2 = f.require_host_data().unwrap_err();
1412        assert!(e1.to_string().contains("device busy"));
1413        assert!(e2.to_string().contains("device busy"));
1414        // Materializer invoked exactly once; failure is cached.
1415        assert_eq!(call_count.load(Ordering::Relaxed), 1);
1416    }
1417
1418    #[test]
1419    fn require_host_data_catches_materializer_panic() {
1420        let f = FrameEnvelope::new_device(
1421            FeedId::new(15),
1422            0,
1423            MonotonicTs::ZERO,
1424            WallTs::from_micros(0),
1425            1,
1426            1,
1427            PixelFormat::Gray8,
1428            1,
1429            Arc::new(MockGpuBuffer {
1430                device_id: 0,
1431                mem_handle: 0,
1432            }),
1433            Some(Box::new(|| panic!("adapter exploded"))),
1434            TypedMetadata::new(),
1435        );
1436
1437        // Panic is caught and surfaced as a FrameAccessError, not propagated.
1438        let err = f.require_host_data().unwrap_err();
1439        assert!(matches!(
1440            err,
1441            FrameAccessError::MaterializationFailed { .. }
1442        ));
1443        assert!(err.to_string().contains("adapter exploded"));
1444
1445        // Error is cached — second call returns the same error without re-panicking.
1446        let err2 = f.require_host_data().unwrap_err();
1447        assert!(err2.to_string().contains("adapter exploded"));
1448    }
1449
1450    #[test]
1451    fn require_host_data_host_frame_skips_cache() {
1452        let f = test_frame();
1453        let r1 = f.require_host_data().unwrap();
1454        let r2 = f.require_host_data().unwrap();
1455        assert!(matches!(r1, Cow::Borrowed(_)));
1456        assert!(matches!(r2, Cow::Borrowed(_)));
1457        // Both borrow the same underlying owned-data; no cache involved.
1458        assert!(std::ptr::eq(r1.as_ptr(), r2.as_ptr()));
1459    }
1460
1461    #[test]
1462    fn require_host_data_mapped_materializer() {
1463        let f = FrameEnvelope::new_device(
1464            FeedId::new(14),
1465            0,
1466            MonotonicTs::ZERO,
1467            WallTs::from_micros(0),
1468            1,
1469            3,
1470            PixelFormat::Gray8,
1471            1,
1472            Arc::new(MockGpuBuffer {
1473                device_id: 0,
1474                mem_handle: 0,
1475            }),
1476            Some(Box::new(|| {
1477                let data = vec![10u8, 20, 30];
1478                let ptr = data.as_ptr();
1479                let len = data.len();
1480                let guard: Box<dyn Any + Send + Sync> = Box::new(data);
1481                Ok(unsafe { HostBytes::from_mapped(ptr, len, guard) })
1482            })),
1483            TypedMetadata::new(),
1484        );
1485
1486        let cow = f.require_host_data().unwrap();
1487        assert_eq!(&*cow, &[10, 20, 30]);
1488        // Second call: cached, same pointer.
1489        let cow2 = f.require_host_data().unwrap();
1490        assert!(std::ptr::eq(cow.as_ptr(), cow2.as_ptr()));
1491    }
1492}