// edgefirst_tensor/tensor_dyn.rs

1// SPDX-FileCopyrightText: Copyright 2025 Au-Zone Technologies
2// SPDX-License-Identifier: Apache-2.0
3
4use crate::{DType, PixelFormat, Tensor, TensorMemory, TensorTrait};
5use half::f16;
6use std::fmt;
7
/// Type-erased tensor. Wraps a `Tensor<T>` with runtime element type.
///
/// Use [`TensorDyn::dtype`] to discover the element type at runtime, and the
/// generated `as_*` / `as_*_mut` / `into_*` accessors to recover the concrete
/// `Tensor<T>`.
#[non_exhaustive] // new element types may be added without a breaking change
pub enum TensorDyn {
    /// Unsigned 8-bit integer tensor.
    U8(Tensor<u8>),
    /// Signed 8-bit integer tensor.
    I8(Tensor<i8>),
    /// Unsigned 16-bit integer tensor.
    U16(Tensor<u16>),
    /// Signed 16-bit integer tensor.
    I16(Tensor<i16>),
    /// Unsigned 32-bit integer tensor.
    U32(Tensor<u32>),
    /// Signed 32-bit integer tensor.
    I32(Tensor<i32>),
    /// Unsigned 64-bit integer tensor.
    U64(Tensor<u64>),
    /// Signed 64-bit integer tensor.
    I64(Tensor<i64>),
    /// 16-bit floating-point tensor.
    F16(Tensor<f16>),
    /// 32-bit floating-point tensor.
    F32(Tensor<f32>),
    /// 64-bit floating-point tensor.
    F64(Tensor<f64>),
}
34
/// Dispatch a method call across all TensorDyn variants.
///
/// Expands to a `match` on `$self` that invokes `$method($($arg),*)` on the
/// inner `Tensor<T>` of whichever variant is active. Thanks to match
/// ergonomics the same macro serves both `&self` and `&mut self` receivers:
/// the binding `t` adopts the borrow mode of `$self`.
macro_rules! dispatch {
    ($self:expr, $method:ident $(, $arg:expr)*) => {
        match $self {
            TensorDyn::U8(t) => t.$method($($arg),*),
            TensorDyn::I8(t) => t.$method($($arg),*),
            TensorDyn::U16(t) => t.$method($($arg),*),
            TensorDyn::I16(t) => t.$method($($arg),*),
            TensorDyn::U32(t) => t.$method($($arg),*),
            TensorDyn::I32(t) => t.$method($($arg),*),
            TensorDyn::U64(t) => t.$method($($arg),*),
            TensorDyn::I64(t) => t.$method($($arg),*),
            TensorDyn::F16(t) => t.$method($($arg),*),
            TensorDyn::F32(t) => t.$method($($arg),*),
            TensorDyn::F64(t) => t.$method($($arg),*),
        }
    };
}
53
/// Generate the three downcast methods (ref, mut ref, owned) for one variant.
///
/// Parameters:
/// * `$variant`  — the `TensorDyn` enum variant to match against
/// * `$ty`       — the element type of the wrapped `Tensor`
/// * `$as_name` / `$as_mut_name` / `$into_name` — names for the generated
///   shared-reference, mutable-reference, and consuming accessors
macro_rules! downcast_methods {
    ($variant:ident, $ty:ty, $as_name:ident, $as_mut_name:ident, $into_name:ident) => {
        /// Returns a shared reference to the inner tensor if the type matches.
        pub fn $as_name(&self) -> Option<&Tensor<$ty>> {
            match self {
                Self::$variant(t) => Some(t),
                _ => None,
            }
        }

        /// Returns a mutable reference to the inner tensor if the type matches.
        pub fn $as_mut_name(&mut self) -> Option<&mut Tensor<$ty>> {
            match self {
                Self::$variant(t) => Some(t),
                _ => None,
            }
        }

        /// Unwraps the inner tensor if the type matches, otherwise returns `self` as `Err`.
        /// The Err variant is necessarily large (returns the unconsumed TensorDyn).
        #[allow(clippy::result_large_err)]
        pub fn $into_name(self) -> Result<Tensor<$ty>, Self> {
            match self {
                Self::$variant(t) => Ok(t),
                other => Err(other),
            }
        }
    };
}
84
impl TensorDyn {
    /// Return the runtime element type discriminant.
    ///
    /// This is the only accessor that cannot go through `dispatch!`, since the
    /// result depends on the variant itself rather than the inner tensor.
    pub fn dtype(&self) -> DType {
        match self {
            Self::U8(_) => DType::U8,
            Self::I8(_) => DType::I8,
            Self::U16(_) => DType::U16,
            Self::I16(_) => DType::I16,
            Self::U32(_) => DType::U32,
            Self::I32(_) => DType::I32,
            Self::U64(_) => DType::U64,
            Self::I64(_) => DType::I64,
            Self::F16(_) => DType::F16,
            Self::F32(_) => DType::F32,
            Self::F64(_) => DType::F64,
        }
    }

    /// Return the tensor shape.
    pub fn shape(&self) -> &[usize] {
        dispatch!(self, shape)
    }

    /// Return the tensor name (owned `String`).
    pub fn name(&self) -> String {
        dispatch!(self, name)
    }

    /// Return the pixel format (None if not an image tensor).
    pub fn format(&self) -> Option<PixelFormat> {
        dispatch!(self, format)
    }

    /// Return the image width (None if not an image tensor).
    pub fn width(&self) -> Option<usize> {
        dispatch!(self, width)
    }

    /// Return the image height (None if not an image tensor).
    pub fn height(&self) -> Option<usize> {
        dispatch!(self, height)
    }

    /// Return the total size of this tensor in bytes.
    pub fn size(&self) -> usize {
        dispatch!(self, size)
    }

    /// Return the memory allocation type.
    pub fn memory(&self) -> TensorMemory {
        dispatch!(self, memory)
    }

    /// Reshape this tensor. Total element count must remain the same.
    ///
    /// NOTE: reshaping also clears any image metadata (pixel format and row
    /// stride) — see the `set_format_clears_row_stride` test below.
    pub fn reshape(&mut self, shape: &[usize]) -> crate::Result<()> {
        dispatch!(self, reshape, shape)
    }

    /// Attach pixel format metadata to this tensor.
    ///
    /// Validates that the tensor's shape is compatible with the format's
    /// layout (packed, planar, or semi-planar).
    ///
    /// # Arguments
    ///
    /// * `format` - The pixel format to attach
    ///
    /// # Returns
    ///
    /// `Ok(())` on success, with the format stored as metadata on the tensor.
    ///
    /// # Errors
    ///
    /// Returns `Error::InvalidShape` if the tensor shape doesn't match
    /// the expected layout for the given format.
    pub fn set_format(&mut self, format: PixelFormat) -> crate::Result<()> {
        dispatch!(self, set_format, format)
    }

    /// Attach pixel format metadata, consuming and returning self.
    ///
    /// Enables builder-style chaining.
    ///
    /// # Arguments
    ///
    /// * `format` - The pixel format to attach
    ///
    /// # Returns
    ///
    /// The tensor with format metadata attached.
    ///
    /// # Errors
    ///
    /// Returns `Error::InvalidShape` if the tensor shape doesn't match
    /// the expected layout for the given format.
    pub fn with_format(mut self, format: PixelFormat) -> crate::Result<Self> {
        self.set_format(format)?;
        Ok(self)
    }

    /// Row stride in bytes (`None` = tightly packed).
    pub fn row_stride(&self) -> Option<usize> {
        dispatch!(self, row_stride)
    }

    /// Effective row stride: stored stride or computed from format and width.
    pub fn effective_row_stride(&self) -> Option<usize> {
        dispatch!(self, effective_row_stride)
    }

    /// Set the row stride in bytes for externally allocated buffers with
    /// row padding.
    ///
    /// Must be called before the tensor is first used for rendering. The
    /// format must be set before calling this method.
    pub fn set_row_stride(&mut self, stride: usize) -> crate::Result<()> {
        dispatch!(self, set_row_stride, stride)
    }

    /// Builder-style: set row stride, consuming and returning self.
    pub fn with_row_stride(mut self, stride: usize) -> crate::Result<Self> {
        self.set_row_stride(stride)?;
        Ok(self)
    }

    /// Byte offset within the DMA-BUF where image data starts (`None` = 0).
    pub fn plane_offset(&self) -> Option<usize> {
        dispatch!(self, plane_offset)
    }

    /// Set the byte offset within the DMA-BUF where image data starts.
    ///
    /// Infallible: unlike `set_row_stride`, no format is required first.
    pub fn set_plane_offset(&mut self, offset: usize) {
        dispatch!(self, set_plane_offset, offset)
    }

    /// Builder-style: set plane offset, consuming and returning self.
    pub fn with_plane_offset(mut self, offset: usize) -> Self {
        self.set_plane_offset(offset);
        self
    }

    /// Clone the file descriptor associated with this tensor.
    ///
    /// Prefer [`dmabuf_clone`](Self::dmabuf_clone) on Linux, which first
    /// checks that the tensor is DMA-backed.
    #[cfg(unix)]
    pub fn clone_fd(&self) -> crate::Result<std::os::fd::OwnedFd> {
        dispatch!(self, clone_fd)
    }

    /// Clone the DMA-BUF file descriptor backing this tensor (Linux only).
    ///
    /// # Returns
    ///
    /// An owned duplicate of the DMA-BUF file descriptor.
    ///
    /// # Errors
    ///
    /// * `Error::NotImplemented` if the tensor is not DMA-backed (Mem/Shm/Pbo)
    /// * `Error::IoError` if the fd clone syscall fails (e.g., fd limit reached)
    #[cfg(target_os = "linux")]
    pub fn dmabuf_clone(&self) -> crate::Result<std::os::fd::OwnedFd> {
        // Guard up front so callers get a clear error instead of whatever
        // clone_fd would report for a non-DMA backing.
        if self.memory() != TensorMemory::Dma {
            return Err(crate::Error::NotImplemented(format!(
                "dmabuf_clone requires DMA-backed tensor, got {:?}",
                self.memory()
            )));
        }
        self.clone_fd()
    }

    /// Borrow the DMA-BUF file descriptor backing this tensor (Linux only).
    ///
    /// # Returns
    ///
    /// A borrowed reference to the DMA-BUF file descriptor, tied to `self`'s
    /// lifetime.
    ///
    /// # Errors
    ///
    /// * `Error::NotImplemented` if the tensor is not DMA-backed
    #[cfg(target_os = "linux")]
    pub fn dmabuf(&self) -> crate::Result<std::os::fd::BorrowedFd<'_>> {
        dispatch!(self, dmabuf)
    }

    /// Return `true` if this tensor uses separate plane allocations.
    pub fn is_multiplane(&self) -> bool {
        dispatch!(self, is_multiplane)
    }

    /// Return the [`BufferIdentity`](crate::BufferIdentity) of the underlying
    /// allocation.
    ///
    /// Two `TensorDyn` values share a [`BufferIdentity::id`] iff they were
    /// produced by cloning the same allocation (e.g. through
    /// [`DmaTensor::try_clone`](crate::dma::DmaTensor::try_clone)). Separate
    /// imports of the same physical buffer (e.g. two `from_fd` calls on the
    /// same dmabuf fd) have **distinct** identities — use
    /// [`aliases`](Self::aliases) if you need to detect that case.
    pub fn buffer_identity(&self) -> &crate::BufferIdentity {
        dispatch!(self, buffer_identity)
    }

    /// Return `true` if `self` and `other` reference the same underlying
    /// buffer.
    ///
    /// This is the correct check for APIs that require distinct input and
    /// output tensors (e.g. `ImageProcessor::draw_decoded_masks`, where
    /// aliasing `dst` and `background` would cause the GL backend to read
    /// and write the same texture — undefined behaviour on most drivers).
    ///
    /// Matching is conservative:
    /// 1. Matching [`BufferIdentity::id`] → same buffer (always).
    /// 2. Matching backing type + matching dmabuf fd number (Linux, DMA
    ///    tensors only) → same buffer, even across separate `from_fd`
    ///    imports in the same process.
    ///
    /// Two distinct `dup`'d fds pointing at the same kernel dma-buf are
    /// **not** detected — there is no cheap way to resolve that without a
    /// round-trip through the kernel.
    pub fn aliases(&self, other: &Self) -> bool {
        if self.buffer_identity().id() == other.buffer_identity().id() {
            return true;
        }
        // Different backing types can never be the same allocation.
        if self.memory() != other.memory() {
            return false;
        }
        #[cfg(target_os = "linux")]
        if self.memory() == TensorMemory::Dma {
            use std::os::fd::AsRawFd;
            // Same raw fd number within one process ⇒ same dma-buf import.
            if let (Ok(a), Ok(b)) = (self.dmabuf(), other.dmabuf()) {
                return a.as_raw_fd() == b.as_raw_fd();
            }
        }
        false
    }

    // --- Downcasting ---

    downcast_methods!(U8, u8, as_u8, as_u8_mut, into_u8);
    downcast_methods!(I8, i8, as_i8, as_i8_mut, into_i8);
    downcast_methods!(U16, u16, as_u16, as_u16_mut, into_u16);
    downcast_methods!(I16, i16, as_i16, as_i16_mut, into_i16);
    downcast_methods!(U32, u32, as_u32, as_u32_mut, into_u32);
    downcast_methods!(I32, i32, as_i32, as_i32_mut, into_i32);
    downcast_methods!(U64, u64, as_u64, as_u64_mut, into_u64);
    downcast_methods!(I64, i64, as_i64, as_i64_mut, into_i64);
    downcast_methods!(F16, f16, as_f16, as_f16_mut, into_f16);
    downcast_methods!(F32, f32, as_f32, as_f32_mut, into_f32);
    downcast_methods!(F64, f64, as_f64, as_f64_mut, into_f64);

    /// Create a type-erased tensor with the given shape and element type.
    ///
    /// Monomorphises to the matching `Tensor::<T>::new` based on `dtype`.
    pub fn new(
        shape: &[usize],
        dtype: DType,
        memory: Option<TensorMemory>,
        name: Option<&str>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => Tensor::<u8>::new(shape, memory, name).map(Self::U8),
            DType::I8 => Tensor::<i8>::new(shape, memory, name).map(Self::I8),
            DType::U16 => Tensor::<u16>::new(shape, memory, name).map(Self::U16),
            DType::I16 => Tensor::<i16>::new(shape, memory, name).map(Self::I16),
            DType::U32 => Tensor::<u32>::new(shape, memory, name).map(Self::U32),
            DType::I32 => Tensor::<i32>::new(shape, memory, name).map(Self::I32),
            DType::U64 => Tensor::<u64>::new(shape, memory, name).map(Self::U64),
            DType::I64 => Tensor::<i64>::new(shape, memory, name).map(Self::I64),
            DType::F16 => Tensor::<f16>::new(shape, memory, name).map(Self::F16),
            DType::F32 => Tensor::<f32>::new(shape, memory, name).map(Self::F32),
            DType::F64 => Tensor::<f64>::new(shape, memory, name).map(Self::F64),
        }
    }

    /// Create a type-erased tensor from a file descriptor.
    #[cfg(unix)]
    pub fn from_fd(
        fd: std::os::fd::OwnedFd,
        shape: &[usize],
        dtype: DType,
        name: Option<&str>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => Tensor::<u8>::from_fd(fd, shape, name).map(Self::U8),
            DType::I8 => Tensor::<i8>::from_fd(fd, shape, name).map(Self::I8),
            DType::U16 => Tensor::<u16>::from_fd(fd, shape, name).map(Self::U16),
            DType::I16 => Tensor::<i16>::from_fd(fd, shape, name).map(Self::I16),
            DType::U32 => Tensor::<u32>::from_fd(fd, shape, name).map(Self::U32),
            DType::I32 => Tensor::<i32>::from_fd(fd, shape, name).map(Self::I32),
            DType::U64 => Tensor::<u64>::from_fd(fd, shape, name).map(Self::U64),
            DType::I64 => Tensor::<i64>::from_fd(fd, shape, name).map(Self::I64),
            DType::F16 => Tensor::<f16>::from_fd(fd, shape, name).map(Self::F16),
            DType::F32 => Tensor::<f32>::from_fd(fd, shape, name).map(Self::F32),
            DType::F64 => Tensor::<f64>::from_fd(fd, shape, name).map(Self::F64),
        }
    }

    /// Create a type-erased image tensor.
    ///
    /// # Arguments
    ///
    /// * `width` - Image width in pixels
    /// * `height` - Image height in pixels
    /// * `format` - Pixel format
    /// * `dtype` - Element type discriminant
    /// * `memory` - Optional memory backend (None selects the best available)
    ///
    /// # Returns
    ///
    /// A new `TensorDyn` wrapping an image tensor of the requested element type.
    ///
    /// # Errors
    ///
    /// Returns an error if the underlying `Tensor::image` call fails.
    pub fn image(
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
        memory: Option<TensorMemory>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => Tensor::<u8>::image(width, height, format, memory).map(Self::U8),
            DType::I8 => Tensor::<i8>::image(width, height, format, memory).map(Self::I8),
            DType::U16 => Tensor::<u16>::image(width, height, format, memory).map(Self::U16),
            DType::I16 => Tensor::<i16>::image(width, height, format, memory).map(Self::I16),
            DType::U32 => Tensor::<u32>::image(width, height, format, memory).map(Self::U32),
            DType::I32 => Tensor::<i32>::image(width, height, format, memory).map(Self::I32),
            DType::U64 => Tensor::<u64>::image(width, height, format, memory).map(Self::U64),
            DType::I64 => Tensor::<i64>::image(width, height, format, memory).map(Self::I64),
            DType::F16 => Tensor::<f16>::image(width, height, format, memory).map(Self::F16),
            DType::F32 => Tensor::<f32>::image(width, height, format, memory).map(Self::F32),
            DType::F64 => Tensor::<f64>::image(width, height, format, memory).map(Self::F64),
        }
    }

    /// Create a DMA-backed image tensor with an explicit row stride that
    /// may exceed the natural `width * channels * sizeof(T)` pitch.
    ///
    /// See [`Tensor::image_with_stride`] for the detailed contract and
    /// constraints. The TensorDyn wrapper dispatches to the appropriate
    /// monomorphised `Tensor<T>` based on `dtype`.
    ///
    /// # Example
    ///
    /// ```no_run
    /// use edgefirst_tensor::{TensorDyn, PixelFormat, DType, TensorMemory};
    /// # fn main() -> edgefirst_tensor::Result<()> {
    /// // Allocate a 3004×1688 RGBA8 canvas with 64-byte pitch alignment
    /// // (12032 bytes per row instead of the natural 12016).
    /// let img = TensorDyn::image_with_stride(
    ///     3004, 1688,
    ///     PixelFormat::Rgba, DType::U8,
    ///     12032,
    ///     Some(TensorMemory::Dma),
    /// )?;
    /// assert_eq!(img.width(), Some(3004));       // logical, unchanged
    /// assert_eq!(img.effective_row_stride(), Some(12032)); // padded
    /// # Ok(())
    /// # }
    /// ```
    pub fn image_with_stride(
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
        row_stride_bytes: usize,
        memory: Option<TensorMemory>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => {
                Tensor::<u8>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U8)
            }
            DType::I8 => {
                Tensor::<i8>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I8)
            }
            DType::U16 => {
                Tensor::<u16>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U16)
            }
            DType::I16 => {
                Tensor::<i16>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I16)
            }
            DType::U32 => {
                Tensor::<u32>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U32)
            }
            DType::I32 => {
                Tensor::<i32>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I32)
            }
            DType::U64 => {
                Tensor::<u64>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U64)
            }
            DType::I64 => {
                Tensor::<i64>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I64)
            }
            DType::F16 => {
                Tensor::<f16>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::F16)
            }
            DType::F32 => {
                Tensor::<f32>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::F32)
            }
            DType::F64 => {
                Tensor::<f64>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::F64)
            }
        }
    }
}
499
500// --- From impls ---
501
502impl From<Tensor<u8>> for TensorDyn {
503    fn from(t: Tensor<u8>) -> Self {
504        Self::U8(t)
505    }
506}
507
508impl From<Tensor<i8>> for TensorDyn {
509    fn from(t: Tensor<i8>) -> Self {
510        Self::I8(t)
511    }
512}
513
514impl From<Tensor<u16>> for TensorDyn {
515    fn from(t: Tensor<u16>) -> Self {
516        Self::U16(t)
517    }
518}
519
520impl From<Tensor<i16>> for TensorDyn {
521    fn from(t: Tensor<i16>) -> Self {
522        Self::I16(t)
523    }
524}
525
526impl From<Tensor<u32>> for TensorDyn {
527    fn from(t: Tensor<u32>) -> Self {
528        Self::U32(t)
529    }
530}
531
532impl From<Tensor<i32>> for TensorDyn {
533    fn from(t: Tensor<i32>) -> Self {
534        Self::I32(t)
535    }
536}
537
538impl From<Tensor<u64>> for TensorDyn {
539    fn from(t: Tensor<u64>) -> Self {
540        Self::U64(t)
541    }
542}
543
544impl From<Tensor<i64>> for TensorDyn {
545    fn from(t: Tensor<i64>) -> Self {
546        Self::I64(t)
547    }
548}
549
550impl From<Tensor<f16>> for TensorDyn {
551    fn from(t: Tensor<f16>) -> Self {
552        Self::F16(t)
553    }
554}
555
556impl From<Tensor<f32>> for TensorDyn {
557    fn from(t: Tensor<f32>) -> Self {
558        Self::F32(t)
559    }
560}
561
562impl From<Tensor<f64>> for TensorDyn {
563    fn from(t: Tensor<f64>) -> Self {
564        Self::F64(t)
565    }
566}
567
// Debug formatting delegates to the active variant's inner `Tensor<T>`.
impl fmt::Debug for TensorDyn {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        dispatch!(self, fmt, f)
    }
}
573
574#[cfg(test)]
575mod tests {
576    use super::*;
577
578    #[test]
579    fn from_typed_tensor() {
580        let t = Tensor::<u8>::new(&[10], None, None).unwrap();
581        let dyn_t: TensorDyn = t.into();
582        assert_eq!(dyn_t.dtype(), DType::U8);
583        assert_eq!(dyn_t.shape(), &[10]);
584    }
585
586    #[test]
587    fn downcast_ref() {
588        let t = Tensor::<u8>::new(&[10], None, None).unwrap();
589        let dyn_t: TensorDyn = t.into();
590        assert!(dyn_t.as_u8().is_some());
591        assert!(dyn_t.as_i8().is_none());
592    }
593
594    #[test]
595    fn downcast_into() {
596        let t = Tensor::<u8>::new(&[10], None, None).unwrap();
597        let dyn_t: TensorDyn = t.into();
598        let back = dyn_t.into_u8().unwrap();
599        assert_eq!(back.shape(), &[10]);
600    }
601
602    #[test]
603    fn image_accessors() {
604        let t = Tensor::<u8>::image(640, 480, PixelFormat::Rgba, None).unwrap();
605        let dyn_t: TensorDyn = t.into();
606        assert_eq!(dyn_t.format(), Some(PixelFormat::Rgba));
607        assert_eq!(dyn_t.width(), Some(640));
608        assert_eq!(dyn_t.height(), Some(480));
609        assert!(!dyn_t.is_multiplane());
610    }
611
612    #[test]
613    fn image_constructor() {
614        let dyn_t = TensorDyn::image(640, 480, PixelFormat::Rgb, DType::U8, None).unwrap();
615        assert_eq!(dyn_t.dtype(), DType::U8);
616        assert_eq!(dyn_t.format(), Some(PixelFormat::Rgb));
617        assert_eq!(dyn_t.width(), Some(640));
618    }
619
620    #[test]
621    fn image_constructor_i8() {
622        let dyn_t = TensorDyn::image(640, 480, PixelFormat::Rgb, DType::I8, None).unwrap();
623        assert_eq!(dyn_t.dtype(), DType::I8);
624        assert_eq!(dyn_t.format(), Some(PixelFormat::Rgb));
625    }
626
627    #[test]
628    fn set_format_packed() {
629        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
630        assert_eq!(t.format(), None);
631        t.set_format(PixelFormat::Rgb).unwrap();
632        assert_eq!(t.format(), Some(PixelFormat::Rgb));
633        assert_eq!(t.width(), Some(640));
634        assert_eq!(t.height(), Some(480));
635    }
636
637    #[test]
638    fn set_format_planar() {
639        let mut t = TensorDyn::new(&[3, 480, 640], DType::U8, None, None).unwrap();
640        t.set_format(PixelFormat::PlanarRgb).unwrap();
641        assert_eq!(t.format(), Some(PixelFormat::PlanarRgb));
642        assert_eq!(t.width(), Some(640));
643        assert_eq!(t.height(), Some(480));
644    }
645
646    #[test]
647    fn set_format_rejects_wrong_shape() {
648        let mut t = TensorDyn::new(&[480, 640, 4], DType::U8, None, None).unwrap();
649        assert!(t.set_format(PixelFormat::Rgb).is_err());
650    }
651
652    #[test]
653    fn with_format_builder() {
654        let t = TensorDyn::new(&[480, 640, 4], DType::U8, None, None)
655            .unwrap()
656            .with_format(PixelFormat::Rgba)
657            .unwrap();
658        assert_eq!(t.format(), Some(PixelFormat::Rgba));
659        assert_eq!(t.width(), Some(640));
660        assert_eq!(t.height(), Some(480));
661    }
662
663    #[cfg(target_os = "linux")]
664    #[test]
665    fn dmabuf_clone_mem_tensor_fails() {
666        let t = TensorDyn::new(&[480, 640, 3], DType::U8, Some(TensorMemory::Mem), None).unwrap();
667        assert_eq!(t.memory(), TensorMemory::Mem);
668        assert!(t.dmabuf_clone().is_err());
669    }
670
671    #[cfg(target_os = "linux")]
672    #[test]
673    fn dmabuf_mem_tensor_fails() {
674        let t = TensorDyn::new(&[480, 640, 3], DType::U8, Some(TensorMemory::Mem), None).unwrap();
675        assert!(t.dmabuf().is_err());
676    }
677
678    #[test]
679    fn set_format_semi_planar_nv12() {
680        // 720 rows = 480 * 3/2 (NV12: height + height/2 for chroma)
681        let mut t = TensorDyn::new(&[720, 640], DType::U8, Some(TensorMemory::Mem), None).unwrap();
682        t.set_format(PixelFormat::Nv12).unwrap();
683        assert_eq!(t.format(), Some(PixelFormat::Nv12));
684        assert_eq!(t.width(), Some(640));
685        assert_eq!(t.height(), Some(480));
686    }
687
688    #[test]
689    fn set_format_semi_planar_nv16() {
690        // 960 rows = 480 * 2 (NV16: height + height for chroma)
691        let mut t = TensorDyn::new(&[960, 640], DType::U8, Some(TensorMemory::Mem), None).unwrap();
692        t.set_format(PixelFormat::Nv16).unwrap();
693        assert_eq!(t.format(), Some(PixelFormat::Nv16));
694        assert_eq!(t.width(), Some(640));
695        assert_eq!(t.height(), Some(480));
696    }
697
698    #[test]
699    fn with_format_rejects_wrong_shape() {
700        let result = TensorDyn::new(&[480, 640, 4], DType::U8, None, None)
701            .unwrap()
702            .with_format(PixelFormat::Rgb);
703        assert!(result.is_err());
704    }
705
706    #[test]
707    fn set_format_preserved_after_rejection() {
708        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
709        t.set_format(PixelFormat::Rgb).unwrap();
710        assert_eq!(t.format(), Some(PixelFormat::Rgb));
711
712        // Rgba requires 4 channels, should fail on a 3-channel tensor
713        assert!(t.set_format(PixelFormat::Rgba).is_err());
714
715        // Original format should be preserved
716        assert_eq!(t.format(), Some(PixelFormat::Rgb));
717    }
718
719    #[test]
720    fn set_format_idempotent() {
721        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
722        t.set_format(PixelFormat::Rgb).unwrap();
723        t.set_format(PixelFormat::Rgb).unwrap();
724        assert_eq!(t.format(), Some(PixelFormat::Rgb));
725        assert_eq!(t.width(), Some(640));
726        assert_eq!(t.height(), Some(480));
727    }
728
729    // --- Row stride tests ---
730
731    #[test]
732    fn set_row_stride_valid() {
733        // RGBA 100px wide: min stride = 400, set 512
734        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
735        t.set_row_stride(512).unwrap();
736        assert_eq!(t.row_stride(), Some(512));
737        assert_eq!(t.effective_row_stride(), Some(512));
738    }
739
740    #[test]
741    fn set_row_stride_equals_min() {
742        // RGB 100px: min stride = 300, set exactly 300
743        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
744        t.set_row_stride(300).unwrap();
745        assert_eq!(t.row_stride(), Some(300));
746    }
747
748    #[test]
749    fn set_row_stride_too_small() {
750        // RGBA 100px: min stride = 400, set 300
751        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
752        assert!(t.set_row_stride(300).is_err());
753        assert_eq!(t.row_stride(), None);
754    }
755
756    #[test]
757    fn set_row_stride_zero() {
758        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
759        assert!(t.set_row_stride(0).is_err());
760    }
761
762    #[test]
763    fn set_row_stride_requires_format() {
764        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
765        assert!(t.set_row_stride(2048).is_err());
766    }
767
768    #[test]
769    fn effective_row_stride_without_stride() {
770        let t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
771        assert_eq!(t.row_stride(), None);
772        assert_eq!(t.effective_row_stride(), Some(300)); // 100 * 3
773    }
774
775    #[test]
776    fn effective_row_stride_no_format() {
777        let t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
778        assert_eq!(t.effective_row_stride(), None);
779    }
780
781    #[test]
782    fn with_row_stride_builder() {
783        let t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None)
784            .unwrap()
785            .with_row_stride(512)
786            .unwrap();
787        assert_eq!(t.row_stride(), Some(512));
788        assert_eq!(t.effective_row_stride(), Some(512));
789    }
790
791    #[test]
792    fn with_row_stride_rejects_small() {
793        let result = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None)
794            .unwrap()
795            .with_row_stride(200);
796        assert!(result.is_err());
797    }
798
799    #[test]
800    fn set_format_clears_row_stride() {
801        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
802        t.set_format(PixelFormat::Rgb).unwrap();
803        t.set_row_stride(2048).unwrap();
804        assert_eq!(t.row_stride(), Some(2048));
805
806        // Incompatible format change (4-chan on 3-chan shape) fails — stride preserved
807        let _ = t.set_format(PixelFormat::Bgra);
808        assert_eq!(t.row_stride(), Some(2048));
809
810        // Re-set to same format — stride preserved
811        t.set_format(PixelFormat::Rgb).unwrap();
812        assert_eq!(t.row_stride(), Some(2048));
813
814        // Reshape clears format and stride
815        t.reshape(&[480 * 640 * 3]).unwrap();
816        assert_eq!(t.row_stride(), None);
817        assert_eq!(t.format(), None);
818    }
819
820    #[test]
821    fn set_format_different_compatible_clears_stride() {
822        // RGBA and BGRA are both 4-channel packed — switching between them
823        // succeeds and must clear the stored stride.
824        let mut t = TensorDyn::new(&[480, 640, 4], DType::U8, None, None).unwrap();
825        t.set_format(PixelFormat::Rgba).unwrap();
826        t.set_row_stride(4096).unwrap();
827        assert_eq!(t.row_stride(), Some(4096));
828
829        // Successful format change to a different compatible format clears stride
830        t.set_format(PixelFormat::Bgra).unwrap();
831        assert_eq!(t.format(), Some(PixelFormat::Bgra));
832        assert_eq!(t.row_stride(), None);
833    }
834
835    #[test]
836    fn set_format_same_preserves_stride() {
837        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
838        t.set_row_stride(512).unwrap();
839        // Re-setting the same format should not clear stride
840        t.set_format(PixelFormat::Rgb).unwrap();
841        assert_eq!(t.row_stride(), Some(512));
842    }
843
844    #[test]
845    fn effective_row_stride_planar() {
846        let t = TensorDyn::image(640, 480, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
847        assert_eq!(t.effective_row_stride(), Some(640)); // planar: width only
848    }
849
850    #[test]
851    fn effective_row_stride_nv12() {
852        let t = TensorDyn::image(640, 480, PixelFormat::Nv12, DType::U8, None).unwrap();
853        assert_eq!(t.effective_row_stride(), Some(640)); // semi-planar: width only
854    }
855
856    #[test]
857    fn map_rejects_strided_tensor() {
858        let mut t =
859            Tensor::<u8>::image(100, 100, PixelFormat::Rgba, Some(TensorMemory::Mem)).unwrap();
860        // Map works before stride is set
861        assert!(t.map().is_ok());
862        // After setting stride, map should be rejected
863        t.set_row_stride(512).unwrap();
864        let err = t.map();
865        assert!(err.is_err());
866    }
867
868    // ── plane_offset tests ──────────────────────────────────────────
869
870    #[test]
871    fn plane_offset_default_none() {
872        let t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
873        assert_eq!(t.plane_offset(), None);
874    }
875
876    #[test]
877    fn set_plane_offset_basic() {
878        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
879        t.set_plane_offset(4096);
880        assert_eq!(t.plane_offset(), Some(4096));
881    }
882
883    #[test]
884    fn set_plane_offset_zero() {
885        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
886        t.set_plane_offset(0);
887        assert_eq!(t.plane_offset(), Some(0));
888    }
889
890    #[test]
891    fn set_plane_offset_no_format() {
892        // plane_offset does not require format (it is format-independent)
893        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
894        t.set_plane_offset(4096);
895        assert_eq!(t.plane_offset(), Some(4096));
896    }
897
898    #[test]
899    fn with_plane_offset_builder() {
900        let t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None)
901            .unwrap()
902            .with_plane_offset(8192);
903        assert_eq!(t.plane_offset(), Some(8192));
904    }
905
906    #[test]
907    fn set_format_clears_plane_offset() {
908        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
909        t.set_format(PixelFormat::Rgb).unwrap();
910        t.set_plane_offset(4096);
911        assert_eq!(t.plane_offset(), Some(4096));
912
913        // Re-set same format — offset preserved
914        t.set_format(PixelFormat::Rgb).unwrap();
915        assert_eq!(t.plane_offset(), Some(4096));
916
917        // Reshape clears everything
918        t.reshape(&[480 * 640 * 3]).unwrap();
919        assert_eq!(t.plane_offset(), None);
920        assert_eq!(t.format(), None);
921    }
922
923    #[test]
924    fn map_rejects_offset_tensor() {
925        let mut t =
926            Tensor::<u8>::image(100, 100, PixelFormat::Rgba, Some(TensorMemory::Mem)).unwrap();
927        // Map works before offset is set
928        assert!(t.map().is_ok());
929        // After setting non-zero offset, map should be rejected
930        t.set_plane_offset(4096);
931        assert!(t.map().is_err());
932    }
933
934    #[test]
935    fn map_accepts_zero_offset_tensor() {
936        let mut t =
937            Tensor::<u8>::image(100, 100, PixelFormat::Rgba, Some(TensorMemory::Mem)).unwrap();
938        t.set_plane_offset(0);
939        // Zero offset is fine for CPU mapping
940        assert!(t.map().is_ok());
941    }
942
943    #[test]
944    fn from_planes_propagates_plane_offset() {
945        let mut luma =
946            Tensor::<u8>::new(&[480, 640], Some(TensorMemory::Mem), Some("luma")).unwrap();
947        luma.set_plane_offset(4096);
948        let chroma =
949            Tensor::<u8>::new(&[240, 640], Some(TensorMemory::Mem), Some("chroma")).unwrap();
950        let combined = Tensor::<u8>::from_planes(luma, chroma, PixelFormat::Nv12).unwrap();
951        assert_eq!(combined.plane_offset(), Some(4096));
952    }
953}