// edgefirst_tensor/tensor_dyn.rs

1// SPDX-FileCopyrightText: Copyright 2025 Au-Zone Technologies
2// SPDX-License-Identifier: Apache-2.0
3
4use crate::{DType, PixelFormat, Tensor, TensorMemory, TensorTrait};
5use half::f16;
6use std::fmt;
7
/// Type-erased tensor. Wraps a `Tensor<T>` with runtime element type.
///
/// Use [`TensorDyn::dtype`] to recover the element type at runtime, and the
/// generated `as_*` / `as_*_mut` / `into_*` accessors to downcast back to the
/// typed `Tensor<T>`. Marked `#[non_exhaustive]` so new element types can be
/// added without a breaking change; downstream matches need a wildcard arm.
#[non_exhaustive]
pub enum TensorDyn {
    /// Unsigned 8-bit integer tensor.
    U8(Tensor<u8>),
    /// Signed 8-bit integer tensor.
    I8(Tensor<i8>),
    /// Unsigned 16-bit integer tensor.
    U16(Tensor<u16>),
    /// Signed 16-bit integer tensor.
    I16(Tensor<i16>),
    /// Unsigned 32-bit integer tensor.
    U32(Tensor<u32>),
    /// Signed 32-bit integer tensor.
    I32(Tensor<i32>),
    /// Unsigned 64-bit integer tensor.
    U64(Tensor<u64>),
    /// Signed 64-bit integer tensor.
    I64(Tensor<i64>),
    /// 16-bit floating-point tensor.
    F16(Tensor<f16>),
    /// 32-bit floating-point tensor.
    F32(Tensor<f32>),
    /// 64-bit floating-point tensor.
    F64(Tensor<f64>),
}
34
/// Dispatch a method call across all TensorDyn variants.
///
/// Expands to an exhaustive `match` over every variant, invoking `$method`
/// (with the optional trailing `$arg`s) on the inner typed `Tensor<T>`.
/// Because all arms are part of one `match`, `$method` must resolve to the
/// same return type for every element type `T`.
macro_rules! dispatch {
    ($self:expr, $method:ident $(, $arg:expr)*) => {
        match $self {
            TensorDyn::U8(t) => t.$method($($arg),*),
            TensorDyn::I8(t) => t.$method($($arg),*),
            TensorDyn::U16(t) => t.$method($($arg),*),
            TensorDyn::I16(t) => t.$method($($arg),*),
            TensorDyn::U32(t) => t.$method($($arg),*),
            TensorDyn::I32(t) => t.$method($($arg),*),
            TensorDyn::U64(t) => t.$method($($arg),*),
            TensorDyn::I64(t) => t.$method($($arg),*),
            TensorDyn::F16(t) => t.$method($($arg),*),
            TensorDyn::F32(t) => t.$method($($arg),*),
            TensorDyn::F64(t) => t.$method($($arg),*),
        }
    };
}
53
/// Generate the three downcast methods (ref, mut ref, owned) for one variant.
///
/// * `$variant` — the `TensorDyn` variant to match against
/// * `$ty` — the element type of the wrapped `Tensor`
/// * `$as_name` — name of the `Option<&Tensor<$ty>>` accessor
/// * `$as_mut_name` — name of the `Option<&mut Tensor<$ty>>` accessor
/// * `$into_name` — name of the consuming `Result<Tensor<$ty>, Self>` accessor
macro_rules! downcast_methods {
    ($variant:ident, $ty:ty, $as_name:ident, $as_mut_name:ident, $into_name:ident) => {
        /// Returns a shared reference to the inner tensor if the type matches.
        pub fn $as_name(&self) -> Option<&Tensor<$ty>> {
            match self {
                Self::$variant(t) => Some(t),
                _ => None,
            }
        }

        /// Returns a mutable reference to the inner tensor if the type matches.
        pub fn $as_mut_name(&mut self) -> Option<&mut Tensor<$ty>> {
            match self {
                Self::$variant(t) => Some(t),
                _ => None,
            }
        }

        /// Unwraps the inner tensor if the type matches, otherwise returns `self` as `Err`.
        /// The Err variant is necessarily large (returns the unconsumed TensorDyn).
        #[allow(clippy::result_large_err)]
        pub fn $into_name(self) -> Result<Tensor<$ty>, Self> {
            match self {
                Self::$variant(t) => Ok(t),
                other => Err(other),
            }
        }
    };
}
84
impl TensorDyn {
    /// Return the runtime element type discriminant.
    pub fn dtype(&self) -> DType {
        match self {
            Self::U8(_) => DType::U8,
            Self::I8(_) => DType::I8,
            Self::U16(_) => DType::U16,
            Self::I16(_) => DType::I16,
            Self::U32(_) => DType::U32,
            Self::I32(_) => DType::I32,
            Self::U64(_) => DType::U64,
            Self::I64(_) => DType::I64,
            Self::F16(_) => DType::F16,
            Self::F32(_) => DType::F32,
            Self::F64(_) => DType::F64,
        }
    }

    /// Return the tensor shape.
    pub fn shape(&self) -> &[usize] {
        dispatch!(self, shape)
    }

    /// Return the tensor name.
    // NOTE(review): delegates to `Tensor::<T>::name()`, which returns an
    // owned `String` — presumably the typed tensor stores or builds the name;
    // confirm against the typed impl.
    pub fn name(&self) -> String {
        dispatch!(self, name)
    }

    /// Return the pixel format (None if not an image tensor).
    pub fn format(&self) -> Option<PixelFormat> {
        dispatch!(self, format)
    }

    /// Return the image width (None if not an image tensor).
    pub fn width(&self) -> Option<usize> {
        dispatch!(self, width)
    }

    /// Return the image height (None if not an image tensor).
    pub fn height(&self) -> Option<usize> {
        dispatch!(self, height)
    }

    /// Return the total size of this tensor in bytes.
    pub fn size(&self) -> usize {
        dispatch!(self, size)
    }

    /// Return the memory allocation type.
    pub fn memory(&self) -> TensorMemory {
        dispatch!(self, memory)
    }

    /// Reshape this tensor. Total element count must remain the same.
    ///
    /// # Errors
    ///
    /// Delegates to `Tensor::<T>::reshape`; presumably fails when the new
    /// shape's element count differs — TODO confirm against the typed impl.
    pub fn reshape(&mut self, shape: &[usize]) -> crate::Result<()> {
        dispatch!(self, reshape, shape)
    }

    /// Attach pixel format metadata to this tensor.
    ///
    /// Validates that the tensor's shape is compatible with the format's
    /// layout (packed, planar, or semi-planar).
    ///
    /// # Arguments
    ///
    /// * `format` - The pixel format to attach
    ///
    /// # Returns
    ///
    /// `Ok(())` on success, with the format stored as metadata on the tensor.
    ///
    /// # Errors
    ///
    /// Returns `Error::InvalidShape` if the tensor shape doesn't match
    /// the expected layout for the given format.
    pub fn set_format(&mut self, format: PixelFormat) -> crate::Result<()> {
        dispatch!(self, set_format, format)
    }

    /// Attach pixel format metadata, consuming and returning self.
    ///
    /// Enables builder-style chaining.
    ///
    /// # Arguments
    ///
    /// * `format` - The pixel format to attach
    ///
    /// # Returns
    ///
    /// The tensor with format metadata attached.
    ///
    /// # Errors
    ///
    /// Returns `Error::InvalidShape` if the tensor shape doesn't match
    /// the expected layout for the given format.
    pub fn with_format(mut self, format: PixelFormat) -> crate::Result<Self> {
        self.set_format(format)?;
        Ok(self)
    }

    /// Row stride in bytes (`None` = tightly packed).
    pub fn row_stride(&self) -> Option<usize> {
        dispatch!(self, row_stride)
    }

    /// Effective row stride: stored stride or computed from format and width.
    pub fn effective_row_stride(&self) -> Option<usize> {
        dispatch!(self, effective_row_stride)
    }

    /// Set the row stride in bytes for externally allocated buffers with
    /// row padding.
    ///
    /// Must be called before the tensor is first used for rendering. The
    /// format must be set before calling this method.
    pub fn set_row_stride(&mut self, stride: usize) -> crate::Result<()> {
        dispatch!(self, set_row_stride, stride)
    }

    /// Builder-style: set row stride, consuming and returning self.
    pub fn with_row_stride(mut self, stride: usize) -> crate::Result<Self> {
        self.set_row_stride(stride)?;
        Ok(self)
    }

    /// Byte offset within the DMA-BUF where image data starts (`None` = 0).
    pub fn plane_offset(&self) -> Option<usize> {
        dispatch!(self, plane_offset)
    }

    /// Set the byte offset within the DMA-BUF where image data starts.
    // Infallible, unlike `set_row_stride` — no shape validation is involved.
    pub fn set_plane_offset(&mut self, offset: usize) {
        dispatch!(self, set_plane_offset, offset)
    }

    /// Builder-style: set plane offset, consuming and returning self.
    pub fn with_plane_offset(mut self, offset: usize) -> Self {
        self.set_plane_offset(offset);
        self
    }

    /// Quantization metadata. Returns `None` for float variants (F16, F32,
    /// F64) — quantization does not apply to floating-point tensors.
    /// Otherwise delegates to the typed `Tensor<T>::quantization()` accessor.
    // Cannot use `dispatch!` here: float variants short-circuit to `None`
    // instead of delegating.
    pub fn quantization(&self) -> Option<&crate::Quantization> {
        match self {
            Self::U8(t) => t.quantization(),
            Self::I8(t) => t.quantization(),
            Self::U16(t) => t.quantization(),
            Self::I16(t) => t.quantization(),
            Self::U32(t) => t.quantization(),
            Self::I32(t) => t.quantization(),
            Self::U64(t) => t.quantization(),
            Self::I64(t) => t.quantization(),
            Self::F16(_) | Self::F32(_) | Self::F64(_) => None,
        }
    }

    /// Attach quantization metadata. Fails on float variants with
    /// [`Error::QuantizationInvalid`]; delegates to the typed setter for
    /// integer variants.
    pub fn set_quantization(&mut self, q: crate::Quantization) -> crate::Result<()> {
        match self {
            Self::U8(t) => t.set_quantization(q),
            Self::I8(t) => t.set_quantization(q),
            Self::U16(t) => t.set_quantization(q),
            Self::I16(t) => t.set_quantization(q),
            Self::U32(t) => t.set_quantization(q),
            Self::I32(t) => t.set_quantization(q),
            Self::U64(t) => t.set_quantization(q),
            Self::I64(t) => t.set_quantization(q),
            Self::F16(_) | Self::F32(_) | Self::F64(_) => Err(crate::Error::QuantizationInvalid {
                field: "dtype_is_integer",
                expected: "integer tensor dtype (u8/i8/u16/i16/u32/i32/u64/i64)".to_string(),
                got: format!("{:?}", self.dtype()),
            }),
        }
    }

    /// Builder-style variant of [`Self::set_quantization`]. Consumes self
    /// and returns it with quantization applied (or the original error).
    pub fn with_quantization(mut self, q: crate::Quantization) -> crate::Result<Self> {
        self.set_quantization(q)?;
        Ok(self)
    }

    /// Clear any quantization metadata. No-op on float variants.
    pub fn clear_quantization(&mut self) {
        match self {
            Self::U8(t) => t.clear_quantization(),
            Self::I8(t) => t.clear_quantization(),
            Self::U16(t) => t.clear_quantization(),
            Self::I16(t) => t.clear_quantization(),
            Self::U32(t) => t.clear_quantization(),
            Self::I32(t) => t.clear_quantization(),
            Self::U64(t) => t.clear_quantization(),
            Self::I64(t) => t.clear_quantization(),
            Self::F16(_) | Self::F32(_) | Self::F64(_) => {}
        }
    }

    /// Clone the file descriptor associated with this tensor.
    ///
    /// # Errors
    ///
    /// NOTE(review): delegates to `Tensor::<T>::clone_fd`; error conditions
    /// depend on the typed impl (see [`Self::dmabuf_clone`] for the
    /// DMA-specific contract) — confirm against the typed impl.
    #[cfg(unix)]
    pub fn clone_fd(&self) -> crate::Result<std::os::fd::OwnedFd> {
        dispatch!(self, clone_fd)
    }

    /// Clone the DMA-BUF file descriptor backing this tensor (Linux only).
    ///
    /// # Returns
    ///
    /// An owned duplicate of the DMA-BUF file descriptor.
    ///
    /// # Errors
    ///
    /// * `Error::NotImplemented` if the tensor is not DMA-backed (Mem/Shm/Pbo)
    /// * `Error::IoError` if the fd clone syscall fails (e.g., fd limit reached)
    #[cfg(target_os = "linux")]
    pub fn dmabuf_clone(&self) -> crate::Result<std::os::fd::OwnedFd> {
        // Guard here so callers get a descriptive error instead of whatever
        // the non-DMA typed backend would return from clone_fd.
        if self.memory() != TensorMemory::Dma {
            return Err(crate::Error::NotImplemented(format!(
                "dmabuf_clone requires DMA-backed tensor, got {:?}",
                self.memory()
            )));
        }
        self.clone_fd()
    }

    /// Borrow the DMA-BUF file descriptor backing this tensor (Linux only).
    ///
    /// # Returns
    ///
    /// A borrowed reference to the DMA-BUF file descriptor, tied to `self`'s
    /// lifetime.
    ///
    /// # Errors
    ///
    /// * `Error::NotImplemented` if the tensor is not DMA-backed
    #[cfg(target_os = "linux")]
    pub fn dmabuf(&self) -> crate::Result<std::os::fd::BorrowedFd<'_>> {
        dispatch!(self, dmabuf)
    }

    /// Return `true` if this tensor uses separate plane allocations.
    pub fn is_multiplane(&self) -> bool {
        dispatch!(self, is_multiplane)
    }

    /// Return the [`BufferIdentity`](crate::BufferIdentity) of the underlying
    /// allocation.
    ///
    /// Two `TensorDyn` values share a [`BufferIdentity::id`] iff they were
    /// produced by cloning the same allocation (e.g. through
    /// [`DmaTensor::try_clone`](crate::dma::DmaTensor::try_clone)). Separate
    /// imports of the same physical buffer (e.g. two `from_fd` calls on the
    /// same dmabuf fd) have **distinct** identities — use
    /// [`aliases`](Self::aliases) if you need to detect that case.
    pub fn buffer_identity(&self) -> &crate::BufferIdentity {
        dispatch!(self, buffer_identity)
    }

    /// Return `true` if `self` and `other` reference the same underlying
    /// buffer.
    ///
    /// This is the correct check for APIs that require distinct input and
    /// output tensors (e.g. `ImageProcessor::draw_decoded_masks`, where
    /// aliasing `dst` and `background` would cause the GL backend to read
    /// and write the same texture — undefined behaviour on most drivers).
    ///
    /// Matching is conservative:
    /// 1. Matching [`BufferIdentity::id`] → same buffer (always).
    /// 2. Matching backing type + matching dmabuf fd number (Linux, DMA
    ///    tensors only) → same buffer, even across separate `from_fd`
    ///    imports in the same process.
    ///
    /// Two distinct `dup`'d fds pointing at the same kernel dma-buf are
    /// **not** detected — there is no cheap way to resolve that without a
    /// round-trip through the kernel.
    pub fn aliases(&self, other: &Self) -> bool {
        // Cheap path: same allocation identity means same buffer.
        if self.buffer_identity().id() == other.buffer_identity().id() {
            return true;
        }
        // Different backing types can never refer to the same allocation.
        if self.memory() != other.memory() {
            return false;
        }
        #[cfg(target_os = "linux")]
        if self.memory() == TensorMemory::Dma {
            use std::os::fd::AsRawFd;
            // Same fd number within one process is literally the same open
            // file description, hence the same dma-buf.
            if let (Ok(a), Ok(b)) = (self.dmabuf(), other.dmabuf()) {
                return a.as_raw_fd() == b.as_raw_fd();
            }
        }
        false
    }

    // --- Downcasting ---

    downcast_methods!(U8, u8, as_u8, as_u8_mut, into_u8);
    downcast_methods!(I8, i8, as_i8, as_i8_mut, into_i8);
    downcast_methods!(U16, u16, as_u16, as_u16_mut, into_u16);
    downcast_methods!(I16, i16, as_i16, as_i16_mut, into_i16);
    downcast_methods!(U32, u32, as_u32, as_u32_mut, into_u32);
    downcast_methods!(I32, i32, as_i32, as_i32_mut, into_i32);
    downcast_methods!(U64, u64, as_u64, as_u64_mut, into_u64);
    downcast_methods!(I64, i64, as_i64, as_i64_mut, into_i64);
    downcast_methods!(F16, f16, as_f16, as_f16_mut, into_f16);
    downcast_methods!(F32, f32, as_f32, as_f32_mut, into_f32);
    downcast_methods!(F64, f64, as_f64, as_f64_mut, into_f64);

    /// Create a type-erased tensor with the given shape and element type.
    ///
    /// # Errors
    ///
    /// Propagates any error from the typed `Tensor::<T>::new` constructor.
    pub fn new(
        shape: &[usize],
        dtype: DType,
        memory: Option<TensorMemory>,
        name: Option<&str>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => Tensor::<u8>::new(shape, memory, name).map(Self::U8),
            DType::I8 => Tensor::<i8>::new(shape, memory, name).map(Self::I8),
            DType::U16 => Tensor::<u16>::new(shape, memory, name).map(Self::U16),
            DType::I16 => Tensor::<i16>::new(shape, memory, name).map(Self::I16),
            DType::U32 => Tensor::<u32>::new(shape, memory, name).map(Self::U32),
            DType::I32 => Tensor::<i32>::new(shape, memory, name).map(Self::I32),
            DType::U64 => Tensor::<u64>::new(shape, memory, name).map(Self::U64),
            DType::I64 => Tensor::<i64>::new(shape, memory, name).map(Self::I64),
            DType::F16 => Tensor::<f16>::new(shape, memory, name).map(Self::F16),
            DType::F32 => Tensor::<f32>::new(shape, memory, name).map(Self::F32),
            DType::F64 => Tensor::<f64>::new(shape, memory, name).map(Self::F64),
        }
    }

    /// Create a type-erased tensor from a file descriptor.
    ///
    /// # Errors
    ///
    /// Propagates any error from the typed `Tensor::<T>::from_fd` constructor.
    #[cfg(unix)]
    pub fn from_fd(
        fd: std::os::fd::OwnedFd,
        shape: &[usize],
        dtype: DType,
        name: Option<&str>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => Tensor::<u8>::from_fd(fd, shape, name).map(Self::U8),
            DType::I8 => Tensor::<i8>::from_fd(fd, shape, name).map(Self::I8),
            DType::U16 => Tensor::<u16>::from_fd(fd, shape, name).map(Self::U16),
            DType::I16 => Tensor::<i16>::from_fd(fd, shape, name).map(Self::I16),
            DType::U32 => Tensor::<u32>::from_fd(fd, shape, name).map(Self::U32),
            DType::I32 => Tensor::<i32>::from_fd(fd, shape, name).map(Self::I32),
            DType::U64 => Tensor::<u64>::from_fd(fd, shape, name).map(Self::U64),
            DType::I64 => Tensor::<i64>::from_fd(fd, shape, name).map(Self::I64),
            DType::F16 => Tensor::<f16>::from_fd(fd, shape, name).map(Self::F16),
            DType::F32 => Tensor::<f32>::from_fd(fd, shape, name).map(Self::F32),
            DType::F64 => Tensor::<f64>::from_fd(fd, shape, name).map(Self::F64),
        }
    }

    /// Create a type-erased image tensor.
    ///
    /// # Arguments
    ///
    /// * `width` - Image width in pixels
    /// * `height` - Image height in pixels
    /// * `format` - Pixel format
    /// * `dtype` - Element type discriminant
    /// * `memory` - Optional memory backend (None selects the best available)
    ///
    /// # Returns
    ///
    /// A new `TensorDyn` wrapping an image tensor of the requested element type.
    ///
    /// # Errors
    ///
    /// Returns an error if the underlying `Tensor::image` call fails.
    pub fn image(
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
        memory: Option<TensorMemory>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => Tensor::<u8>::image(width, height, format, memory).map(Self::U8),
            DType::I8 => Tensor::<i8>::image(width, height, format, memory).map(Self::I8),
            DType::U16 => Tensor::<u16>::image(width, height, format, memory).map(Self::U16),
            DType::I16 => Tensor::<i16>::image(width, height, format, memory).map(Self::I16),
            DType::U32 => Tensor::<u32>::image(width, height, format, memory).map(Self::U32),
            DType::I32 => Tensor::<i32>::image(width, height, format, memory).map(Self::I32),
            DType::U64 => Tensor::<u64>::image(width, height, format, memory).map(Self::U64),
            DType::I64 => Tensor::<i64>::image(width, height, format, memory).map(Self::I64),
            DType::F16 => Tensor::<f16>::image(width, height, format, memory).map(Self::F16),
            DType::F32 => Tensor::<f32>::image(width, height, format, memory).map(Self::F32),
            DType::F64 => Tensor::<f64>::image(width, height, format, memory).map(Self::F64),
        }
    }

    /// Create a DMA-backed image tensor with an explicit row stride that
    /// may exceed the natural `width * channels * sizeof(T)` pitch.
    ///
    /// See [`Tensor::image_with_stride`] for the detailed contract and
    /// constraints. The TensorDyn wrapper dispatches to the appropriate
    /// monomorphised `Tensor<T>` based on `dtype`.
    ///
    /// # Example
    ///
    /// ```no_run
    /// use edgefirst_tensor::{TensorDyn, PixelFormat, DType, TensorMemory};
    /// # fn main() -> edgefirst_tensor::Result<()> {
    /// // Allocate a 3004×1688 RGBA8 canvas with 64-byte pitch alignment
    /// // (12032 bytes per row instead of the natural 12016).
    /// let img = TensorDyn::image_with_stride(
    ///     3004, 1688,
    ///     PixelFormat::Rgba, DType::U8,
    ///     12032,
    ///     Some(TensorMemory::Dma),
    /// )?;
    /// assert_eq!(img.width(), Some(3004));       // logical, unchanged
    /// assert_eq!(img.effective_row_stride(), Some(12032)); // padded
    /// # Ok(())
    /// # }
    /// ```
    pub fn image_with_stride(
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
        row_stride_bytes: usize,
        memory: Option<TensorMemory>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => {
                Tensor::<u8>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U8)
            }
            DType::I8 => {
                Tensor::<i8>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I8)
            }
            DType::U16 => {
                Tensor::<u16>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U16)
            }
            DType::I16 => {
                Tensor::<i16>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I16)
            }
            DType::U32 => {
                Tensor::<u32>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U32)
            }
            DType::I32 => {
                Tensor::<i32>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I32)
            }
            DType::U64 => {
                Tensor::<u64>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U64)
            }
            DType::I64 => {
                Tensor::<i64>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I64)
            }
            DType::F16 => {
                Tensor::<f16>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::F16)
            }
            DType::F32 => {
                Tensor::<f32>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::F32)
            }
            DType::F64 => {
                Tensor::<f64>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::F64)
            }
        }
    }
}
559
560// --- From impls ---
561
562impl From<Tensor<u8>> for TensorDyn {
563    fn from(t: Tensor<u8>) -> Self {
564        Self::U8(t)
565    }
566}
567
568impl From<Tensor<i8>> for TensorDyn {
569    fn from(t: Tensor<i8>) -> Self {
570        Self::I8(t)
571    }
572}
573
574impl From<Tensor<u16>> for TensorDyn {
575    fn from(t: Tensor<u16>) -> Self {
576        Self::U16(t)
577    }
578}
579
580impl From<Tensor<i16>> for TensorDyn {
581    fn from(t: Tensor<i16>) -> Self {
582        Self::I16(t)
583    }
584}
585
586impl From<Tensor<u32>> for TensorDyn {
587    fn from(t: Tensor<u32>) -> Self {
588        Self::U32(t)
589    }
590}
591
592impl From<Tensor<i32>> for TensorDyn {
593    fn from(t: Tensor<i32>) -> Self {
594        Self::I32(t)
595    }
596}
597
598impl From<Tensor<u64>> for TensorDyn {
599    fn from(t: Tensor<u64>) -> Self {
600        Self::U64(t)
601    }
602}
603
604impl From<Tensor<i64>> for TensorDyn {
605    fn from(t: Tensor<i64>) -> Self {
606        Self::I64(t)
607    }
608}
609
610impl From<Tensor<f16>> for TensorDyn {
611    fn from(t: Tensor<f16>) -> Self {
612        Self::F16(t)
613    }
614}
615
616impl From<Tensor<f32>> for TensorDyn {
617    fn from(t: Tensor<f32>) -> Self {
618        Self::F32(t)
619    }
620}
621
622impl From<Tensor<f64>> for TensorDyn {
623    fn from(t: Tensor<f64>) -> Self {
624        Self::F64(t)
625    }
626}
627
// Delegates formatting to the wrapped typed tensor's `Debug` implementation.
impl fmt::Debug for TensorDyn {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        dispatch!(self, fmt, f)
    }
}
633
634#[cfg(test)]
635mod tests {
636    use super::*;
637
    // Converting a typed tensor must select the matching variant and keep
    // the shape intact.
    #[test]
    fn from_typed_tensor() {
        let t = Tensor::<u8>::new(&[10], None, None).unwrap();
        let dyn_t: TensorDyn = t.into();
        assert_eq!(dyn_t.dtype(), DType::U8);
        assert_eq!(dyn_t.shape(), &[10]);
    }

    // `as_*` downcasts succeed only for the stored element type.
    #[test]
    fn downcast_ref() {
        let t = Tensor::<u8>::new(&[10], None, None).unwrap();
        let dyn_t: TensorDyn = t.into();
        assert!(dyn_t.as_u8().is_some());
        assert!(dyn_t.as_i8().is_none());
    }

    // `into_*` unwraps the typed tensor without losing metadata.
    #[test]
    fn downcast_into() {
        let t = Tensor::<u8>::new(&[10], None, None).unwrap();
        let dyn_t: TensorDyn = t.into();
        let back = dyn_t.into_u8().unwrap();
        assert_eq!(back.shape(), &[10]);
    }

    // Image metadata (format/width/height/multiplane) is visible through
    // the type-erased wrapper.
    #[test]
    fn image_accessors() {
        let t = Tensor::<u8>::image(640, 480, PixelFormat::Rgba, None).unwrap();
        let dyn_t: TensorDyn = t.into();
        assert_eq!(dyn_t.format(), Some(PixelFormat::Rgba));
        assert_eq!(dyn_t.width(), Some(640));
        assert_eq!(dyn_t.height(), Some(480));
        assert!(!dyn_t.is_multiplane());
    }

    // TensorDyn::image dispatches on the requested dtype.
    #[test]
    fn image_constructor() {
        let dyn_t = TensorDyn::image(640, 480, PixelFormat::Rgb, DType::U8, None).unwrap();
        assert_eq!(dyn_t.dtype(), DType::U8);
        assert_eq!(dyn_t.format(), Some(PixelFormat::Rgb));
        assert_eq!(dyn_t.width(), Some(640));
    }

    #[test]
    fn image_constructor_i8() {
        let dyn_t = TensorDyn::image(640, 480, PixelFormat::Rgb, DType::I8, None).unwrap();
        assert_eq!(dyn_t.dtype(), DType::I8);
        assert_eq!(dyn_t.format(), Some(PixelFormat::Rgb));
    }

    // Packed layout: HWC-shaped tensor gains width/height from the format.
    #[test]
    fn set_format_packed() {
        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        assert_eq!(t.format(), None);
        t.set_format(PixelFormat::Rgb).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Rgb));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }

    // Planar layout: CHW-shaped tensor.
    #[test]
    fn set_format_planar() {
        let mut t = TensorDyn::new(&[3, 480, 640], DType::U8, None, None).unwrap();
        t.set_format(PixelFormat::PlanarRgb).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::PlanarRgb));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }

    // A 4-channel shape must not accept a 3-channel format.
    #[test]
    fn set_format_rejects_wrong_shape() {
        let mut t = TensorDyn::new(&[480, 640, 4], DType::U8, None, None).unwrap();
        assert!(t.set_format(PixelFormat::Rgb).is_err());
    }

    // Builder-style chaining through with_format.
    #[test]
    fn with_format_builder() {
        let t = TensorDyn::new(&[480, 640, 4], DType::U8, None, None)
            .unwrap()
            .with_format(PixelFormat::Rgba)
            .unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Rgba));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }
722
    // dmabuf_clone must reject non-DMA (heap) tensors up front.
    #[cfg(target_os = "linux")]
    #[test]
    fn dmabuf_clone_mem_tensor_fails() {
        let t = TensorDyn::new(&[480, 640, 3], DType::U8, Some(TensorMemory::Mem), None).unwrap();
        assert_eq!(t.memory(), TensorMemory::Mem);
        assert!(t.dmabuf_clone().is_err());
    }

    // Borrowing the dmabuf fd must also fail for heap-backed tensors.
    #[cfg(target_os = "linux")]
    #[test]
    fn dmabuf_mem_tensor_fails() {
        let t = TensorDyn::new(&[480, 640, 3], DType::U8, Some(TensorMemory::Mem), None).unwrap();
        assert!(t.dmabuf().is_err());
    }

    // Semi-planar layout: subsampled chroma rows appended below luma.
    #[test]
    fn set_format_semi_planar_nv12() {
        // 720 rows = 480 * 3/2 (NV12: height + height/2 for chroma)
        let mut t = TensorDyn::new(&[720, 640], DType::U8, Some(TensorMemory::Mem), None).unwrap();
        t.set_format(PixelFormat::Nv12).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Nv12));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }

    #[test]
    fn set_format_semi_planar_nv16() {
        // 960 rows = 480 * 2 (NV16: height + height for chroma)
        let mut t = TensorDyn::new(&[960, 640], DType::U8, Some(TensorMemory::Mem), None).unwrap();
        t.set_format(PixelFormat::Nv16).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Nv16));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }

    // Builder-style path rejects the same shape mismatch as set_format.
    #[test]
    fn with_format_rejects_wrong_shape() {
        let result = TensorDyn::new(&[480, 640, 4], DType::U8, None, None)
            .unwrap()
            .with_format(PixelFormat::Rgb);
        assert!(result.is_err());
    }

    // A failed set_format must not clobber previously attached metadata.
    #[test]
    fn set_format_preserved_after_rejection() {
        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        t.set_format(PixelFormat::Rgb).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Rgb));

        // Rgba requires 4 channels, should fail on a 3-channel tensor
        assert!(t.set_format(PixelFormat::Rgba).is_err());

        // Original format should be preserved
        assert_eq!(t.format(), Some(PixelFormat::Rgb));
    }

    // Re-applying the same format is a harmless no-op.
    #[test]
    fn set_format_idempotent() {
        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        t.set_format(PixelFormat::Rgb).unwrap();
        t.set_format(PixelFormat::Rgb).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Rgb));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }
788
    // --- Row stride tests ---

    // A stride larger than the natural pitch is accepted and reported by
    // both row_stride and effective_row_stride.
    #[test]
    fn set_row_stride_valid() {
        // RGBA 100px wide: min stride = 400, set 512
        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
        t.set_row_stride(512).unwrap();
        assert_eq!(t.row_stride(), Some(512));
        assert_eq!(t.effective_row_stride(), Some(512));
    }

    // The minimum (tightly packed) stride is a valid explicit value.
    #[test]
    fn set_row_stride_equals_min() {
        // RGB 100px: min stride = 300, set exactly 300
        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
        t.set_row_stride(300).unwrap();
        assert_eq!(t.row_stride(), Some(300));
    }

    // Strides below the natural pitch are rejected and leave no stride set.
    #[test]
    fn set_row_stride_too_small() {
        // RGBA 100px: min stride = 400, set 300
        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
        assert!(t.set_row_stride(300).is_err());
        assert_eq!(t.row_stride(), None);
    }

    #[test]
    fn set_row_stride_zero() {
        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
        assert!(t.set_row_stride(0).is_err());
    }

    // A stride cannot be set before a pixel format is attached.
    #[test]
    fn set_row_stride_requires_format() {
        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        assert!(t.set_row_stride(2048).is_err());
    }

    // Without an explicit stride, the effective stride is computed from
    // format and width.
    #[test]
    fn effective_row_stride_without_stride() {
        let t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
        assert_eq!(t.row_stride(), None);
        assert_eq!(t.effective_row_stride(), Some(300)); // 100 * 3
    }

    // No format means no effective stride either.
    #[test]
    fn effective_row_stride_no_format() {
        let t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        assert_eq!(t.effective_row_stride(), None);
    }

    #[test]
    fn with_row_stride_builder() {
        let t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None)
            .unwrap()
            .with_row_stride(512)
            .unwrap();
        assert_eq!(t.row_stride(), Some(512));
        assert_eq!(t.effective_row_stride(), Some(512));
    }

    #[test]
    fn with_row_stride_rejects_small() {
        let result = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None)
            .unwrap()
            .with_row_stride(200);
        assert!(result.is_err());
    }
858
859    #[test]
860    fn set_format_clears_row_stride() {
861        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
862        t.set_format(PixelFormat::Rgb).unwrap();
863        t.set_row_stride(2048).unwrap();
864        assert_eq!(t.row_stride(), Some(2048));
865
866        // Incompatible format change (4-chan on 3-chan shape) fails — stride preserved
867        let _ = t.set_format(PixelFormat::Bgra);
868        assert_eq!(t.row_stride(), Some(2048));
869
870        // Re-set to same format — stride preserved
871        t.set_format(PixelFormat::Rgb).unwrap();
872        assert_eq!(t.row_stride(), Some(2048));
873
874        // Reshape clears format and stride
875        t.reshape(&[480 * 640 * 3]).unwrap();
876        assert_eq!(t.row_stride(), None);
877        assert_eq!(t.format(), None);
878    }
879
880    #[test]
881    fn set_format_different_compatible_clears_stride() {
882        // RGBA and BGRA are both 4-channel packed — switching between them
883        // succeeds and must clear the stored stride.
884        let mut t = TensorDyn::new(&[480, 640, 4], DType::U8, None, None).unwrap();
885        t.set_format(PixelFormat::Rgba).unwrap();
886        t.set_row_stride(4096).unwrap();
887        assert_eq!(t.row_stride(), Some(4096));
888
889        // Successful format change to a different compatible format clears stride
890        t.set_format(PixelFormat::Bgra).unwrap();
891        assert_eq!(t.format(), Some(PixelFormat::Bgra));
892        assert_eq!(t.row_stride(), None);
893    }
894
895    #[test]
896    fn set_format_same_preserves_stride() {
897        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
898        t.set_row_stride(512).unwrap();
899        // Re-setting the same format should not clear stride
900        t.set_format(PixelFormat::Rgb).unwrap();
901        assert_eq!(t.row_stride(), Some(512));
902    }
903
904    #[test]
905    fn effective_row_stride_planar() {
906        let t = TensorDyn::image(640, 480, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
907        assert_eq!(t.effective_row_stride(), Some(640)); // planar: width only
908    }
909
910    #[test]
911    fn effective_row_stride_nv12() {
912        let t = TensorDyn::image(640, 480, PixelFormat::Nv12, DType::U8, None).unwrap();
913        assert_eq!(t.effective_row_stride(), Some(640)); // semi-planar: width only
914    }
915
916    #[test]
917    fn map_rejects_strided_tensor() {
918        let mut t =
919            Tensor::<u8>::image(100, 100, PixelFormat::Rgba, Some(TensorMemory::Mem)).unwrap();
920        // Map works before stride is set
921        assert!(t.map().is_ok());
922        // After setting stride, map should be rejected
923        t.set_row_stride(512).unwrap();
924        let err = t.map();
925        assert!(err.is_err());
926    }
927
928    // ── plane_offset tests ──────────────────────────────────────────
929
930    #[test]
931    fn plane_offset_default_none() {
932        let t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
933        assert_eq!(t.plane_offset(), None);
934    }
935
936    #[test]
937    fn set_plane_offset_basic() {
938        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
939        t.set_plane_offset(4096);
940        assert_eq!(t.plane_offset(), Some(4096));
941    }
942
943    #[test]
944    fn set_plane_offset_zero() {
945        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
946        t.set_plane_offset(0);
947        assert_eq!(t.plane_offset(), Some(0));
948    }
949
950    #[test]
951    fn set_plane_offset_no_format() {
952        // plane_offset does not require format (it is format-independent)
953        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
954        t.set_plane_offset(4096);
955        assert_eq!(t.plane_offset(), Some(4096));
956    }
957
958    #[test]
959    fn with_plane_offset_builder() {
960        let t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None)
961            .unwrap()
962            .with_plane_offset(8192);
963        assert_eq!(t.plane_offset(), Some(8192));
964    }
965
966    #[test]
967    fn set_format_clears_plane_offset() {
968        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
969        t.set_format(PixelFormat::Rgb).unwrap();
970        t.set_plane_offset(4096);
971        assert_eq!(t.plane_offset(), Some(4096));
972
973        // Re-set same format — offset preserved
974        t.set_format(PixelFormat::Rgb).unwrap();
975        assert_eq!(t.plane_offset(), Some(4096));
976
977        // Reshape clears everything
978        t.reshape(&[480 * 640 * 3]).unwrap();
979        assert_eq!(t.plane_offset(), None);
980        assert_eq!(t.format(), None);
981    }
982
983    #[test]
984    fn map_rejects_offset_tensor() {
985        let mut t =
986            Tensor::<u8>::image(100, 100, PixelFormat::Rgba, Some(TensorMemory::Mem)).unwrap();
987        // Map works before offset is set
988        assert!(t.map().is_ok());
989        // After setting non-zero offset, map should be rejected
990        t.set_plane_offset(4096);
991        assert!(t.map().is_err());
992    }
993
994    #[test]
995    fn map_accepts_zero_offset_tensor() {
996        let mut t =
997            Tensor::<u8>::image(100, 100, PixelFormat::Rgba, Some(TensorMemory::Mem)).unwrap();
998        t.set_plane_offset(0);
999        // Zero offset is fine for CPU mapping
1000        assert!(t.map().is_ok());
1001    }
1002
1003    #[test]
1004    fn from_planes_propagates_plane_offset() {
1005        let mut luma =
1006            Tensor::<u8>::new(&[480, 640], Some(TensorMemory::Mem), Some("luma")).unwrap();
1007        luma.set_plane_offset(4096);
1008        let chroma =
1009            Tensor::<u8>::new(&[240, 640], Some(TensorMemory::Mem), Some("chroma")).unwrap();
1010        let combined = Tensor::<u8>::from_planes(luma, chroma, PixelFormat::Nv12).unwrap();
1011        assert_eq!(combined.plane_offset(), Some(4096));
1012    }
1013}