1use crate::{DType, PixelFormat, Tensor, TensorMemory, TensorTrait};
5use half::f16;
6use std::fmt;
7
/// A dynamically-typed tensor: one variant per supported element type,
/// each wrapping the corresponding strongly-typed [`Tensor<T>`].
///
/// Marked `#[non_exhaustive]` so additional element types can be added
/// without breaking downstream `match` statements.
#[non_exhaustive]
pub enum TensorDyn {
    /// 8-bit unsigned integer elements.
    U8(Tensor<u8>),
    /// 8-bit signed integer elements.
    I8(Tensor<i8>),
    /// 16-bit unsigned integer elements.
    U16(Tensor<u16>),
    /// 16-bit signed integer elements.
    I16(Tensor<i16>),
    /// 32-bit unsigned integer elements.
    U32(Tensor<u32>),
    /// 32-bit signed integer elements.
    I32(Tensor<i32>),
    /// 64-bit unsigned integer elements.
    U64(Tensor<u64>),
    /// 64-bit signed integer elements.
    I64(Tensor<i64>),
    /// 16-bit IEEE-754 half-precision float elements (`half::f16`).
    F16(Tensor<f16>),
    /// 32-bit float elements.
    F32(Tensor<f32>),
    /// 64-bit float elements.
    F64(Tensor<f64>),
}
34
/// Forwards a method call to the typed tensor inside whichever variant
/// `$self` currently holds: `dispatch!(self, method, arg1, arg2)` expands
/// to an exhaustive match calling `t.method(arg1, arg2)` on every variant.
///
/// Because the match is exhaustive over `TensorDyn`, adding a new variant
/// produces a compile error here rather than a silent gap.
macro_rules! dispatch {
    ($self:expr, $method:ident $(, $arg:expr)*) => {
        match $self {
            TensorDyn::U8(t) => t.$method($($arg),*),
            TensorDyn::I8(t) => t.$method($($arg),*),
            TensorDyn::U16(t) => t.$method($($arg),*),
            TensorDyn::I16(t) => t.$method($($arg),*),
            TensorDyn::U32(t) => t.$method($($arg),*),
            TensorDyn::I32(t) => t.$method($($arg),*),
            TensorDyn::U64(t) => t.$method($($arg),*),
            TensorDyn::I64(t) => t.$method($($arg),*),
            TensorDyn::F16(t) => t.$method($($arg),*),
            TensorDyn::F32(t) => t.$method($($arg),*),
            TensorDyn::F64(t) => t.$method($($arg),*),
        }
    };
}
53
/// Generates the three downcast accessors for one variant:
/// `$as_name` (borrow), `$as_mut_name` (mutable borrow), and `$into_name`
/// (by-value, returning the original `TensorDyn` on mismatch so the caller
/// keeps ownership).
macro_rules! downcast_methods {
    ($variant:ident, $ty:ty, $as_name:ident, $as_mut_name:ident, $into_name:ident) => {
        /// Returns a reference to the typed tensor if this holds the
        /// matching variant, `None` otherwise.
        pub fn $as_name(&self) -> Option<&Tensor<$ty>> {
            match self {
                Self::$variant(t) => Some(t),
                _ => None,
            }
        }

        /// Mutable counterpart of the borrowing downcast.
        pub fn $as_mut_name(&mut self) -> Option<&mut Tensor<$ty>> {
            match self {
                Self::$variant(t) => Some(t),
                _ => None,
            }
        }

        // The Err arm carries the whole enum back to the caller, so the
        // error type is intentionally large; silence the clippy lint.
        #[allow(clippy::result_large_err)]
        /// Consumes `self`; `Ok(tensor)` on a type match, `Err(self)`
        /// (unchanged) otherwise so ownership is not lost.
        pub fn $into_name(self) -> Result<Tensor<$ty>, Self> {
            match self {
                Self::$variant(t) => Ok(t),
                other => Err(other),
            }
        }
    };
}
84
impl TensorDyn {
    /// Returns the element type carried by the active variant.
    pub fn dtype(&self) -> DType {
        match self {
            Self::U8(_) => DType::U8,
            Self::I8(_) => DType::I8,
            Self::U16(_) => DType::U16,
            Self::I16(_) => DType::I16,
            Self::U32(_) => DType::U32,
            Self::I32(_) => DType::I32,
            Self::U64(_) => DType::U64,
            Self::I64(_) => DType::I64,
            Self::F16(_) => DType::F16,
            Self::F32(_) => DType::F32,
            Self::F64(_) => DType::F64,
        }
    }

    /// Shape of the underlying tensor, in elements per dimension.
    pub fn shape(&self) -> &[usize] {
        dispatch!(self, shape)
    }

    /// Name of the underlying tensor.
    pub fn name(&self) -> String {
        dispatch!(self, name)
    }

    /// Pixel format, if one has been assigned (see [`Self::set_format`]).
    pub fn format(&self) -> Option<PixelFormat> {
        dispatch!(self, format)
    }

    /// Image width in pixels; `None` when no pixel format is set.
    pub fn width(&self) -> Option<usize> {
        dispatch!(self, width)
    }

    /// Image height in pixels; `None` when no pixel format is set.
    pub fn height(&self) -> Option<usize> {
        dispatch!(self, height)
    }

    /// Size of the underlying tensor (delegates to the typed tensor's
    /// `size`; whether this is elements or bytes is defined there).
    pub fn size(&self) -> usize {
        dispatch!(self, size)
    }

    /// Backing memory kind (e.g. heap vs DMA) of the underlying tensor.
    pub fn memory(&self) -> TensorMemory {
        dispatch!(self, memory)
    }

    /// Reshapes the tensor in place.
    ///
    /// NOTE(review): per the tests in this file, a successful reshape also
    /// clears any previously set pixel format, row stride, and plane offset.
    pub fn reshape(&mut self, shape: &[usize]) -> crate::Result<()> {
        dispatch!(self, reshape, shape)
    }

    /// Assigns a pixel format, validating it against the current shape.
    ///
    /// Per the tests in this file: an incompatible shape is rejected and the
    /// previous format is preserved; re-applying the same format is
    /// idempotent and keeps an existing row stride, while switching to a
    /// different (compatible) format clears the row stride.
    pub fn set_format(&mut self, format: PixelFormat) -> crate::Result<()> {
        dispatch!(self, set_format, format)
    }

    /// Builder-style variant of [`Self::set_format`]; consumes and returns
    /// `self` so calls can be chained after construction.
    pub fn with_format(mut self, format: PixelFormat) -> crate::Result<Self> {
        self.set_format(format)?;
        Ok(self)
    }

    /// Explicitly configured row stride in bytes, if any.
    pub fn row_stride(&self) -> Option<usize> {
        dispatch!(self, row_stride)
    }

    /// Row stride in effect: the explicit stride if set, otherwise the
    /// minimum stride implied by width/format; `None` without a format.
    pub fn effective_row_stride(&self) -> Option<usize> {
        dispatch!(self, effective_row_stride)
    }

    /// Sets an explicit row stride in bytes.
    ///
    /// Fails when no pixel format is set, when `stride` is zero, or when it
    /// is smaller than the minimum row size (see the tests in this file).
    pub fn set_row_stride(&mut self, stride: usize) -> crate::Result<()> {
        dispatch!(self, set_row_stride, stride)
    }

    /// Builder-style variant of [`Self::set_row_stride`].
    pub fn with_row_stride(mut self, stride: usize) -> crate::Result<Self> {
        self.set_row_stride(stride)?;
        Ok(self)
    }

    /// Explicitly configured plane offset, if any.
    pub fn plane_offset(&self) -> Option<usize> {
        dispatch!(self, plane_offset)
    }

    /// Sets the plane offset. Infallible: per the tests, it may be set even
    /// without a pixel format.
    pub fn set_plane_offset(&mut self, offset: usize) {
        dispatch!(self, set_plane_offset, offset)
    }

    /// Builder-style variant of [`Self::set_plane_offset`].
    pub fn with_plane_offset(mut self, offset: usize) -> Self {
        self.set_plane_offset(offset);
        self
    }

    /// Duplicates the tensor's backing file descriptor (delegates to the
    /// typed tensor; fails for tensors with no fd-backed memory).
    #[cfg(unix)]
    pub fn clone_fd(&self) -> crate::Result<std::os::fd::OwnedFd> {
        dispatch!(self, clone_fd)
    }

    /// Duplicates the dma-buf fd of a DMA-backed tensor.
    ///
    /// # Errors
    /// Returns `Error::NotImplemented` when the tensor's memory is not
    /// [`TensorMemory::Dma`]; otherwise propagates [`Self::clone_fd`] errors.
    #[cfg(target_os = "linux")]
    pub fn dmabuf_clone(&self) -> crate::Result<std::os::fd::OwnedFd> {
        if self.memory() != TensorMemory::Dma {
            return Err(crate::Error::NotImplemented(format!(
                "dmabuf_clone requires DMA-backed tensor, got {:?}",
                self.memory()
            )));
        }
        self.clone_fd()
    }

    /// Borrows the tensor's dma-buf fd without duplicating it (delegates to
    /// the typed tensor; errors for non-DMA tensors per the tests below).
    #[cfg(target_os = "linux")]
    pub fn dmabuf(&self) -> crate::Result<std::os::fd::BorrowedFd<'_>> {
        dispatch!(self, dmabuf)
    }

    /// Whether the tensor's pixel format uses more than one plane.
    pub fn is_multiplane(&self) -> bool {
        dispatch!(self, is_multiplane)
    }

    /// Identity token of the underlying buffer, used for aliasing checks.
    pub fn buffer_identity(&self) -> &crate::BufferIdentity {
        dispatch!(self, buffer_identity)
    }

    /// Best-effort check whether `self` and `other` share the same backing
    /// buffer.
    ///
    /// Matching buffer-identity ids => aliased. Different memory kinds =>
    /// not aliased. On Linux, DMA-backed tensors additionally compare raw
    /// dma-buf fds. Everything else conservatively reports `false`.
    ///
    /// NOTE(review): raw-fd equality only detects aliasing within one fd
    /// table; two distinct fds can still refer to the same buffer, so a
    /// `false` here does not prove the buffers are disjoint — confirm this
    /// is acceptable for callers.
    pub fn aliases(&self, other: &Self) -> bool {
        if self.buffer_identity().id() == other.buffer_identity().id() {
            return true;
        }
        if self.memory() != other.memory() {
            return false;
        }
        #[cfg(target_os = "linux")]
        if self.memory() == TensorMemory::Dma {
            use std::os::fd::AsRawFd;
            if let (Ok(a), Ok(b)) = (self.dmabuf(), other.dmabuf()) {
                return a.as_raw_fd() == b.as_raw_fd();
            }
        }
        false
    }

    // Typed downcasts: as_*/as_*_mut borrow, into_* consumes (returning
    // `Err(self)` on a variant mismatch). Generated by `downcast_methods!`.
    downcast_methods!(U8, u8, as_u8, as_u8_mut, into_u8);
    downcast_methods!(I8, i8, as_i8, as_i8_mut, into_i8);
    downcast_methods!(U16, u16, as_u16, as_u16_mut, into_u16);
    downcast_methods!(I16, i16, as_i16, as_i16_mut, into_i16);
    downcast_methods!(U32, u32, as_u32, as_u32_mut, into_u32);
    downcast_methods!(I32, i32, as_i32, as_i32_mut, into_i32);
    downcast_methods!(U64, u64, as_u64, as_u64_mut, into_u64);
    downcast_methods!(I64, i64, as_i64, as_i64_mut, into_i64);
    downcast_methods!(F16, f16, as_f16, as_f16_mut, into_f16);
    downcast_methods!(F32, f32, as_f32, as_f32_mut, into_f32);
    downcast_methods!(F64, f64, as_f64, as_f64_mut, into_f64);

    /// Allocates a new tensor of the requested `dtype`, selecting the
    /// matching variant at runtime.
    pub fn new(
        shape: &[usize],
        dtype: DType,
        memory: Option<TensorMemory>,
        name: Option<&str>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => Tensor::<u8>::new(shape, memory, name).map(Self::U8),
            DType::I8 => Tensor::<i8>::new(shape, memory, name).map(Self::I8),
            DType::U16 => Tensor::<u16>::new(shape, memory, name).map(Self::U16),
            DType::I16 => Tensor::<i16>::new(shape, memory, name).map(Self::I16),
            DType::U32 => Tensor::<u32>::new(shape, memory, name).map(Self::U32),
            DType::I32 => Tensor::<i32>::new(shape, memory, name).map(Self::I32),
            DType::U64 => Tensor::<u64>::new(shape, memory, name).map(Self::U64),
            DType::I64 => Tensor::<i64>::new(shape, memory, name).map(Self::I64),
            DType::F16 => Tensor::<f16>::new(shape, memory, name).map(Self::F16),
            DType::F32 => Tensor::<f32>::new(shape, memory, name).map(Self::F32),
            DType::F64 => Tensor::<f64>::new(shape, memory, name).map(Self::F64),
        }
    }

    /// Wraps an existing file descriptor as a tensor of the requested
    /// `dtype` (the fd is consumed; delegates to `Tensor::<T>::from_fd`).
    #[cfg(unix)]
    pub fn from_fd(
        fd: std::os::fd::OwnedFd,
        shape: &[usize],
        dtype: DType,
        name: Option<&str>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => Tensor::<u8>::from_fd(fd, shape, name).map(Self::U8),
            DType::I8 => Tensor::<i8>::from_fd(fd, shape, name).map(Self::I8),
            DType::U16 => Tensor::<u16>::from_fd(fd, shape, name).map(Self::U16),
            DType::I16 => Tensor::<i16>::from_fd(fd, shape, name).map(Self::I16),
            DType::U32 => Tensor::<u32>::from_fd(fd, shape, name).map(Self::U32),
            DType::I32 => Tensor::<i32>::from_fd(fd, shape, name).map(Self::I32),
            DType::U64 => Tensor::<u64>::from_fd(fd, shape, name).map(Self::U64),
            DType::I64 => Tensor::<i64>::from_fd(fd, shape, name).map(Self::I64),
            DType::F16 => Tensor::<f16>::from_fd(fd, shape, name).map(Self::F16),
            DType::F32 => Tensor::<f32>::from_fd(fd, shape, name).map(Self::F32),
            DType::F64 => Tensor::<f64>::from_fd(fd, shape, name).map(Self::F64),
        }
    }

    /// Allocates an image tensor of the requested `dtype` with the given
    /// pixel format already applied.
    pub fn image(
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
        memory: Option<TensorMemory>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => Tensor::<u8>::image(width, height, format, memory).map(Self::U8),
            DType::I8 => Tensor::<i8>::image(width, height, format, memory).map(Self::I8),
            DType::U16 => Tensor::<u16>::image(width, height, format, memory).map(Self::U16),
            DType::I16 => Tensor::<i16>::image(width, height, format, memory).map(Self::I16),
            DType::U32 => Tensor::<u32>::image(width, height, format, memory).map(Self::U32),
            DType::I32 => Tensor::<i32>::image(width, height, format, memory).map(Self::I32),
            DType::U64 => Tensor::<u64>::image(width, height, format, memory).map(Self::U64),
            DType::I64 => Tensor::<i64>::image(width, height, format, memory).map(Self::I64),
            DType::F16 => Tensor::<f16>::image(width, height, format, memory).map(Self::F16),
            DType::F32 => Tensor::<f32>::image(width, height, format, memory).map(Self::F32),
            DType::F64 => Tensor::<f64>::image(width, height, format, memory).map(Self::F64),
        }
    }

    /// Like [`Self::image`] but with an explicit row stride in bytes
    /// (`row_stride_bytes`); delegates validation to the typed constructor.
    pub fn image_with_stride(
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
        row_stride_bytes: usize,
        memory: Option<TensorMemory>,
    ) -> crate::Result<Self> {
        match dtype {
            DType::U8 => {
                Tensor::<u8>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U8)
            }
            DType::I8 => {
                Tensor::<i8>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I8)
            }
            DType::U16 => {
                Tensor::<u16>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U16)
            }
            DType::I16 => {
                Tensor::<i16>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I16)
            }
            DType::U32 => {
                Tensor::<u32>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U32)
            }
            DType::I32 => {
                Tensor::<i32>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I32)
            }
            DType::U64 => {
                Tensor::<u64>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::U64)
            }
            DType::I64 => {
                Tensor::<i64>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::I64)
            }
            DType::F16 => {
                Tensor::<f16>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::F16)
            }
            DType::F32 => {
                Tensor::<f32>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::F32)
            }
            DType::F64 => {
                Tensor::<f64>::image_with_stride(width, height, format, row_stride_bytes, memory)
                    .map(Self::F64)
            }
        }
    }
}
499
500impl From<Tensor<u8>> for TensorDyn {
503 fn from(t: Tensor<u8>) -> Self {
504 Self::U8(t)
505 }
506}
507
508impl From<Tensor<i8>> for TensorDyn {
509 fn from(t: Tensor<i8>) -> Self {
510 Self::I8(t)
511 }
512}
513
514impl From<Tensor<u16>> for TensorDyn {
515 fn from(t: Tensor<u16>) -> Self {
516 Self::U16(t)
517 }
518}
519
520impl From<Tensor<i16>> for TensorDyn {
521 fn from(t: Tensor<i16>) -> Self {
522 Self::I16(t)
523 }
524}
525
526impl From<Tensor<u32>> for TensorDyn {
527 fn from(t: Tensor<u32>) -> Self {
528 Self::U32(t)
529 }
530}
531
532impl From<Tensor<i32>> for TensorDyn {
533 fn from(t: Tensor<i32>) -> Self {
534 Self::I32(t)
535 }
536}
537
538impl From<Tensor<u64>> for TensorDyn {
539 fn from(t: Tensor<u64>) -> Self {
540 Self::U64(t)
541 }
542}
543
544impl From<Tensor<i64>> for TensorDyn {
545 fn from(t: Tensor<i64>) -> Self {
546 Self::I64(t)
547 }
548}
549
550impl From<Tensor<f16>> for TensorDyn {
551 fn from(t: Tensor<f16>) -> Self {
552 Self::F16(t)
553 }
554}
555
556impl From<Tensor<f32>> for TensorDyn {
557 fn from(t: Tensor<f32>) -> Self {
558 Self::F32(t)
559 }
560}
561
562impl From<Tensor<f64>> for TensorDyn {
563 fn from(t: Tensor<f64>) -> Self {
564 Self::F64(t)
565 }
566}
567
// Debug forwards to the active variant's typed `fmt` implementation via
// `dispatch!`, so the output matches `Tensor<T>`'s own Debug formatting.
impl fmt::Debug for TensorDyn {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        dispatch!(self, fmt, f)
    }
}
573
#[cfg(test)]
mod tests {
    use super::*;

    // --- construction & downcasting ---

    #[test]
    fn from_typed_tensor() {
        let t = Tensor::<u8>::new(&[10], None, None).unwrap();
        let dyn_t: TensorDyn = t.into();
        assert_eq!(dyn_t.dtype(), DType::U8);
        assert_eq!(dyn_t.shape(), &[10]);
    }

    #[test]
    fn downcast_ref() {
        let t = Tensor::<u8>::new(&[10], None, None).unwrap();
        let dyn_t: TensorDyn = t.into();
        // Matching variant borrows; any other variant accessor yields None.
        assert!(dyn_t.as_u8().is_some());
        assert!(dyn_t.as_i8().is_none());
    }

    #[test]
    fn downcast_into() {
        let t = Tensor::<u8>::new(&[10], None, None).unwrap();
        let dyn_t: TensorDyn = t.into();
        // Round-trip: typed -> dyn -> typed preserves the tensor.
        let back = dyn_t.into_u8().unwrap();
        assert_eq!(back.shape(), &[10]);
    }

    // --- image accessors & constructors ---

    #[test]
    fn image_accessors() {
        let t = Tensor::<u8>::image(640, 480, PixelFormat::Rgba, None).unwrap();
        let dyn_t: TensorDyn = t.into();
        assert_eq!(dyn_t.format(), Some(PixelFormat::Rgba));
        assert_eq!(dyn_t.width(), Some(640));
        assert_eq!(dyn_t.height(), Some(480));
        assert!(!dyn_t.is_multiplane());
    }

    #[test]
    fn image_constructor() {
        let dyn_t = TensorDyn::image(640, 480, PixelFormat::Rgb, DType::U8, None).unwrap();
        assert_eq!(dyn_t.dtype(), DType::U8);
        assert_eq!(dyn_t.format(), Some(PixelFormat::Rgb));
        assert_eq!(dyn_t.width(), Some(640));
    }

    #[test]
    fn image_constructor_i8() {
        let dyn_t = TensorDyn::image(640, 480, PixelFormat::Rgb, DType::I8, None).unwrap();
        assert_eq!(dyn_t.dtype(), DType::I8);
        assert_eq!(dyn_t.format(), Some(PixelFormat::Rgb));
    }

    // --- set_format semantics ---

    #[test]
    fn set_format_packed() {
        // Packed layout: [height, width, channels].
        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        assert_eq!(t.format(), None);
        t.set_format(PixelFormat::Rgb).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Rgb));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }

    #[test]
    fn set_format_planar() {
        // Planar layout: [channels, height, width].
        let mut t = TensorDyn::new(&[3, 480, 640], DType::U8, None, None).unwrap();
        t.set_format(PixelFormat::PlanarRgb).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::PlanarRgb));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }

    #[test]
    fn set_format_rejects_wrong_shape() {
        // 4 channels cannot carry a 3-channel RGB format.
        let mut t = TensorDyn::new(&[480, 640, 4], DType::U8, None, None).unwrap();
        assert!(t.set_format(PixelFormat::Rgb).is_err());
    }

    #[test]
    fn with_format_builder() {
        let t = TensorDyn::new(&[480, 640, 4], DType::U8, None, None)
            .unwrap()
            .with_format(PixelFormat::Rgba)
            .unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Rgba));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }

    // --- dma-buf error paths (Linux only) ---

    #[cfg(target_os = "linux")]
    #[test]
    fn dmabuf_clone_mem_tensor_fails() {
        let t = TensorDyn::new(&[480, 640, 3], DType::U8, Some(TensorMemory::Mem), None).unwrap();
        assert_eq!(t.memory(), TensorMemory::Mem);
        assert!(t.dmabuf_clone().is_err());
    }

    #[cfg(target_os = "linux")]
    #[test]
    fn dmabuf_mem_tensor_fails() {
        let t = TensorDyn::new(&[480, 640, 3], DType::U8, Some(TensorMemory::Mem), None).unwrap();
        assert!(t.dmabuf().is_err());
    }

    // --- semi-planar formats ---

    #[test]
    fn set_format_semi_planar_nv12() {
        // NV12 4:2:0: 720 rows = 480 luma rows + 240 interleaved-chroma rows,
        // so the resolved image height is 480.
        let mut t = TensorDyn::new(&[720, 640], DType::U8, Some(TensorMemory::Mem), None).unwrap();
        t.set_format(PixelFormat::Nv12).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Nv12));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }

    #[test]
    fn set_format_semi_planar_nv16() {
        // NV16 4:2:2: 960 rows = 480 luma rows + 480 chroma rows.
        let mut t = TensorDyn::new(&[960, 640], DType::U8, Some(TensorMemory::Mem), None).unwrap();
        t.set_format(PixelFormat::Nv16).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Nv16));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }

    #[test]
    fn with_format_rejects_wrong_shape() {
        let result = TensorDyn::new(&[480, 640, 4], DType::U8, None, None)
            .unwrap()
            .with_format(PixelFormat::Rgb);
        assert!(result.is_err());
    }

    #[test]
    fn set_format_preserved_after_rejection() {
        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        t.set_format(PixelFormat::Rgb).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Rgb));

        // A rejected set_format must leave the previous format intact.
        assert!(t.set_format(PixelFormat::Rgba).is_err());

        assert_eq!(t.format(), Some(PixelFormat::Rgb));
    }

    #[test]
    fn set_format_idempotent() {
        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        t.set_format(PixelFormat::Rgb).unwrap();
        t.set_format(PixelFormat::Rgb).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Rgb));
        assert_eq!(t.width(), Some(640));
        assert_eq!(t.height(), Some(480));
    }

    // --- row stride semantics ---

    #[test]
    fn set_row_stride_valid() {
        // 100px RGBA => 400-byte minimum row; 512 is a valid padded stride.
        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
        t.set_row_stride(512).unwrap();
        assert_eq!(t.row_stride(), Some(512));
        assert_eq!(t.effective_row_stride(), Some(512));
    }

    #[test]
    fn set_row_stride_equals_min() {
        // Stride exactly equal to the minimum row size (100 * 3) is allowed.
        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
        t.set_row_stride(300).unwrap();
        assert_eq!(t.row_stride(), Some(300));
    }

    #[test]
    fn set_row_stride_too_small() {
        // 300 < 400 (100px RGBA): rejected, and no stride is recorded.
        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
        assert!(t.set_row_stride(300).is_err());
        assert_eq!(t.row_stride(), None);
    }

    #[test]
    fn set_row_stride_zero() {
        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
        assert!(t.set_row_stride(0).is_err());
    }

    #[test]
    fn set_row_stride_requires_format() {
        // Without a pixel format there is no row geometry to validate against.
        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        assert!(t.set_row_stride(2048).is_err());
    }

    #[test]
    fn effective_row_stride_without_stride() {
        let t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
        assert_eq!(t.row_stride(), None);
        // Falls back to the minimum row size: 100 px * 3 bytes.
        assert_eq!(t.effective_row_stride(), Some(300));
    }

    #[test]
    fn effective_row_stride_no_format() {
        let t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        assert_eq!(t.effective_row_stride(), None);
    }

    #[test]
    fn with_row_stride_builder() {
        let t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None)
            .unwrap()
            .with_row_stride(512)
            .unwrap();
        assert_eq!(t.row_stride(), Some(512));
        assert_eq!(t.effective_row_stride(), Some(512));
    }

    #[test]
    fn with_row_stride_rejects_small() {
        let result = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None)
            .unwrap()
            .with_row_stride(200);
        assert!(result.is_err());
    }

    #[test]
    fn set_format_clears_row_stride() {
        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        t.set_format(PixelFormat::Rgb).unwrap();
        t.set_row_stride(2048).unwrap();
        assert_eq!(t.row_stride(), Some(2048));

        // A *failed* format change (Bgra needs 4 channels) leaves the stride.
        let _ = t.set_format(PixelFormat::Bgra);
        assert_eq!(t.row_stride(), Some(2048));

        // Re-applying the same format also keeps the stride.
        t.set_format(PixelFormat::Rgb).unwrap();
        assert_eq!(t.row_stride(), Some(2048));

        // Reshaping clears both stride and format.
        t.reshape(&[480 * 640 * 3]).unwrap();
        assert_eq!(t.row_stride(), None);
        assert_eq!(t.format(), None);
    }

    #[test]
    fn set_format_different_compatible_clears_stride() {
        let mut t = TensorDyn::new(&[480, 640, 4], DType::U8, None, None).unwrap();
        t.set_format(PixelFormat::Rgba).unwrap();
        t.set_row_stride(4096).unwrap();
        assert_eq!(t.row_stride(), Some(4096));

        // Switching to a different but shape-compatible format resets stride.
        t.set_format(PixelFormat::Bgra).unwrap();
        assert_eq!(t.format(), Some(PixelFormat::Bgra));
        assert_eq!(t.row_stride(), None);
    }

    #[test]
    fn set_format_same_preserves_stride() {
        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
        t.set_row_stride(512).unwrap();
        t.set_format(PixelFormat::Rgb).unwrap();
        assert_eq!(t.row_stride(), Some(512));
    }

    #[test]
    fn effective_row_stride_planar() {
        // Planar RGB, u8: one byte per pixel per plane row => width bytes.
        let t = TensorDyn::image(640, 480, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
        assert_eq!(t.effective_row_stride(), Some(640));
    }

    #[test]
    fn effective_row_stride_nv12() {
        // NV12 luma rows are width bytes wide.
        let t = TensorDyn::image(640, 480, PixelFormat::Nv12, DType::U8, None).unwrap();
        assert_eq!(t.effective_row_stride(), Some(640));
    }

    #[test]
    fn map_rejects_strided_tensor() {
        let mut t =
            Tensor::<u8>::image(100, 100, PixelFormat::Rgba, Some(TensorMemory::Mem)).unwrap();
        assert!(t.map().is_ok());
        // Once a non-trivial stride is set, a flat map is no longer valid.
        t.set_row_stride(512).unwrap();
        let err = t.map();
        assert!(err.is_err());
    }

    // --- plane offset semantics ---

    #[test]
    fn plane_offset_default_none() {
        let t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
        assert_eq!(t.plane_offset(), None);
    }

    #[test]
    fn set_plane_offset_basic() {
        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None).unwrap();
        t.set_plane_offset(4096);
        assert_eq!(t.plane_offset(), Some(4096));
    }

    #[test]
    fn set_plane_offset_zero() {
        // An explicit zero offset is recorded as Some(0), not None.
        let mut t = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
        t.set_plane_offset(0);
        assert_eq!(t.plane_offset(), Some(0));
    }

    #[test]
    fn set_plane_offset_no_format() {
        // Unlike set_row_stride, plane offset does not require a format.
        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        t.set_plane_offset(4096);
        assert_eq!(t.plane_offset(), Some(4096));
    }

    #[test]
    fn with_plane_offset_builder() {
        let t = TensorDyn::image(100, 100, PixelFormat::Rgba, DType::U8, None)
            .unwrap()
            .with_plane_offset(8192);
        assert_eq!(t.plane_offset(), Some(8192));
    }

    #[test]
    fn set_format_clears_plane_offset() {
        let mut t = TensorDyn::new(&[480, 640, 3], DType::U8, None, None).unwrap();
        t.set_format(PixelFormat::Rgb).unwrap();
        t.set_plane_offset(4096);
        assert_eq!(t.plane_offset(), Some(4096));

        // Re-applying the same format keeps the offset.
        t.set_format(PixelFormat::Rgb).unwrap();
        assert_eq!(t.plane_offset(), Some(4096));

        // Reshaping clears both offset and format.
        t.reshape(&[480 * 640 * 3]).unwrap();
        assert_eq!(t.plane_offset(), None);
        assert_eq!(t.format(), None);
    }

    #[test]
    fn map_rejects_offset_tensor() {
        let mut t =
            Tensor::<u8>::image(100, 100, PixelFormat::Rgba, Some(TensorMemory::Mem)).unwrap();
        assert!(t.map().is_ok());
        // A non-zero plane offset invalidates a flat map.
        t.set_plane_offset(4096);
        assert!(t.map().is_err());
    }

    #[test]
    fn map_accepts_zero_offset_tensor() {
        let mut t =
            Tensor::<u8>::image(100, 100, PixelFormat::Rgba, Some(TensorMemory::Mem)).unwrap();
        // Some(0) is semantically a no-op offset, so mapping still works.
        t.set_plane_offset(0);
        assert!(t.map().is_ok());
    }

    #[test]
    fn from_planes_propagates_plane_offset() {
        let mut luma =
            Tensor::<u8>::new(&[480, 640], Some(TensorMemory::Mem), Some("luma")).unwrap();
        luma.set_plane_offset(4096);
        let chroma =
            Tensor::<u8>::new(&[240, 640], Some(TensorMemory::Mem), Some("chroma")).unwrap();
        // The combined NV12 tensor inherits the luma plane's offset.
        let combined = Tensor::<u8>::from_planes(luma, chroma, PixelFormat::Nv12).unwrap();
        assert_eq!(combined.plane_offset(), Some(4096));
    }
}
953}