1use num_traits::Float;
2use thiserror::Error;
3
4use super::{
5 allocator::{CpuAllocator, TensorAllocator, TensorAllocatorError},
6 storage::TensorStorage,
7 view::TensorView,
8};
9
/// Errors that can occur while constructing or operating on a tensor.
#[derive(Error, Debug, PartialEq)]
pub enum TensorError {
    /// A numeric conversion failed (e.g. element count -> `T` in `mean`).
    #[error("Failed to cast data")]
    CastError,

    /// The provided data length does not match the product of the shape;
    /// the payload is the expected number of elements.
    #[error("The number of elements in the data does not match the shape of the tensor: {0}")]
    InvalidShape(usize),

    /// A linear offset was outside the valid range of the tensor.
    #[error("Index out of bounds. The index {0} is out of bounds.")]
    IndexOutOfBounds(usize),

    /// Propagated failure from the underlying storage allocator.
    #[error("Error with the tensor storage: {0}")]
    StorageError(#[from] TensorAllocatorError),

    /// Two shapes were incompatible for the attempted operation.
    #[error("Dimension mismatch: {0}")]
    DimensionMismatch(String),

    /// The requested operation is not supported.
    #[error("Unsupported operation: {0}")]
    UnsupportedOperation(String),
}
37
/// Computes row-major (C-contiguous) strides for `shape`.
///
/// The last axis has stride 1 and every preceding axis has a stride equal
/// to the product of all dimension sizes that come after it.
pub(crate) fn get_strides_from_shape<const N: usize>(shape: [usize; N]) -> [usize; N] {
    let mut strides = [0usize; N];
    let mut acc = 1;
    // Walk the axes innermost-first, accumulating the running product.
    for (slot, &dim) in strides.iter_mut().zip(shape.iter()).rev() {
        *slot = acc;
        acc *= dim;
    }
    strides
}
56
/// An owned, N-dimensional tensor of elements `T` backed by a [`TensorStorage`].
pub struct Tensor<T, const N: usize, A: TensorAllocator> {
    /// Owned buffer holding the elements in row-major order.
    pub storage: TensorStorage<T, A>,
    /// Size of each of the `N` dimensions.
    pub shape: [usize; N],
    /// Per-dimension strides, in elements (row-major by construction).
    pub strides: [usize; N],
}
85
86impl<T, const N: usize, A: TensorAllocator> Tensor<T, N, A>
87where
88 A: 'static,
89{
    /// Returns the tensor elements as a flat slice in storage (row-major) order.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        self.storage.as_slice()
    }

    /// Returns the tensor elements as a mutable flat slice in storage order.
    #[inline]
    pub fn as_slice_mut(&mut self) -> &mut [T] {
        self.storage.as_mut_slice()
    }

    /// Returns a raw const pointer to the first element of the storage.
    #[inline]
    pub fn as_ptr(&self) -> *const T {
        self.storage.as_ptr()
    }

    /// Returns a raw mutable pointer to the first element of the storage.
    #[inline]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.storage.as_mut_ptr()
    }

    /// Consumes the tensor and returns its elements as a `Vec<T>`.
    #[inline]
    pub fn into_vec(self) -> Vec<T> {
        self.storage.into_vec()
    }
139
140 pub fn from_shape_vec(shape: [usize; N], data: Vec<T>, alloc: A) -> Result<Self, TensorError> {
166 let numel = shape.iter().product::<usize>();
167 if numel != data.len() {
168 return Err(TensorError::InvalidShape(numel));
169 }
170 let storage = TensorStorage::from_vec(data, alloc);
171 let strides = get_strides_from_shape(shape);
172 Ok(Self {
173 storage,
174 shape,
175 strides,
176 })
177 }
178
179 pub fn from_shape_slice(shape: [usize; N], data: &[T], alloc: A) -> Result<Self, TensorError>
195 where
196 T: Clone,
197 {
198 let numel = shape.iter().product::<usize>();
199 if numel != data.len() {
200 return Err(TensorError::InvalidShape(numel));
201 }
202 let storage = TensorStorage::from_vec(data.to_vec(), alloc);
203 let strides = get_strides_from_shape(shape);
204 Ok(Self {
205 storage,
206 shape,
207 strides,
208 })
209 }
210
211 pub fn from_shape_val(shape: [usize; N], value: T, alloc: A) -> Self
237 where
238 T: Clone,
239 {
240 let numel = shape.iter().product::<usize>();
241 let data = vec![value; numel];
242 let storage = TensorStorage::from_vec(data, alloc);
243 let strides = get_strides_from_shape(shape);
244 Self {
245 storage,
246 shape,
247 strides,
248 }
249 }
250
251 pub fn from_shape_fn<F>(shape: [usize; N], alloc: A, f: F) -> Self
276 where
277 F: Fn([usize; N]) -> T,
278 {
279 let numel = shape.iter().product::<usize>();
280 let data: Vec<T> = (0..numel)
281 .map(|i| {
282 let mut index = [0; N];
283 let mut j = i;
284 for k in (0..N).rev() {
285 index[k] = j % shape[k];
286 j /= shape[k];
287 }
288 f(index)
289 })
290 .collect();
291 let storage = TensorStorage::from_vec(data, alloc);
292 let strides = get_strides_from_shape(shape);
293 Self {
294 storage,
295 shape,
296 strides,
297 }
298 }
299
    /// Returns the total number of elements in the tensor.
    ///
    /// NOTE(review): the division by `size_of::<T>()` assumes
    /// `TensorStorage::len()` reports a size in BYTES — confirm against
    /// `TensorStorage`; for `u8` storage the two interpretations coincide,
    /// which is all the tests below exercise.
    #[inline]
    pub fn numel(&self) -> usize {
        self.storage.len() / std::mem::size_of::<T>()
    }
309
310 pub fn get_iter_offset(&self, index: [usize; N]) -> Option<usize> {
320 let mut offset = 0;
321 for ((&idx, dim_size), stride) in index.iter().zip(self.shape).zip(self.strides) {
322 if idx >= dim_size {
323 return None;
324 }
325 offset += idx * stride;
326 }
327 Some(offset)
328 }
329
330 pub fn get_iter_offset_unchecked(&self, index: [usize; N]) -> usize {
340 let mut offset = 0;
341 for (&idx, stride) in index.iter().zip(self.strides) {
342 offset += idx * stride;
343 }
344 offset
345 }
346
347 pub fn get_index_unchecked(&self, offset: usize) -> [usize; N] {
357 let mut idx = [0; N];
358 let mut rem = offset;
359 for (dim_i, s) in self.strides.iter().enumerate() {
360 idx[dim_i] = rem / s;
361 rem = offset % s;
362 }
363
364 idx
365 }
366
367 pub fn get_index(&self, offset: usize) -> Result<[usize; N], TensorError> {
381 if offset >= self.numel() {
382 return Err(TensorError::IndexOutOfBounds(offset));
383 }
384 let idx = self.get_index_unchecked(offset);
385
386 Ok(idx)
387 }
388
    /// Returns a reference to the element at `index` without bounds checks.
    ///
    /// NOTE(review): this is a safe fn that calls `slice::get_unchecked`;
    /// an out-of-bounds `index` is undefined behavior. Consider marking it
    /// `unsafe fn` (breaking change) or debug-asserting the bounds.
    pub fn get_unchecked(&self, index: [usize; N]) -> &T {
        let offset = self.get_iter_offset_unchecked(index);
        // SAFETY: sound only when `index` is within `self.shape`, so the
        // computed offset is within the storage; callers must guarantee it.
        unsafe { self.storage.as_slice().get_unchecked(offset) }
    }
416
417 pub fn get(&self, index: [usize; N]) -> Option<&T> {
448 self.get_iter_offset(index)
449 .and_then(|i| self.storage.as_slice().get(i))
450 }
451
452 pub fn reshape<const M: usize>(
481 &self,
482 shape: [usize; M],
483 ) -> Result<TensorView<T, M, A>, TensorError> {
484 let numel = shape.iter().product::<usize>();
485 if numel != self.storage.len() {
486 return Err(TensorError::DimensionMismatch(format!(
487 "Cannot reshape tensor of shape {:?} with {} elements to shape {:?} with {} elements",
488 self.shape, self.storage.len(), shape, numel
489 )));
490 }
491
492 let strides = get_strides_from_shape(shape);
493
494 Ok(TensorView {
495 storage: &self.storage,
496 shape,
497 strides,
498 })
499 }
500
501 pub fn permute_axes(&self, axes: [usize; N]) -> TensorView<T, N, A> {
514 let mut new_shape = [0; N];
515 let mut new_strides = [0; N];
516 for (i, &axis) in axes.iter().enumerate() {
517 new_shape[i] = self.shape[axis];
518 new_strides[i] = self.strides[axis];
519 }
520
521 TensorView {
522 storage: &self.storage,
523 shape: new_shape,
524 strides: new_strides,
525 }
526 }
527
    /// Returns a non-owning view over the whole tensor with the same shape
    /// and strides.
    pub fn view(&self) -> TensorView<T, N, A> {
        TensorView {
            storage: &self.storage,
            shape: self.shape,
            strides: self.strides,
        }
    }
542
    /// Creates a tensor of `shape` with every element set to `T::zero()`.
    pub fn zeros(shape: [usize; N], alloc: A) -> Tensor<T, N, A>
    where
        T: Clone + num_traits::Zero,
    {
        Self::from_shape_val(shape, T::zero(), alloc)
    }
558
559 pub fn map<U, F>(&self, f: F) -> Tensor<U, N, A>
581 where
582 F: Fn(&T) -> U,
583 {
584 let data: Vec<U> = self.as_slice().iter().map(f).collect();
585 let storage = TensorStorage::from_vec(data, self.storage.alloc().clone());
586
587 Tensor {
588 storage,
589 shape: self.shape,
590 strides: self.strides,
591 }
592 }
593
    /// Raises every element to the integer power `n`, element-wise.
    pub fn powi(&self, n: i32) -> Tensor<T, N, A>
    where
        T: Float,
    {
        self.map(|x| x.powi(n))
    }

    /// Takes the absolute value of every element, element-wise.
    pub fn abs(&self) -> Tensor<T, N, A>
    where
        T: Float,
    {
        self.map(|x| x.abs())
    }
621
622 pub fn mean(&self) -> Result<T, TensorError>
628 where
629 T: Float,
630 {
631 let data_acc = self.as_slice().iter().fold(T::zero(), |acc, &x| acc + x);
632 let mean = data_acc / T::from(self.as_slice().len()).ok_or(TensorError::CastError)?;
633
634 Ok(mean)
635 }
636
637 pub fn cast<U>(&self) -> Tensor<U, N, CpuAllocator>
655 where
656 U: From<T>,
657 T: Clone,
658 {
659 let mut data: Vec<U> = Vec::with_capacity(self.storage.len());
660 self.as_slice().iter().for_each(|x| {
661 data.push(U::from(x.clone()));
662 });
663 let storage = TensorStorage::from_vec(data, CpuAllocator);
664 Tensor {
665 storage,
666 shape: self.shape,
667 strides: self.strides,
668 }
669 }
670
671 pub fn element_wise_op<F>(
706 &self,
707 other: &Tensor<T, N, CpuAllocator>,
708 op: F,
709 ) -> Result<Tensor<T, N, CpuAllocator>, TensorError>
710 where
711 F: Fn(&T, &T) -> T,
712 {
713 if self.shape != other.shape {
714 return Err(TensorError::DimensionMismatch(format!(
715 "Shapes {:?} and {:?} are not compatible for element-wise operations",
716 self.shape, other.shape
717 )));
718 }
719
720 let data = self
721 .as_slice()
722 .iter()
723 .zip(other.as_slice().iter())
724 .map(|(a, b)| op(a, b))
725 .collect();
726
727 let storage = TensorStorage::from_vec(data, CpuAllocator);
728
729 Ok(Tensor {
730 storage,
731 shape: self.shape,
732 strides: self.strides,
733 })
734 }
735
    /// Element-wise addition of two same-shaped tensors.
    ///
    /// # Panics
    ///
    /// Panics when the shapes of the two tensors differ.
    pub fn add(&self, other: &Tensor<T, N, CpuAllocator>) -> Tensor<T, N, CpuAllocator>
    where
        T: std::ops::Add<Output = T> + Clone,
    {
        self.element_wise_op(other, |a, b| a.clone() + b.clone())
            .expect("Tensor dimension mismatch")
    }

    /// Element-wise subtraction of two same-shaped tensors.
    ///
    /// # Panics
    ///
    /// Panics when the shapes of the two tensors differ.
    pub fn sub(&self, other: &Tensor<T, N, CpuAllocator>) -> Tensor<T, N, CpuAllocator>
    where
        T: std::ops::Sub<Output = T> + Clone,
    {
        self.element_wise_op(other, |a, b| a.clone() - b.clone())
            .expect("Tensor dimension mismatch")
    }

    /// Element-wise multiplication of two same-shaped tensors.
    ///
    /// # Panics
    ///
    /// Panics when the shapes of the two tensors differ.
    pub fn mul(&self, other: &Tensor<T, N, CpuAllocator>) -> Tensor<T, N, CpuAllocator>
    where
        T: std::ops::Mul<Output = T> + Clone,
    {
        self.element_wise_op(other, |a, b| a.clone() * b.clone())
            .expect("Tensor dimension mismatch")
    }

    /// Element-wise division of two same-shaped tensors.
    ///
    /// # Panics
    ///
    /// Panics when the shapes of the two tensors differ.
    pub fn div(&self, other: &Tensor<T, N, CpuAllocator>) -> Tensor<T, N, CpuAllocator>
    where
        T: std::ops::Div<Output = T> + Clone,
    {
        self.element_wise_op(other, |a, b| a.clone() / b.clone())
            .expect("Tensor dimension mismatch")
    }
863}
864
865impl<T, const N: usize, A> Clone for Tensor<T, N, A>
866where
867 T: Clone,
868 A: TensorAllocator + Clone + 'static,
869{
870 fn clone(&self) -> Self {
871 Self {
872 storage: self.storage.clone(),
873 shape: self.shape,
874 strides: self.strides,
875 }
876 }
877}
878
/// Pretty-printer: renders the tensor as nested bracketed rows, eliding
/// dimensions longer than 8 entries with `...` (numpy-style output).
impl<T, const N: usize, A> std::fmt::Display for Tensor<T, N, A>
where
    T: std::fmt::Display + std::fmt::LowerExp,
    A: TensorAllocator + 'static,
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Widest element rendered with 4 decimal places; used to align
        // columns and to pick fixed vs. scientific notation.
        // NOTE(review): `unwrap()` panics for a tensor with zero elements —
        // consider `max().unwrap_or(0)`.
        let width = self
            .storage
            .as_slice()
            .iter()
            .map(|v| format!("{v:.4}").len())
            .max()
            .unwrap();

        // Values wider than 8 characters switch the output to scientific.
        let scientific = width > 8;

        // Dimensions with more than 8 entries get elided with `...`.
        let should_mask: [bool; N] = self.shape.map(|s| s > 8);
        let mut skip_until = 0;

        for (i, v) in self.storage.as_slice().iter().enumerate() {
            // Elements hidden by an active elision are skipped outright.
            if i < skip_until {
                continue;
            }
            let mut value = String::new();
            let mut prefix = String::new();
            let mut suffix = String::new();
            let mut separator = ",".to_string();
            let mut last_size = 1;
            // Walk dimensions innermost-first to decide which brackets open
            // or close at this element and whether an elision starts here.
            for (dim, (&size, maskable)) in self.shape.iter().zip(should_mask).enumerate().rev() {
                let prod = size * last_size;
                // The 4th entry of an oversized dimension is replaced by
                // `...` and the rest of that dimension (except the last
                // entry) is skipped.
                if i % prod == (3 * last_size) && maskable {
                    let pad = if dim == (N - 1) { 0 } else { dim + 1 };
                    value = format!("{}...", " ".repeat(pad));
                    skip_until = i + (size - 4) * last_size;
                    prefix = "".to_string();
                    if dim != (N - 1) {
                        separator = "\n".repeat(N - 1 - dim);
                    }
                    break;
                } else if i % prod == 0 {
                    // First element of this dimension: open a bracket.
                    prefix.push('[');
                } else if (i + 1) % prod == 0 {
                    // Last element of this dimension: close a bracket and
                    // break the line; the outermost close gets no separator.
                    suffix.push(']');
                    separator.push('\n');
                    if dim == 0 {
                        separator = "".to_string();
                    }
                } else {
                    break;
                }
                last_size = prod;
            }
            // Right-align runs of opening brackets so nested rows line up.
            if !prefix.is_empty() {
                prefix = format!("{prefix:>N$}");
            }

            if value.is_empty() {
                value = if scientific {
                    // Normalize the exponent to an explicit sign plus two
                    // digits, e.g. `1.0000e+03`.
                    let num = format!("{v:.4e}");
                    let (before, after) = num.split_once('e').unwrap();
                    let after = if let Some(stripped) = after.strip_prefix('-') {
                        format!("-{:0>2}", &stripped)
                    } else {
                        format!("+{:0>2}", &after)
                    };
                    format!("{before}e{after}")
                } else {
                    let rounded = format!("{v:.4}");
                    format!("{rounded:>width$}")
                }
            };
            write!(f, "{prefix}{value}{suffix}{separator}",)?;
        }
        Ok(())
    }
}
955
// Unit tests covering construction, indexing, arithmetic, views and
// display formatting. All element types are `u8`/`f32` on the CPU
// allocator, so storage length and element count coincide throughout.
#[cfg(test)]
mod tests {
    use crate::allocator::CpuAllocator;
    use crate::tensor::{Tensor, TensorError};

    // -- Constructors ------------------------------------------------------

    #[test]
    fn constructor_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1];
        let t = Tensor::<u8, 1, _>::from_shape_vec([1], data, CpuAllocator)?;
        assert_eq!(t.shape, [1]);
        assert_eq!(t.as_slice(), vec![1]);
        assert_eq!(t.strides, [1]);
        assert_eq!(t.numel(), 1);
        Ok(())
    }

    #[test]
    fn constructor_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2];
        let t = Tensor::<u8, 2, _>::from_shape_vec([1, 2], data, CpuAllocator)?;
        assert_eq!(t.shape, [1, 2]);
        assert_eq!(t.as_slice(), vec![1, 2]);
        assert_eq!(t.strides, [2, 1]);
        assert_eq!(t.numel(), 2);
        Ok(())
    }

    // -- Checked element access --------------------------------------------

    #[test]
    fn get_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        assert_eq!(t.get([0]), Some(&1));
        assert_eq!(t.get([1]), Some(&2));
        assert_eq!(t.get([2]), Some(&3));
        assert_eq!(t.get([3]), Some(&4));
        assert!(t.get([4]).is_none());
        Ok(())
    }

    #[test]
    fn get_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        assert_eq!(t.get([0, 0]), Some(&1));
        assert_eq!(t.get([0, 1]), Some(&2));
        assert_eq!(t.get([1, 0]), Some(&3));
        assert_eq!(t.get([1, 1]), Some(&4));
        assert!(t.get([2, 0]).is_none());
        assert!(t.get([0, 2]).is_none());
        Ok(())
    }

    #[test]
    fn get_3d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6];
        let t = Tensor::<u8, 3, _>::from_shape_vec([2, 1, 3], data, CpuAllocator)?;
        assert_eq!(t.get([0, 0, 0]), Some(&1));
        assert_eq!(t.get([0, 0, 1]), Some(&2));
        assert_eq!(t.get([0, 0, 2]), Some(&3));
        assert_eq!(t.get([1, 0, 0]), Some(&4));
        assert_eq!(t.get([1, 0, 1]), Some(&5));
        assert_eq!(t.get([1, 0, 2]), Some(&6));
        assert!(t.get([2, 0, 0]).is_none());
        assert!(t.get([0, 1, 0]).is_none());
        assert!(t.get([0, 0, 3]).is_none());
        Ok(())
    }

    // -- Unchecked element access (names say "checked" but these exercise
    //    `get_unchecked`) ---------------------------------------------------

    #[test]
    fn get_checked_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        assert_eq!(*t.get_unchecked([0]), 1);
        assert_eq!(*t.get_unchecked([1]), 2);
        assert_eq!(*t.get_unchecked([2]), 3);
        assert_eq!(*t.get_unchecked([3]), 4);
        Ok(())
    }

    #[test]
    fn get_checked_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        assert_eq!(*t.get_unchecked([0, 0]), 1);
        assert_eq!(*t.get_unchecked([0, 1]), 2);
        assert_eq!(*t.get_unchecked([1, 0]), 3);
        assert_eq!(*t.get_unchecked([1, 1]), 4);
        Ok(())
    }

    // -- Element-wise arithmetic -------------------------------------------

    #[test]
    fn add_1d() -> Result<(), TensorError> {
        let data1: Vec<u8> = vec![1, 2, 3, 4];
        let t1 = Tensor::<u8, 1, _>::from_shape_vec([4], data1, CpuAllocator)?;
        let data2: Vec<u8> = vec![1, 2, 3, 4];
        let t2 = Tensor::<u8, 1, _>::from_shape_vec([4], data2, CpuAllocator)?;
        let t3 = t1.add(&t2);
        assert_eq!(t3.as_slice(), vec![2, 4, 6, 8]);
        Ok(())
    }

    #[test]
    fn add_2d() -> Result<(), TensorError> {
        let data1: Vec<u8> = vec![1, 2, 3, 4];
        let t1 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data1, CpuAllocator)?;
        let data2: Vec<u8> = vec![1, 2, 3, 4];
        let t2 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data2, CpuAllocator)?;
        let t3 = t1.add(&t2);
        assert_eq!(t3.as_slice(), vec![2, 4, 6, 8]);
        Ok(())
    }

    #[test]
    fn add_3d() -> Result<(), TensorError> {
        let data1: Vec<u8> = vec![1, 2, 3, 4, 5, 6];
        let t1 = Tensor::<u8, 3, _>::from_shape_vec([2, 1, 3], data1, CpuAllocator)?;
        let data2: Vec<u8> = vec![1, 2, 3, 4, 5, 6];
        let t2 = Tensor::<u8, 3, _>::from_shape_vec([2, 1, 3], data2, CpuAllocator)?;
        let t3 = t1.add(&t2);
        assert_eq!(t3.as_slice(), vec![2, 4, 6, 8, 10, 12]);
        Ok(())
    }

    #[test]
    fn sub_1d() -> Result<(), TensorError> {
        let data1: Vec<u8> = vec![1, 2, 3, 4];
        let t1 = Tensor::<u8, 1, _>::from_shape_vec([4], data1, CpuAllocator)?;
        let data2: Vec<u8> = vec![1, 2, 3, 4];
        let t2 = Tensor::<u8, 1, _>::from_shape_vec([4], data2, CpuAllocator)?;
        let t3 = t1.sub(&t2);
        assert_eq!(t3.as_slice(), vec![0, 0, 0, 0]);
        Ok(())
    }

    #[test]
    fn sub_2d() -> Result<(), TensorError> {
        let data1: Vec<u8> = vec![1, 2, 3, 4];
        let t1 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data1, CpuAllocator)?;
        let data2: Vec<u8> = vec![1, 2, 3, 4];
        let t2 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data2, CpuAllocator)?;
        let t3 = t1.sub(&t2);
        assert_eq!(t3.as_slice(), vec![0, 0, 0, 0]);
        Ok(())
    }

    #[test]
    fn div_1d() -> Result<(), TensorError> {
        let data1: Vec<u8> = vec![1, 2, 3, 4];
        let t1 = Tensor::<u8, 1, _>::from_shape_vec([4], data1, CpuAllocator)?;
        let data2: Vec<u8> = vec![1, 2, 3, 4];
        let t2 = Tensor::<u8, 1, _>::from_shape_vec([4], data2, CpuAllocator)?;
        let t3 = t1.div(&t2);
        assert_eq!(t3.as_slice(), vec![1, 1, 1, 1]);
        Ok(())
    }

    #[test]
    fn div_2d() -> Result<(), TensorError> {
        let data1: Vec<u8> = vec![1, 2, 3, 4];
        let t1 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data1, CpuAllocator)?;
        let data2: Vec<u8> = vec![1, 2, 3, 4];
        let t2 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data2, CpuAllocator)?;
        let t3 = t1.div(&t2);
        assert_eq!(t3.as_slice(), vec![1, 1, 1, 1]);
        Ok(())
    }

    #[test]
    fn mul_1d() -> Result<(), TensorError> {
        let data1: Vec<u8> = vec![1, 2, 3, 4];
        let t1 = Tensor::<u8, 1, _>::from_shape_vec([4], data1, CpuAllocator)?;
        let data2: Vec<u8> = vec![1, 2, 3, 4];
        let t2 = Tensor::<u8, 1, _>::from_shape_vec([4], data2, CpuAllocator)?;
        let t3 = t1.mul(&t2);
        assert_eq!(t3.as_slice(), vec![1, 4, 9, 16]);
        Ok(())
    }

    #[test]
    fn mul_2d() -> Result<(), TensorError> {
        let data1: Vec<u8> = vec![1, 2, 3, 4];
        let t1 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data1, CpuAllocator)?;
        let data2: Vec<u8> = vec![1, 2, 3, 4];
        let t2 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data2, CpuAllocator)?;
        let t3 = t1.mul(&t2);
        assert_eq!(t3.as_slice(), vec![1, 4, 9, 16]);
        Ok(())
    }

    // -- Reshape, permute and views ----------------------------------------

    #[test]
    fn reshape_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;

        let view = t.reshape([2, 2])?;

        assert_eq!(view.shape, [2, 2]);
        assert_eq!(view.as_slice(), vec![1, 2, 3, 4]);
        assert_eq!(view.strides, [2, 1]);
        assert_eq!(view.numel(), 4);
        assert_eq!(view.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn reshape_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        let t2 = t.reshape([4])?;

        assert_eq!(t2.shape, [4]);
        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
        assert_eq!(t2.strides, [1]);
        assert_eq!(t2.numel(), 4);
        assert_eq!(t2.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn reshape_get_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        let view = t.reshape([2, 2])?;
        assert_eq!(*view.get_unchecked([0, 0]), 1);
        assert_eq!(*view.get_unchecked([0, 1]), 2);
        assert_eq!(*view.get_unchecked([1, 0]), 3);
        assert_eq!(*view.get_unchecked([1, 1]), 4);
        assert_eq!(view.numel(), 4);
        assert_eq!(view.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn permute_axes_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        let t2 = t.permute_axes([0]);
        assert_eq!(t2.shape, [4]);
        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
        assert_eq!(t2.strides, [1]);
        assert_eq!(t2.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn permute_axes_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        let view = t.permute_axes([1, 0]);
        assert_eq!(view.shape, [2, 2]);
        assert_eq!(*view.get_unchecked([0, 0]), 1u8);
        assert_eq!(*view.get_unchecked([1, 0]), 2u8);
        assert_eq!(*view.get_unchecked([0, 1]), 3u8);
        assert_eq!(*view.get_unchecked([1, 1]), 4u8);
        assert_eq!(view.strides, [1, 2]);
        assert_eq!(view.as_contiguous().as_slice(), vec![1, 3, 2, 4]);
        Ok(())
    }

    #[test]
    fn contiguous_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 3], data, CpuAllocator)?;

        let view = t.permute_axes([1, 0]);

        let contiguous = view.as_contiguous();

        assert_eq!(contiguous.shape, [3, 2]);
        assert_eq!(contiguous.strides, [2, 1]);
        assert_eq!(contiguous.as_slice(), vec![1, 4, 2, 5, 3, 6]);

        Ok(())
    }

    // -- Fills and element transforms --------------------------------------

    #[test]
    fn zeros_1d() -> Result<(), TensorError> {
        let t = Tensor::<u8, 1, _>::zeros([4], CpuAllocator);
        assert_eq!(t.as_slice(), vec![0, 0, 0, 0]);
        Ok(())
    }

    #[test]
    fn zeros_2d() -> Result<(), TensorError> {
        let t = Tensor::<u8, 2, _>::zeros([2, 2], CpuAllocator);
        assert_eq!(t.as_slice(), vec![0, 0, 0, 0]);
        Ok(())
    }

    #[test]
    fn map_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        let t2 = t.map(|x| *x + 1);
        assert_eq!(t2.as_slice(), vec![2, 3, 4, 5]);
        Ok(())
    }

    #[test]
    fn map_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        let t2 = t.map(|x| *x + 1);
        assert_eq!(t2.as_slice(), vec![2, 3, 4, 5]);
        Ok(())
    }

    #[test]
    fn from_shape_val_1d() -> Result<(), TensorError> {
        let t = Tensor::<u8, 1, _>::from_shape_val([4], 0, CpuAllocator);
        assert_eq!(t.as_slice(), vec![0, 0, 0, 0]);
        Ok(())
    }

    #[test]
    fn from_shape_val_2d() -> Result<(), TensorError> {
        let t = Tensor::<u8, 2, _>::from_shape_val([2, 2], 1, CpuAllocator);
        assert_eq!(t.as_slice(), vec![1, 1, 1, 1]);
        Ok(())
    }

    #[test]
    fn from_shape_val_3d() -> Result<(), TensorError> {
        let t = Tensor::<u8, 3, _>::from_shape_val([2, 1, 3], 2, CpuAllocator);
        assert_eq!(t.as_slice(), vec![2, 2, 2, 2, 2, 2]);
        Ok(())
    }

    #[test]
    fn cast_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        let t2 = t.cast::<u16>();
        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn cast_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        let t2 = t.cast::<u16>();
        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn from_shape_fn_1d() -> Result<(), TensorError> {
        let alloc = CpuAllocator;
        let t = Tensor::from_shape_fn([3, 3], alloc, |[i, j]| ((1 + i) * (1 + j)) as u8);
        assert_eq!(t.as_slice(), vec![1, 2, 3, 2, 4, 6, 3, 6, 9]);
        Ok(())
    }

    #[test]
    fn from_shape_fn_2d() -> Result<(), TensorError> {
        let alloc = CpuAllocator;
        let t = Tensor::from_shape_fn([3, 3], alloc, |[i, j]| ((1 + i) * (1 + j)) as f32);
        assert_eq!(
            t.as_slice(),
            vec![1.0, 2.0, 3.0, 2.0, 4.0, 6.0, 3.0, 6.0, 9.0]
        );
        Ok(())
    }

    #[test]
    fn from_shape_fn_3d() -> Result<(), TensorError> {
        let alloc = CpuAllocator;
        let t = Tensor::from_shape_fn([2, 3, 3], alloc, |[x, y, c]| {
            ((1 + x) * (1 + y) * (1 + c)) as i16
        });
        assert_eq!(
            t.as_slice(),
            vec![1, 2, 3, 2, 4, 6, 3, 6, 9, 2, 4, 6, 4, 8, 12, 6, 12, 18]
        );
        Ok(())
    }

    #[test]
    fn view_1d() -> Result<(), TensorError> {
        let alloc = CpuAllocator;
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, alloc)?;
        let view = t.view();

        assert_eq!(view.as_slice(), t.as_slice());

        // A view borrows the same storage rather than copying it.
        assert!(std::ptr::eq(view.as_ptr(), t.as_ptr()));

        Ok(())
    }

    #[test]
    fn powi_and_abs() -> Result<(), TensorError> {
        let data: Vec<f32> = vec![-1.0, 2.0, -3.0, 4.0];
        let t = Tensor::<f32, 1, _>::from_shape_vec([4], data, CpuAllocator)?;

        let t_powi = t.powi(2);
        assert_eq!(t_powi.as_slice(), &[1.0, 4.0, 9.0, 16.0]);

        let t_abs = t.abs();
        assert_eq!(t_abs.as_slice(), &[1.0, 2.0, 3.0, 4.0]);

        Ok(())
    }

    #[test]
    fn from_slice() -> Result<(), TensorError> {
        let data: [u8; 4] = [1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;

        assert_eq!(t.shape, [2, 2]);
        assert_eq!(t.as_slice(), &[1, 2, 3, 4]);

        Ok(())
    }

    // -- Display formatting ------------------------------------------------

    #[test]
    fn display_2d() -> Result<(), TensorError> {
        let data: [u8; 4] = [1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;
        let disp = t.to_string();
        let lines = disp.lines().collect::<Vec<_>>();

        #[rustfmt::skip]
        assert_eq!(lines.as_slice(),
            ["[[1,2],",
             " [3,4]]"]);
        Ok(())
    }

    #[test]
    fn display_3d() -> Result<(), TensorError> {
        let data: [u8; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t = Tensor::<u8, 3, _>::from_shape_slice([2, 3, 2], &data, CpuAllocator)?;
        let disp = t.to_string();
        let lines = disp.lines().collect::<Vec<_>>();

        #[rustfmt::skip]
        assert_eq!(lines.as_slice(),
            ["[[[ 1, 2],",
             "  [ 3, 4],",
             "  [ 5, 6]],",
             "",
             " [[ 7, 8],",
             "  [ 9,10],",
             "  [11,12]]]"]);
        Ok(())
    }

    #[test]
    fn display_float() -> Result<(), TensorError> {
        let data: [f32; 4] = [1.00001, 1.00009, 0.99991, 0.99999];
        let t = Tensor::<f32, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;
        let disp = t.to_string();
        let lines = disp.lines().collect::<Vec<_>>();

        #[rustfmt::skip]
        assert_eq!(lines.as_slice(),
            ["[[1.0000,1.0001],",
             " [0.9999,1.0000]]"]);
        Ok(())
    }

    #[test]
    fn display_big_float() -> Result<(), TensorError> {
        let data: [f32; 4] = [1000.00001, 1.00009, 0.99991, 0.99999];
        let t = Tensor::<f32, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;
        let disp = t.to_string();
        let lines = disp.lines().collect::<Vec<_>>();

        #[rustfmt::skip]
        assert_eq!(lines.as_slice(),
            ["[[1.0000e+03,1.0001e+00],",
             " [9.9991e-01,9.9999e-01]]"]);
        Ok(())
    }

    #[test]
    fn display_big_tensor() -> Result<(), TensorError> {
        // Dimensions longer than 8 entries are elided with `...`.
        let data: [u8; 1000] = [0; 1000];
        let t = Tensor::<u8, 3, _>::from_shape_slice([10, 10, 10], &data, CpuAllocator)?;
        let disp = t.to_string();
        let lines = disp.lines().collect::<Vec<_>>();

        #[rustfmt::skip]
        assert_eq!(lines.as_slice(),
            ["[[[0,0,0,...,0],",
             "  [0,0,0,...,0],",
             "  [0,0,0,...,0],",
             "  ...",
             "  [0,0,0,...,0]],",
             "",
             " [[0,0,0,...,0],",
             "  [0,0,0,...,0],",
             "  [0,0,0,...,0],",
             "  ...",
             "  [0,0,0,...,0]],",
             "",
             " [[0,0,0,...,0],",
             "  [0,0,0,...,0],",
             "  [0,0,0,...,0],",
             "  ...",
             "  [0,0,0,...,0]],",
             "",
             " ...",
             "",
             " [[0,0,0,...,0],",
             "  [0,0,0,...,0],",
             "  [0,0,0,...,0],",
             "  ...",
             "  [0,0,0,...,0]]]"]);
        Ok(())
    }

    // -- Offset <-> index conversions --------------------------------------

    #[test]
    fn get_index_unchecked_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator)?;
        assert_eq!(t.get_index_unchecked(0), [0]);
        assert_eq!(t.get_index_unchecked(1), [1]);
        assert_eq!(t.get_index_unchecked(2), [2]);
        assert_eq!(t.get_index_unchecked(3), [3]);
        Ok(())
    }

    #[test]
    fn get_index_unchecked_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator)?;
        assert_eq!(t.get_index_unchecked(0), [0, 0]);
        assert_eq!(t.get_index_unchecked(1), [0, 1]);
        assert_eq!(t.get_index_unchecked(2), [1, 0]);
        assert_eq!(t.get_index_unchecked(3), [1, 1]);
        Ok(())
    }

    #[test]
    fn get_index_unchecked_3d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
        assert_eq!(t.get_index_unchecked(0), [0, 0, 0]);
        assert_eq!(t.get_index_unchecked(1), [0, 0, 1]);
        assert_eq!(t.get_index_unchecked(2), [0, 0, 2]);
        assert_eq!(t.get_index_unchecked(3), [0, 1, 0]);
        assert_eq!(t.get_index_unchecked(4), [0, 1, 1]);
        assert_eq!(t.get_index_unchecked(5), [0, 1, 2]);
        assert_eq!(t.get_index_unchecked(6), [1, 0, 0]);
        assert_eq!(t.get_index_unchecked(7), [1, 0, 1]);
        assert_eq!(t.get_index_unchecked(8), [1, 0, 2]);
        assert_eq!(t.get_index_unchecked(9), [1, 1, 0]);
        assert_eq!(t.get_index_unchecked(10), [1, 1, 1]);
        assert_eq!(t.get_index_unchecked(11), [1, 1, 2]);
        Ok(())
    }

    #[test]
    fn get_index_to_offset_and_back() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
        for offset in 0..12 {
            assert_eq!(
                t.get_iter_offset_unchecked(t.get_index_unchecked(offset)),
                offset
            );
        }
        Ok(())
    }

    #[test]
    fn get_offset_to_index_and_back() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
        for ind in [
            [0, 0, 0],
            [0, 0, 1],
            [0, 0, 2],
            [0, 1, 0],
            [0, 1, 1],
            [0, 1, 2],
            [1, 0, 0],
            [1, 0, 1],
            [1, 0, 2],
            [1, 1, 0],
            [1, 1, 1],
            [1, 1, 2],
        ] {
            assert_eq!(t.get_index_unchecked(t.get_iter_offset_unchecked(ind)), ind);
        }
        Ok(())
    }

    #[test]
    fn get_index_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator)?;
        assert_eq!(t.get_index(3), Ok([3]));
        assert!(t
            .get_index(4)
            .is_err_and(|x| x == TensorError::IndexOutOfBounds(4)));
        Ok(())
    }

    #[test]
    fn get_index_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator)?;
        assert_eq!(t.get_index_unchecked(3), [1, 1]);
        assert!(t
            .get_index(4)
            .is_err_and(|x| x == TensorError::IndexOutOfBounds(4)));
        Ok(())
    }

    #[test]
    fn get_index_3d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
        assert_eq!(t.get_index_unchecked(11), [1, 1, 2]);
        assert!(t
            .get_index(12)
            .is_err_and(|x| x == TensorError::IndexOutOfBounds(12)));
        Ok(())
    }
}