kornia_core/tensor.rs

1use num_traits::Float;
2use thiserror::Error;
3
4use super::{
5    allocator::{CpuAllocator, TensorAllocator, TensorAllocatorError},
6    storage::TensorStorage,
7    view::TensorView,
8};
9
10/// An error type for tensor operations.
11#[derive(Error, Debug, PartialEq)]
12pub enum TensorError {
13    /// Error when the cast operation fails.
14    #[error("Failed to cast data")]
15    CastError,
16
17    /// The number of elements in the data does not match the shape of the tensor.
18    #[error("The number of elements in the data does not match the shape of the tensor: {0}")]
19    InvalidShape(usize),
20
21    /// Index out of bounds.
22    #[error("Index out of bounds. The index {0} is out of bounds.")]
23    IndexOutOfBounds(usize),
24
25    /// Error with the tensor storage.
26    #[error("Error with the tensor storage: {0}")]
27    StorageError(#[from] TensorAllocatorError),
28
29    /// Dimension mismatch for operations requiring compatible shapes.
30    #[error("Dimension mismatch: {0}")]
31    DimensionMismatch(String),
32
33    /// Unsupported operation for the given data type or tensor configuration.
34    #[error("Unsupported operation: {0}")]
35    UnsupportedOperation(String),
36}
37
38/// Compute the strides from the shape of a tensor.
39///
40/// # Arguments
41///
42/// * `shape` - The shape of the tensor.
43///
44/// # Returns
45///
46/// * `strides` - The strides of the tensor.
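///
/// For a row-major (C-contiguous) layout the last dimension has stride 1; for example,
/// a shape of `[2, 3, 4]` yields strides `[12, 4, 1]`.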
47pub(crate) fn get_strides_from_shape<const N: usize>(shape: [usize; N]) -> [usize; N] {
48    let mut strides: [usize; N] = [0; N];
49    let mut stride = 1;
50    for i in (0..shape.len()).rev() {
51        strides[i] = stride;
52        stride *= shape[i];
53    }
54    strides
55}
56
57/// A data structure to represent a multi-dimensional tensor.
58///
59/// NOTE: Internally, the data is stored as an `arrow::ScalarBuffer` which represents a contiguous memory
60/// region that can be shared with other buffers and across thread boundaries.
61///
62/// # Attributes
63///
64/// * `storage` - The storage of the tensor.
65/// * `shape` - The shape of the tensor.
66/// * `strides` - The strides of the tensor data in memory.
67///
68/// # Example
69///
70/// ```
71/// use kornia_core::{Tensor, CpuAllocator};
72///
73/// let data: Vec<u8> = vec![1, 2, 3, 4];
74/// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
75/// assert_eq!(t.shape, [2, 2]);
76/// ```
77pub struct Tensor<T, const N: usize, A: TensorAllocator> {
78    /// The storage of the tensor.
79    pub storage: TensorStorage<T, A>,
80    /// The shape of the tensor.
81    pub shape: [usize; N],
82    /// The strides of the tensor data in memory.
83    pub strides: [usize; N],
84}
85
86impl<T, const N: usize, A: TensorAllocator> Tensor<T, N, A>
87where
88    A: 'static,
89{
90    /// Get the data of the tensor as a slice.
91    ///
92    /// # Returns
93    ///
94    /// A slice containing the data of the tensor.
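    ///
    /// # Example
    ///
    /// A minimal example reading back the underlying data in row-major order:
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let data: Vec<u8> = vec![1, 2, 3];
    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([3], data, CpuAllocator).unwrap();
    /// assert_eq!(t.as_slice(), vec![1, 2, 3]);
    /// ```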
95    #[inline]
96    pub fn as_slice(&self) -> &[T] {
97        self.storage.as_slice()
98    }
99
100    /// Get the data of the tensor as a mutable slice.
101    ///
102    /// # Returns
103    ///
104    /// A mutable slice containing the data of the tensor.
105    #[inline]
106    pub fn as_slice_mut(&mut self) -> &mut [T] {
107        self.storage.as_mut_slice()
108    }
109
110    /// Get the data of the tensor as a pointer.
111    ///
112    /// # Returns
113    ///
114    /// A pointer to the data of the tensor.
115    #[inline]
116    pub fn as_ptr(&self) -> *const T {
117        self.storage.as_ptr()
118    }
119
120    /// Get the data of the tensor as a mutable pointer.
121    ///
122    /// # Returns
123    ///
124    /// A mutable pointer to the data of the tensor.
125    #[inline]
126    pub fn as_mut_ptr(&mut self) -> *mut T {
127        self.storage.as_mut_ptr()
128    }
129
130    /// Consumes the tensor and returns the underlying vector.
131    ///
132    /// This method destroys the tensor and returns ownership of the underlying data.
133    /// The returned vector will have a length equal to the total number of elements in the tensor.
134    ///
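    /// # Example
    ///
    /// A short example; the returned vector contains the tensor's elements:
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([3], vec![1, 2, 3], CpuAllocator).unwrap();
    /// assert_eq!(t.into_vec(), vec![1, 2, 3]);
    /// ```
    ///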
135    #[inline]
136    pub fn into_vec(self) -> Vec<T> {
137        self.storage.into_vec()
138    }
139
140    /// Creates a new `Tensor` with the given shape and data.
141    ///
142    /// # Arguments
143    ///
144    /// * `shape` - An array containing the shape of the tensor.
145    /// * `data` - A vector containing the data of the tensor.
146    /// * `alloc` - The allocator to use.
147    ///
148    /// # Returns
149    ///
150    /// A new `Tensor` instance.
151    ///
152    /// # Errors
153    ///
154    /// If the number of elements in the data does not match the shape of the tensor, an error is returned.
155    ///
156    /// # Example
157    ///
158    /// ```
159    /// use kornia_core::{Tensor, CpuAllocator};
160    ///
161    /// let data: Vec<u8> = vec![1, 2, 3, 4];
162    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
163    /// assert_eq!(t.shape, [2, 2]);
164    /// ```
165    pub fn from_shape_vec(shape: [usize; N], data: Vec<T>, alloc: A) -> Result<Self, TensorError> {
166        let numel = shape.iter().product::<usize>();
167        if numel != data.len() {
168            return Err(TensorError::InvalidShape(numel));
169        }
170        let storage = TensorStorage::from_vec(data, alloc);
171        let strides = get_strides_from_shape(shape);
172        Ok(Self {
173            storage,
174            shape,
175            strides,
176        })
177    }
178
179    /// Creates a new `Tensor` with the given shape and slice of data.
180    ///
181    /// # Arguments
182    ///
183    /// * `shape` - An array containing the shape of the tensor.
184    /// * `data` - A slice containing the data of the tensor.
185    /// * `alloc` - The allocator to use.
186    ///
187    /// # Returns
188    ///
189    /// A new `Tensor` instance.
190    ///
191    /// # Errors
192    ///
193    /// If the number of elements in the data does not match the shape of the tensor, an error is returned.
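    ///
    /// # Example
    ///
    /// The data is cloned from the slice into the tensor's own storage:
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let data: [u8; 4] = [1, 2, 3, 4];
    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_slice([2, 2], &data, CpuAllocator).unwrap();
    /// assert_eq!(t.shape, [2, 2]);
    /// assert_eq!(t.as_slice(), &[1, 2, 3, 4]);
    /// ```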
194    pub fn from_shape_slice(shape: [usize; N], data: &[T], alloc: A) -> Result<Self, TensorError>
195    where
196        T: Clone,
197    {
198        let numel = shape.iter().product::<usize>();
199        if numel != data.len() {
200            return Err(TensorError::InvalidShape(numel));
201        }
202        let storage = TensorStorage::from_vec(data.to_vec(), alloc);
203        let strides = get_strides_from_shape(shape);
204        Ok(Self {
205            storage,
206            shape,
207            strides,
208        })
209    }
210
211    /// Creates a new `Tensor` with the given shape and a default value.
212    ///
213    /// # Arguments
214    ///
215    /// * `shape` - An array containing the shape of the tensor.
    /// * `value` - The default value to fill the tensor with.
    /// * `alloc` - The allocator to use.
217    ///
218    /// # Returns
219    ///
220    /// A new `Tensor` instance.
221    ///
222    /// # Example
223    ///
224    /// ```
225    /// use kornia_core::{Tensor, CpuAllocator};
226    ///
227    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_val([4], 0, CpuAllocator);
228    /// assert_eq!(t.as_slice(), vec![0, 0, 0, 0]);
229    ///
230    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_val([2, 2], 1, CpuAllocator);
231    /// assert_eq!(t.as_slice(), vec![1, 1, 1, 1]);
232    ///
233    /// let t = Tensor::<u8, 3, CpuAllocator>::from_shape_val([2, 1, 3], 2, CpuAllocator);
234    /// assert_eq!(t.as_slice(), vec![2, 2, 2, 2, 2, 2]);
235    /// ```
236    pub fn from_shape_val(shape: [usize; N], value: T, alloc: A) -> Self
237    where
238        T: Clone,
239    {
240        let numel = shape.iter().product::<usize>();
241        let data = vec![value; numel];
242        let storage = TensorStorage::from_vec(data, alloc);
243        let strides = get_strides_from_shape(shape);
244        Self {
245            storage,
246            shape,
247            strides,
248        }
249    }
250
251    /// Create a new `Tensor` with the given shape and a function to generate the data.
252    ///
253    /// The function `f` is called with the index of the element to generate.
254    ///
255    /// # Arguments
256    ///
257    /// * `shape` - An array containing the shape of the tensor.
    /// * `alloc` - The allocator to use.
    /// * `f` - The function to generate the data.
259    ///
260    /// # Returns
261    ///
262    /// A new `Tensor` instance.
263    ///
264    /// # Example
265    ///
266    /// ```
267    /// use kornia_core::{Tensor, CpuAllocator};
268    ///
269    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_fn([4], CpuAllocator, |[i]| i as u8);
270    /// assert_eq!(t.as_slice(), vec![0, 1, 2, 3]);
271    ///
272    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_fn([2, 2], CpuAllocator, |[i, j]| (i * 2 + j) as u8);
273    /// assert_eq!(t.as_slice(), vec![0, 1, 2, 3]);
274    /// ```
275    pub fn from_shape_fn<F>(shape: [usize; N], alloc: A, f: F) -> Self
276    where
277        F: Fn([usize; N]) -> T,
278    {
279        let numel = shape.iter().product::<usize>();
280        let data: Vec<T> = (0..numel)
281            .map(|i| {
282                let mut index = [0; N];
283                let mut j = i;
284                for k in (0..N).rev() {
285                    index[k] = j % shape[k];
286                    j /= shape[k];
287                }
288                f(index)
289            })
290            .collect();
291        let storage = TensorStorage::from_vec(data, alloc);
292        let strides = get_strides_from_shape(shape);
293        Self {
294            storage,
295            shape,
296            strides,
297        }
298    }
299
300    /// Returns the number of elements in the tensor.
301    ///
302    /// # Returns
303    ///
304    /// The number of elements in the tensor.
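    ///
    /// # Example
    ///
    /// The total element count is the product of the shape dimensions:
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_val([2, 3], 0, CpuAllocator);
    /// assert_eq!(t.numel(), 6);
    /// ```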
305    #[inline]
306    pub fn numel(&self) -> usize {
        self.storage.len()
308    }
309
310    /// Get the offset of the element at the given index.
311    ///
312    /// # Arguments
313    ///
314    /// * `index` - The list of indices to get the element from.
315    ///
    /// # Returns
    ///
    /// `Some(offset)` for the element at the given index, or `None` if any index is out of bounds.
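    ///
    /// # Example
    ///
    /// For a `[2, 2]` tensor the row-major strides are `[2, 1]`, so index `[1, 0]` maps to offset `2`:
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let data: Vec<u8> = vec![1, 2, 3, 4];
    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
    /// assert_eq!(t.get_iter_offset([1, 0]), Some(2));
    /// assert_eq!(t.get_iter_offset([2, 0]), None);
    /// ```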
319    pub fn get_iter_offset(&self, index: [usize; N]) -> Option<usize> {
320        let mut offset = 0;
321        for ((&idx, dim_size), stride) in index.iter().zip(self.shape).zip(self.strides) {
322            if idx >= dim_size {
323                return None;
324            }
325            offset += idx * stride;
326        }
327        Some(offset)
328    }
329
330    /// Get the offset of the element at the given index without checking dim sizes.
331    ///
332    /// # Arguments
333    ///
334    /// * `index` - The list of indices to get the element from.
335    ///
336    /// # Returns
337    ///
338    /// The offset of the element at the given index.
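    ///
    /// # Example
    ///
    /// Unlike [`Self::get_iter_offset`], no bounds check is performed on the indices:
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let data: Vec<u8> = vec![1, 2, 3, 4];
    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
    /// assert_eq!(t.get_iter_offset_unchecked([1, 1]), 3);
    /// ```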
339    pub fn get_iter_offset_unchecked(&self, index: [usize; N]) -> usize {
340        let mut offset = 0;
341        for (&idx, stride) in index.iter().zip(self.strides) {
342            offset += idx * stride;
343        }
344        offset
345    }
346
347    /// Get the index of the element at the given offset without checking dim sizes. The reverse of `Self::get_iter_offset_unchecked`.
348    ///
349    /// # Arguments
350    ///
351    /// * `offset` - The offset of the element at the given index.
352    ///
353    /// # Returns
354    ///
355    /// The array of indices to get the element from.
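    ///
    /// # Example
    ///
    /// For a `[2, 2]` tensor, offset `3` corresponds to index `[1, 1]`:
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let data: Vec<u8> = vec![1, 2, 3, 4];
    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
    /// assert_eq!(t.get_index_unchecked(3), [1, 1]);
    /// ```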
356    pub fn get_index_unchecked(&self, offset: usize) -> [usize; N] {
357        let mut idx = [0; N];
358        let mut rem = offset;
359        for (dim_i, s) in self.strides.iter().enumerate() {
360            idx[dim_i] = rem / s;
361            rem = offset % s;
362        }
363
364        idx
365    }
366
367    /// Get the index of the element at the given offset. The reverse of `Self::get_iter_offset`.
368    ///
369    /// # Arguments
370    ///
371    /// * `offset` - The offset of the element at the given index.
372    ///
373    /// # Returns
374    ///
375    /// The array of indices to get the element from.
376    ///
377    /// # Errors
378    ///
379    /// If the offset is out of bounds (>= numel), an error is returned.
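    ///
    /// # Example
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let data: Vec<u8> = vec![1, 2, 3, 4];
    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
    /// assert_eq!(t.get_index(3), Ok([1, 1]));
    /// assert!(t.get_index(4).is_err());
    /// ```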
380    pub fn get_index(&self, offset: usize) -> Result<[usize; N], TensorError> {
381        if offset >= self.numel() {
382            return Err(TensorError::IndexOutOfBounds(offset));
383        }
384        let idx = self.get_index_unchecked(offset);
385
386        Ok(idx)
387    }
388
389    /// Get the element at the given index without checking if the index is out of bounds.
390    ///
391    /// # Arguments
392    ///
393    /// * `index` - The list of indices to get the element from.
394    ///
395    /// # Returns
396    ///
397    /// A reference to the element at the given index.
398    ///
399    /// # Example
400    ///
401    /// ```
402    /// use kornia_core::{Tensor, CpuAllocator};
403    ///
404    /// let data: Vec<u8> = vec![1, 2, 3, 4];
405    ///
406    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
407    /// assert_eq!(*t.get_unchecked([0, 0]), 1);
408    /// assert_eq!(*t.get_unchecked([0, 1]), 2);
409    /// assert_eq!(*t.get_unchecked([1, 0]), 3);
410    /// assert_eq!(*t.get_unchecked([1, 1]), 4);
411    /// ```
412    pub fn get_unchecked(&self, index: [usize; N]) -> &T {
413        let offset = self.get_iter_offset_unchecked(index);
414        unsafe { self.storage.as_slice().get_unchecked(offset) }
415    }
416
417    /// Get the element at the given index, checking if the index is out of bounds.
418    ///
419    /// # Arguments
420    ///
421    /// * `index` - The list of indices to get the element from.
422    ///
423    /// # Returns
424    ///
425    /// A reference to the element at the given index.
426    ///
427    /// # Errors
428    ///
429    /// If the index is out of bounds, an error is returned.
430    ///
431    /// # Example
432    ///
433    /// ```
434    /// use kornia_core::{Tensor, CpuAllocator};
435    ///
436    /// let data: Vec<u8> = vec![1, 2, 3, 4];
437    ///
438    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
439    ///
440    /// assert_eq!(t.get([0, 0]), Some(&1));
441    /// assert_eq!(t.get([0, 1]), Some(&2));
442    /// assert_eq!(t.get([1, 0]), Some(&3));
443    /// assert_eq!(t.get([1, 1]), Some(&4));
444    ///
445    /// assert!(t.get([2, 0]).is_none());
446    /// ```
447    pub fn get(&self, index: [usize; N]) -> Option<&T> {
448        self.get_iter_offset(index)
449            .and_then(|i| self.storage.as_slice().get(i))
450    }
451
452    /// Reshape the tensor to a new shape.
453    ///
454    /// # Arguments
455    ///
456    /// * `shape` - The new shape of the tensor.
457    ///
458    /// # Returns
459    ///
460    /// A new `TensorView` instance.
461    ///
462    /// # Errors
463    ///
464    /// If the number of elements in the new shape does not match the number of elements in the tensor, an error is returned.
465    ///
466    /// # Example
467    ///
468    /// ```
469    /// use kornia_core::{Tensor, CpuAllocator};
470    ///
471    /// let data: Vec<u8> = vec![1, 2, 3, 4];
472    ///
473    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator).unwrap();
474    /// let t2 = t.reshape([2, 2]).unwrap();
475    /// assert_eq!(t2.shape, [2, 2]);
476    /// assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
477    /// assert_eq!(t2.strides, [2, 1]);
478    /// assert_eq!(t2.numel(), 4);
479    /// ```
480    pub fn reshape<const M: usize>(
481        &self,
482        shape: [usize; M],
483    ) -> Result<TensorView<T, M, A>, TensorError> {
484        let numel = shape.iter().product::<usize>();
485        if numel != self.storage.len() {
486            return Err(TensorError::DimensionMismatch(format!(
487                "Cannot reshape tensor of shape {:?} with {} elements to shape {:?} with {} elements",
488                self.shape, self.storage.len(), shape, numel
489            )));
490        }
491
492        let strides = get_strides_from_shape(shape);
493
494        Ok(TensorView {
495            storage: &self.storage,
496            shape,
497            strides,
498        })
499    }
500
501    /// Permute the dimensions of the tensor.
502    ///
    /// The permutation is given as an array of indices, where `axes[i]` is the index of the source dimension that becomes dimension `i` of the resulting view.
    /// The data is not moved; only the shape and strides are rearranged.
505    ///
506    /// # Arguments
507    ///
508    /// * `axes` - The new order of the dimensions.
509    ///
510    /// # Returns
511    ///
512    /// A view of the tensor with the dimensions permuted.
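    ///
    /// # Example
    ///
    /// Transposing a `[2, 2]` tensor swaps the shape and strides of the two dimensions:
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let data: Vec<u8> = vec![1, 2, 3, 4];
    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
    /// let view = t.permute_axes([1, 0]);
    /// assert_eq!(view.shape, [2, 2]);
    /// assert_eq!(view.strides, [1, 2]);
    /// assert_eq!(*view.get_unchecked([0, 1]), 3);
    /// ```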
513    pub fn permute_axes(&self, axes: [usize; N]) -> TensorView<T, N, A> {
514        let mut new_shape = [0; N];
515        let mut new_strides = [0; N];
516        for (i, &axis) in axes.iter().enumerate() {
517            new_shape[i] = self.shape[axis];
518            new_strides[i] = self.strides[axis];
519        }
520
521        TensorView {
522            storage: &self.storage,
523            shape: new_shape,
524            strides: new_strides,
525        }
526    }
527
528    /// Return a view of the tensor.
529    ///
530    /// The view is a reference to the tensor storage with a different shape and strides.
531    ///
532    /// # Returns
533    ///
534    /// A `TensorView` instance.
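    ///
    /// # Example
    ///
    /// The view borrows the same underlying storage, so no data is copied:
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let data: Vec<u8> = vec![1, 2, 3, 4];
    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator).unwrap();
    /// let view = t.view();
    /// assert_eq!(view.as_slice(), t.as_slice());
    /// assert!(std::ptr::eq(view.as_ptr(), t.as_ptr()));
    /// ```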
535    pub fn view(&self) -> TensorView<T, N, A> {
536        TensorView {
537            storage: &self.storage,
538            shape: self.shape,
539            strides: self.strides,
540        }
541    }
542
543    /// Create a new tensor with all elements set to zero.
544    ///
545    /// # Arguments
546    ///
547    /// * `shape` - The shape of the tensor.
548    /// * `alloc` - The allocator to use.
549    ///
    /// # Returns
    ///
    /// A new `Tensor` with all elements set to zero.
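    ///
    /// # Example
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let t = Tensor::<f32, 2, CpuAllocator>::zeros([2, 2], CpuAllocator);
    /// assert_eq!(t.as_slice(), vec![0.0, 0.0, 0.0, 0.0]);
    /// ```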
551    pub fn zeros(shape: [usize; N], alloc: A) -> Tensor<T, N, A>
552    where
553        T: Clone + num_traits::Zero,
554    {
556        Self::from_shape_val(shape, T::zero(), alloc)
557    }
558
559    /// Apply a function to each element of the tensor.
560    ///
561    /// # Arguments
562    ///
563    /// * `f` - The function to apply to each element.
564    ///
565    /// # Returns
566    ///
567    /// A new `Tensor` instance.
568    ///
569    /// # Example
570    ///
571    /// ```
572    /// use kornia_core::{Tensor, CpuAllocator};
573    ///
574    /// let data: Vec<u8> = vec![1, 2, 3, 4];
575    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator).unwrap();
576    ///
577    /// let t2 = t.map(|x| *x + 1);
578    /// assert_eq!(t2.as_slice(), vec![2, 3, 4, 5]);
579    /// ```
580    pub fn map<U, F>(&self, f: F) -> Tensor<U, N, A>
581    where
582        F: Fn(&T) -> U,
583    {
584        let data: Vec<U> = self.as_slice().iter().map(f).collect();
585        let storage = TensorStorage::from_vec(data, self.storage.alloc().clone());
586
587        Tensor {
588            storage,
589            shape: self.shape,
590            strides: self.strides,
591        }
592    }
593
    /// Raise each element of the tensor to an integer power.
    ///
    /// # Arguments
    ///
    /// * `n` - The power to raise each element to.
    ///
    /// # Returns
    ///
    /// A new `Tensor` with each element raised to the power `n`.
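    ///
    /// # Example
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let data: Vec<f32> = vec![-1.0, 2.0, -3.0, 4.0];
    /// let t = Tensor::<f32, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator).unwrap();
    /// assert_eq!(t.powi(2).as_slice(), &[1.0, 4.0, 9.0, 16.0]);
    /// ```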
603    pub fn powi(&self, n: i32) -> Tensor<T, N, A>
604    where
605        T: Float,
606    {
607        self.map(|x| x.powi(n))
608    }
609
    /// Compute the element-wise absolute value of the tensor.
    ///
    /// # Returns
    ///
    /// A new `Tensor` containing the absolute value of each element.
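    ///
    /// # Example
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let data: Vec<f32> = vec![-1.0, 2.0, -3.0, 4.0];
    /// let t = Tensor::<f32, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator).unwrap();
    /// assert_eq!(t.abs().as_slice(), &[1.0, 2.0, 3.0, 4.0]);
    /// ```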
615    pub fn abs(&self) -> Tensor<T, N, A>
616    where
617        T: Float,
618    {
619        self.map(|x| x.abs())
620    }
621
    /// Compute the mean of all elements in the tensor.
    ///
    /// # Returns
    ///
    /// The mean of the tensor elements.
    ///
    /// # Errors
    ///
    /// Returns [`TensorError::CastError`] if the element count cannot be converted to `T`.
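    ///
    /// # Example
    ///
    /// ```
    /// use kornia_core::{Tensor, CpuAllocator};
    ///
    /// let data: Vec<f32> = vec![1.0, 2.0, 3.0, 4.0];
    /// let t = Tensor::<f32, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator).unwrap();
    /// assert_eq!(t.mean(), Ok(2.5));
    /// ```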
627    pub fn mean(&self) -> Result<T, TensorError>
628    where
629        T: Float,
630    {
631        let data_acc = self.as_slice().iter().fold(T::zero(), |acc, &x| acc + x);
632        let mean = data_acc / T::from(self.as_slice().len()).ok_or(TensorError::CastError)?;
633
634        Ok(mean)
635    }
636
637    /// Cast the tensor to a new type.
638    ///
639    /// # Returns
640    ///
641    /// A new `Tensor` instance.
642    ///
643    /// # Example
644    ///
645    /// ```
646    /// use kornia_core::{Tensor, CpuAllocator};
647    ///
648    /// let data: Vec<u8> = vec![1, 2, 3, 4];
649    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator).unwrap();
650    ///
651    /// let t2 = t.cast::<f32>();
652    /// assert_eq!(t2.as_slice(), vec![1.0, 2.0, 3.0, 4.0]);
653    /// ```
654    pub fn cast<U>(&self) -> Tensor<U, N, CpuAllocator>
655    where
656        U: From<T>,
657        T: Clone,
658    {
659        let mut data: Vec<U> = Vec::with_capacity(self.storage.len());
660        self.as_slice().iter().for_each(|x| {
661            data.push(U::from(x.clone()));
662        });
663        let storage = TensorStorage::from_vec(data, CpuAllocator);
664        Tensor {
665            storage,
666            shape: self.shape,
667            strides: self.strides,
668        }
669    }
670
671    /// Perform an element-wise operation on two tensors.
672    ///
673    /// # Arguments
674    ///
675    /// * `other` - The other tensor to perform the operation with.
676    /// * `op` - The operation to perform.
677    ///
678    /// # Returns
679    ///
680    /// A new `Tensor` instance.
681    ///
682    /// # Example
683    ///
684    /// ```
685    /// use kornia_core::{Tensor, CpuAllocator};
686    ///
687    /// let data1: Vec<u8> = vec![1, 2, 3, 4];
688    /// let t1 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data1, CpuAllocator).unwrap();
689    ///
690    /// let data2: Vec<u8> = vec![1, 2, 3, 4];
691    /// let t2 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data2, CpuAllocator).unwrap();
692    ///
693    /// let t3 = t1.element_wise_op(&t2, |a, b| *a + *b).unwrap();
694    /// assert_eq!(t3.as_slice(), vec![2, 4, 6, 8]);
695    ///
696    /// let t4 = t1.element_wise_op(&t2, |a, b| *a - *b).unwrap();
697    /// assert_eq!(t4.as_slice(), vec![0, 0, 0, 0]);
698    ///
699    /// let t5 = t1.element_wise_op(&t2, |a, b| *a * *b).unwrap();
700    /// assert_eq!(t5.as_slice(), vec![1, 4, 9, 16]);
701    ///
702    /// let t6 = t1.element_wise_op(&t2, |a, b| *a / *b).unwrap();
703    /// assert_eq!(t6.as_slice(), vec![1, 1, 1, 1]);
704    /// ```
705    pub fn element_wise_op<F>(
706        &self,
707        other: &Tensor<T, N, CpuAllocator>,
708        op: F,
709    ) -> Result<Tensor<T, N, CpuAllocator>, TensorError>
710    where
711        F: Fn(&T, &T) -> T,
712    {
713        if self.shape != other.shape {
714            return Err(TensorError::DimensionMismatch(format!(
715                "Shapes {:?} and {:?} are not compatible for element-wise operations",
716                self.shape, other.shape
717            )));
718        }
719
720        let data = self
721            .as_slice()
722            .iter()
723            .zip(other.as_slice().iter())
724            .map(|(a, b)| op(a, b))
725            .collect();
726
727        let storage = TensorStorage::from_vec(data, CpuAllocator);
728
729        Ok(Tensor {
730            storage,
731            shape: self.shape,
732            strides: self.strides,
733        })
734    }
735
736    /// Perform an element-wise addition on two tensors.
737    ///
738    /// # Arguments
739    ///
740    /// * `other` - The other tensor to add.
741    ///
742    /// # Returns
743    ///
744    /// A new `Tensor` instance.
745    ///
746    /// # Example
747    ///
748    /// ```
749    /// use kornia_core::{Tensor, CpuAllocator};
750    ///
751    /// let data1: Vec<u8> = vec![1, 2, 3, 4];
752    /// let t1 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data1, CpuAllocator).unwrap();
753    ///
754    /// let data2: Vec<u8> = vec![1, 2, 3, 4];
755    /// let t2 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data2, CpuAllocator).unwrap();
756    ///
757    /// let t3 = t1.add(&t2);
758    /// assert_eq!(t3.as_slice(), vec![2, 4, 6, 8]);
759    /// ```
760    pub fn add(&self, other: &Tensor<T, N, CpuAllocator>) -> Tensor<T, N, CpuAllocator>
761    where
762        T: std::ops::Add<Output = T> + Clone,
763    {
764        self.element_wise_op(other, |a, b| a.clone() + b.clone())
765            .expect("Tensor dimension mismatch")
766    }
767
768    /// Perform an element-wise subtraction on two tensors.
769    ///
770    /// # Arguments
771    ///
772    /// * `other` - The other tensor to subtract.
773    ///
774    /// # Returns
775    ///
776    /// A new `Tensor` instance.
777    ///
778    /// # Example
779    ///
780    /// ```
781    /// use kornia_core::{Tensor, CpuAllocator};
782    ///
783    /// let data1: Vec<u8> = vec![1, 2, 3, 4];
784    /// let t1 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data1, CpuAllocator).unwrap();
785    ///
786    /// let data2: Vec<u8> = vec![1, 2, 3, 4];
787    /// let t2 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data2, CpuAllocator).unwrap();
788    ///
789    /// let t3 = t1.sub(&t2);
790    /// assert_eq!(t3.as_slice(), vec![0, 0, 0, 0]);
791    /// ```
792    pub fn sub(&self, other: &Tensor<T, N, CpuAllocator>) -> Tensor<T, N, CpuAllocator>
793    where
794        T: std::ops::Sub<Output = T> + Clone,
795    {
796        self.element_wise_op(other, |a, b| a.clone() - b.clone())
797            .expect("Tensor dimension mismatch")
798    }
799
800    /// Perform an element-wise multiplication on two tensors.
801    ///
802    /// # Arguments
803    ///
804    /// * `other` - The other tensor to multiply.
805    ///
806    /// # Returns
807    ///
808    /// A new `Tensor` instance.
809    ///
810    /// # Example
811    ///
812    /// ```
813    /// use kornia_core::{Tensor, CpuAllocator};
814    ///
815    /// let data1: Vec<u8> = vec![1, 2, 3, 4];
816    /// let t1 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data1, CpuAllocator).unwrap();
817    ///
818    /// let data2: Vec<u8> = vec![1, 2, 3, 4];
819    /// let t2 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data2, CpuAllocator).unwrap();
820    ///
821    /// let t3 = t1.mul(&t2);
822    /// assert_eq!(t3.as_slice(), vec![1, 4, 9, 16]);
823    /// ```
824    pub fn mul(&self, other: &Tensor<T, N, CpuAllocator>) -> Tensor<T, N, CpuAllocator>
825    where
826        T: std::ops::Mul<Output = T> + Clone,
827    {
828        self.element_wise_op(other, |a, b| a.clone() * b.clone())
829            .expect("Tensor dimension mismatch")
830    }
831
832    /// Perform an element-wise division on two tensors.
833    ///
834    /// # Arguments
835    ///
836    /// * `other` - The other tensor to divide.
837    ///
838    /// # Returns
839    ///
840    /// A new `Tensor` instance.
841    ///
842    /// # Example
843    ///
844    /// ```
845    /// use kornia_core::{Tensor, CpuAllocator};
846    ///
847    /// let data1: Vec<u8> = vec![1, 2, 3, 4];
848    /// let t1 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data1, CpuAllocator).unwrap();
849    ///
850    /// let data2: Vec<u8> = vec![1, 2, 3, 4];
851    /// let t2 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data2, CpuAllocator).unwrap();
852    ///
853    /// let t3 = t1.div(&t2);
854    /// assert_eq!(t3.as_slice(), vec![1, 1, 1, 1]);
855    /// ```
856    pub fn div(&self, other: &Tensor<T, N, CpuAllocator>) -> Tensor<T, N, CpuAllocator>
857    where
858        T: std::ops::Div<Output = T> + Clone,
859    {
860        self.element_wise_op(other, |a, b| a.clone() / b.clone())
861            .expect("Tensor dimension mismatch")
862    }
863}
864
865impl<T, const N: usize, A> Clone for Tensor<T, N, A>
866where
867    T: Clone,
868    A: TensorAllocator + Clone + 'static,
869{
870    fn clone(&self) -> Self {
871        Self {
872            storage: self.storage.clone(),
873            shape: self.shape,
874            strides: self.strides,
875        }
876    }
877}
878
879impl<T, const N: usize, A> std::fmt::Display for Tensor<T, N, A>
880where
881    T: std::fmt::Display + std::fmt::LowerExp,
882    A: TensorAllocator + 'static,
883{
884    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
885        let width = self
886            .storage
887            .as_slice()
888            .iter()
889            .map(|v| format!("{v:.4}").len())
890            .max()
891            .unwrap();
892
893        let scientific = width > 8;
894
895        let should_mask: [bool; N] = self.shape.map(|s| s > 8);
896        let mut skip_until = 0;
897
898        for (i, v) in self.storage.as_slice().iter().enumerate() {
899            if i < skip_until {
900                continue;
901            }
902            let mut value = String::new();
903            let mut prefix = String::new();
904            let mut suffix = String::new();
905            let mut separator = ",".to_string();
906            let mut last_size = 1;
907            for (dim, (&size, maskable)) in self.shape.iter().zip(should_mask).enumerate().rev() {
908                let prod = size * last_size;
909                if i % prod == (3 * last_size) && maskable {
910                    let pad = if dim == (N - 1) { 0 } else { dim + 1 };
911                    value = format!("{}...", " ".repeat(pad));
912                    skip_until = i + (size - 4) * last_size;
913                    prefix = "".to_string();
914                    if dim != (N - 1) {
915                        separator = "\n".repeat(N - 1 - dim);
916                    }
917                    break;
918                } else if i % prod == 0 {
919                    prefix.push('[');
920                } else if (i + 1) % prod == 0 {
921                    suffix.push(']');
922                    separator.push('\n');
923                    if dim == 0 {
924                        separator = "".to_string();
925                    }
926                } else {
927                    break;
928                }
929                last_size = prod;
930            }
931            if !prefix.is_empty() {
932                prefix = format!("{prefix:>N$}");
933            }
934
935            if value.is_empty() {
936                value = if scientific {
937                    let num = format!("{v:.4e}");
938                    let (before, after) = num.split_once('e').unwrap();
939                    let after = if let Some(stripped) = after.strip_prefix('-') {
940                        format!("-{:0>2}", &stripped)
941                    } else {
942                        format!("+{:0>2}", &after)
943                    };
944                    format!("{before}e{after}")
945                } else {
946                    let rounded = format!("{v:.4}");
947                    format!("{rounded:>width$}")
948                }
949            };
950            write!(f, "{prefix}{value}{suffix}{separator}",)?;
951        }
952        Ok(())
953    }
954}
955
956#[cfg(test)]
957mod tests {
958    use crate::allocator::CpuAllocator;
959    use crate::tensor::{Tensor, TensorError};
960
961    #[test]
962    fn constructor_1d() -> Result<(), TensorError> {
963        let data: Vec<u8> = vec![1];
964        let t = Tensor::<u8, 1, _>::from_shape_vec([1], data, CpuAllocator)?;
965        assert_eq!(t.shape, [1]);
966        assert_eq!(t.as_slice(), vec![1]);
967        assert_eq!(t.strides, [1]);
968        assert_eq!(t.numel(), 1);
969        Ok(())
970    }
971
972    #[test]
973    fn constructor_2d() -> Result<(), TensorError> {
974        let data: Vec<u8> = vec![1, 2];
975        let t = Tensor::<u8, 2, _>::from_shape_vec([1, 2], data, CpuAllocator)?;
976        assert_eq!(t.shape, [1, 2]);
977        assert_eq!(t.as_slice(), vec![1, 2]);
978        assert_eq!(t.strides, [2, 1]);
979        assert_eq!(t.numel(), 2);
980        Ok(())
981    }
982
983    #[test]
984    fn get_1d() -> Result<(), TensorError> {
985        let data: Vec<u8> = vec![1, 2, 3, 4];
986        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
987        assert_eq!(t.get([0]), Some(&1));
988        assert_eq!(t.get([1]), Some(&2));
989        assert_eq!(t.get([2]), Some(&3));
990        assert_eq!(t.get([3]), Some(&4));
991        assert!(t.get([4]).is_none());
992        Ok(())
993    }
994
995    #[test]
996    fn get_2d() -> Result<(), TensorError> {
997        let data: Vec<u8> = vec![1, 2, 3, 4];
998        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
999        assert_eq!(t.get([0, 0]), Some(&1));
1000        assert_eq!(t.get([0, 1]), Some(&2));
1001        assert_eq!(t.get([1, 0]), Some(&3));
1002        assert_eq!(t.get([1, 1]), Some(&4));
1003        assert!(t.get([2, 0]).is_none());
1004        assert!(t.get([0, 2]).is_none());
1005        Ok(())
1006    }
1007
1008    #[test]
1009    fn get_3d() -> Result<(), TensorError> {
1010        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6];
1011        let t = Tensor::<u8, 3, _>::from_shape_vec([2, 1, 3], data, CpuAllocator)?;
1012        assert_eq!(t.get([0, 0, 0]), Some(&1));
1013        assert_eq!(t.get([0, 0, 1]), Some(&2));
1014        assert_eq!(t.get([0, 0, 2]), Some(&3));
1015        assert_eq!(t.get([1, 0, 0]), Some(&4));
1016        assert_eq!(t.get([1, 0, 1]), Some(&5));
1017        assert_eq!(t.get([1, 0, 2]), Some(&6));
1018        assert!(t.get([2, 0, 0]).is_none());
1019        assert!(t.get([0, 1, 0]).is_none());
1020        assert!(t.get([0, 0, 3]).is_none());
1021        Ok(())
1022    }
1023
1024    #[test]
    fn get_unchecked_1d() -> Result<(), TensorError> {
1026        let data: Vec<u8> = vec![1, 2, 3, 4];
1027        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
1028        assert_eq!(*t.get_unchecked([0]), 1);
1029        assert_eq!(*t.get_unchecked([1]), 2);
1030        assert_eq!(*t.get_unchecked([2]), 3);
1031        assert_eq!(*t.get_unchecked([3]), 4);
1032        Ok(())
1033    }
1034
1035    #[test]
    fn get_unchecked_2d() -> Result<(), TensorError> {
1037        let data: Vec<u8> = vec![1, 2, 3, 4];
1038        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
1039        assert_eq!(*t.get_unchecked([0, 0]), 1);
1040        assert_eq!(*t.get_unchecked([0, 1]), 2);
1041        assert_eq!(*t.get_unchecked([1, 0]), 3);
1042        assert_eq!(*t.get_unchecked([1, 1]), 4);
1043        Ok(())
1044    }
1045
1046    #[test]
1047    fn add_1d() -> Result<(), TensorError> {
1048        let data1: Vec<u8> = vec![1, 2, 3, 4];
1049        let t1 = Tensor::<u8, 1, _>::from_shape_vec([4], data1, CpuAllocator)?;
1050        let data2: Vec<u8> = vec![1, 2, 3, 4];
1051        let t2 = Tensor::<u8, 1, _>::from_shape_vec([4], data2, CpuAllocator)?;
1052        let t3 = t1.add(&t2);
1053        assert_eq!(t3.as_slice(), vec![2, 4, 6, 8]);
1054        Ok(())
1055    }
1056
1057    #[test]
1058    fn add_2d() -> Result<(), TensorError> {
1059        let data1: Vec<u8> = vec![1, 2, 3, 4];
1060        let t1 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data1, CpuAllocator)?;
1061        let data2: Vec<u8> = vec![1, 2, 3, 4];
1062        let t2 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data2, CpuAllocator)?;
1063        let t3 = t1.add(&t2);
1064        assert_eq!(t3.as_slice(), vec![2, 4, 6, 8]);
1065        Ok(())
1066    }
1067
1068    #[test]
1069    fn add_3d() -> Result<(), TensorError> {
1070        let data1: Vec<u8> = vec![1, 2, 3, 4, 5, 6];
1071        let t1 = Tensor::<u8, 3, _>::from_shape_vec([2, 1, 3], data1, CpuAllocator)?;
1072        let data2: Vec<u8> = vec![1, 2, 3, 4, 5, 6];
1073        let t2 = Tensor::<u8, 3, _>::from_shape_vec([2, 1, 3], data2, CpuAllocator)?;
1074        let t3 = t1.add(&t2);
1075        assert_eq!(t3.as_slice(), vec![2, 4, 6, 8, 10, 12]);
1076        Ok(())
1077    }
1078
1079    #[test]
1080    fn sub_1d() -> Result<(), TensorError> {
1081        let data1: Vec<u8> = vec![1, 2, 3, 4];
1082        let t1 = Tensor::<u8, 1, _>::from_shape_vec([4], data1, CpuAllocator)?;
1083        let data2: Vec<u8> = vec![1, 2, 3, 4];
1084        let t2 = Tensor::<u8, 1, _>::from_shape_vec([4], data2, CpuAllocator)?;
1085        let t3 = t1.sub(&t2);
1086        assert_eq!(t3.as_slice(), vec![0, 0, 0, 0]);
1087        Ok(())
1088    }
1089
1090    #[test]
1091    fn sub_2d() -> Result<(), TensorError> {
1092        let data1: Vec<u8> = vec![1, 2, 3, 4];
1093        let t1 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data1, CpuAllocator)?;
1094        let data2: Vec<u8> = vec![1, 2, 3, 4];
1095        let t2 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data2, CpuAllocator)?;
1096        let t3 = t1.sub(&t2);
1097        assert_eq!(t3.as_slice(), vec![0, 0, 0, 0]);
1098        Ok(())
1099    }
1100
1101    #[test]
1102    fn div_1d() -> Result<(), TensorError> {
1103        let data1: Vec<u8> = vec![1, 2, 3, 4];
1104        let t1 = Tensor::<u8, 1, _>::from_shape_vec([4], data1, CpuAllocator)?;
1105        let data2: Vec<u8> = vec![1, 2, 3, 4];
1106        let t2 = Tensor::<u8, 1, _>::from_shape_vec([4], data2, CpuAllocator)?;
1107        let t3 = t1.div(&t2);
1108        assert_eq!(t3.as_slice(), vec![1, 1, 1, 1]);
1109        Ok(())
1110    }
1111
1112    #[test]
1113    fn div_2d() -> Result<(), TensorError> {
1114        let data1: Vec<u8> = vec![1, 2, 3, 4];
1115        let t1 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data1, CpuAllocator)?;
1116        let data2: Vec<u8> = vec![1, 2, 3, 4];
1117        let t2 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data2, CpuAllocator)?;
1118        let t3 = t1.div(&t2);
1119        assert_eq!(t3.as_slice(), vec![1, 1, 1, 1]);
1120        Ok(())
1121    }
1122
1123    #[test]
1124    fn mul_1d() -> Result<(), TensorError> {
1125        let data1: Vec<u8> = vec![1, 2, 3, 4];
1126        let t1 = Tensor::<u8, 1, _>::from_shape_vec([4], data1, CpuAllocator)?;
1127        let data2: Vec<u8> = vec![1, 2, 3, 4];
1128        let t2 = Tensor::<u8, 1, _>::from_shape_vec([4], data2, CpuAllocator)?;
1129        let t3 = t1.mul(&t2);
1130        assert_eq!(t3.as_slice(), vec![1, 4, 9, 16]);
1131        Ok(())
1132    }
1133
1134    #[test]
1135    fn mul_2d() -> Result<(), TensorError> {
1136        let data1: Vec<u8> = vec![1, 2, 3, 4];
1137        let t1 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data1, CpuAllocator)?;
1138        let data2: Vec<u8> = vec![1, 2, 3, 4];
1139        let t2 = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data2, CpuAllocator)?;
1140        let t3 = t1.mul(&t2);
1141        assert_eq!(t3.as_slice(), vec![1, 4, 9, 16]);
1142        Ok(())
1143    }
1144
1145    #[test]
1146    fn reshape_1d() -> Result<(), TensorError> {
1147        let data: Vec<u8> = vec![1, 2, 3, 4];
1148        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
1149
1150        let view = t.reshape([2, 2])?;
1151
1152        assert_eq!(view.shape, [2, 2]);
1153        assert_eq!(view.as_slice(), vec![1, 2, 3, 4]);
1154        assert_eq!(view.strides, [2, 1]);
1155        assert_eq!(view.numel(), 4);
1156        assert_eq!(view.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
1157        Ok(())
1158    }
1159
1160    #[test]
1161    fn reshape_2d() -> Result<(), TensorError> {
1162        let data: Vec<u8> = vec![1, 2, 3, 4];
1163        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
1164        let t2 = t.reshape([4])?;
1165
1166        assert_eq!(t2.shape, [4]);
1167        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
1168        assert_eq!(t2.strides, [1]);
1169        assert_eq!(t2.numel(), 4);
1170        assert_eq!(t2.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
1171        Ok(())
1172    }
1173
1174    #[test]
1175    fn reshape_get_1d() -> Result<(), TensorError> {
1176        let data: Vec<u8> = vec![1, 2, 3, 4];
1177        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
1178        let view = t.reshape([2, 2])?;
1179        assert_eq!(*view.get_unchecked([0, 0]), 1);
1180        assert_eq!(*view.get_unchecked([0, 1]), 2);
1181        assert_eq!(*view.get_unchecked([1, 0]), 3);
1182        assert_eq!(*view.get_unchecked([1, 1]), 4);
1183        assert_eq!(view.numel(), 4);
1184        assert_eq!(view.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
1185        Ok(())
1186    }
1187
1188    #[test]
1189    fn permute_axes_1d() -> Result<(), TensorError> {
1190        let data: Vec<u8> = vec![1, 2, 3, 4];
1191        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
1192        let t2 = t.permute_axes([0]);
1193        assert_eq!(t2.shape, [4]);
1194        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
1195        assert_eq!(t2.strides, [1]);
1196        assert_eq!(t2.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
1197        Ok(())
1198    }
1199
1200    #[test]
1201    fn permute_axes_2d() -> Result<(), TensorError> {
1202        let data: Vec<u8> = vec![1, 2, 3, 4];
1203        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
1204        let view = t.permute_axes([1, 0]);
1205        assert_eq!(view.shape, [2, 2]);
1206        assert_eq!(*view.get_unchecked([0, 0]), 1u8);
1207        assert_eq!(*view.get_unchecked([1, 0]), 2u8);
1208        assert_eq!(*view.get_unchecked([0, 1]), 3u8);
1209        assert_eq!(*view.get_unchecked([1, 1]), 4u8);
1210        assert_eq!(view.strides, [1, 2]);
1211        assert_eq!(view.as_contiguous().as_slice(), vec![1, 3, 2, 4]);
1212        Ok(())
1213    }
1214
1215    #[test]
1216    fn contiguous_2d() -> Result<(), TensorError> {
1217        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6];
1218        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 3], data, CpuAllocator)?;
1219
1220        let view = t.permute_axes([1, 0]);
1221
1222        let contiguous = view.as_contiguous();
1223
1224        assert_eq!(contiguous.shape, [3, 2]);
1225        assert_eq!(contiguous.strides, [2, 1]);
1226        assert_eq!(contiguous.as_slice(), vec![1, 4, 2, 5, 3, 6]);
1227
1228        Ok(())
1229    }
1230
1231    #[test]
1232    fn zeros_1d() -> Result<(), TensorError> {
1233        let t = Tensor::<u8, 1, _>::zeros([4], CpuAllocator);
1234        assert_eq!(t.as_slice(), vec![0, 0, 0, 0]);
1235        Ok(())
1236    }
1237
1238    #[test]
1239    fn zeros_2d() -> Result<(), TensorError> {
1240        let t = Tensor::<u8, 2, _>::zeros([2, 2], CpuAllocator);
1241        assert_eq!(t.as_slice(), vec![0, 0, 0, 0]);
1242        Ok(())
1243    }
1244
1245    #[test]
1246    fn map_1d() -> Result<(), TensorError> {
1247        let data: Vec<u8> = vec![1, 2, 3, 4];
1248        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
1249        let t2 = t.map(|x| *x + 1);
1250        assert_eq!(t2.as_slice(), vec![2, 3, 4, 5]);
1251        Ok(())
1252    }
1253
1254    #[test]
1255    fn map_2d() -> Result<(), TensorError> {
1256        let data: Vec<u8> = vec![1, 2, 3, 4];
1257        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
1258        let t2 = t.map(|x| *x + 1);
1259        assert_eq!(t2.as_slice(), vec![2, 3, 4, 5]);
1260        Ok(())
1261    }
1262
1263    #[test]
1264    fn from_shape_val_1d() -> Result<(), TensorError> {
1265        let t = Tensor::<u8, 1, _>::from_shape_val([4], 0, CpuAllocator);
1266        assert_eq!(t.as_slice(), vec![0, 0, 0, 0]);
1267        Ok(())
1268    }
1269
1270    #[test]
1271    fn from_shape_val_2d() -> Result<(), TensorError> {
1272        let t = Tensor::<u8, 2, _>::from_shape_val([2, 2], 1, CpuAllocator);
1273        assert_eq!(t.as_slice(), vec![1, 1, 1, 1]);
1274        Ok(())
1275    }
1276
1277    #[test]
1278    fn from_shape_val_3d() -> Result<(), TensorError> {
1279        let t = Tensor::<u8, 3, _>::from_shape_val([2, 1, 3], 2, CpuAllocator);
1280        assert_eq!(t.as_slice(), vec![2, 2, 2, 2, 2, 2]);
1281        Ok(())
1282    }
1283
1284    #[test]
1285    fn cast_1d() -> Result<(), TensorError> {
1286        let data: Vec<u8> = vec![1, 2, 3, 4];
1287        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
1288        let t2 = t.cast::<u16>();
1289        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
1290        Ok(())
1291    }
1292
1293    #[test]
1294    fn cast_2d() -> Result<(), TensorError> {
1295        let data: Vec<u8> = vec![1, 2, 3, 4];
1296        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
1297        let t2 = t.cast::<u16>();
1298        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
1299        Ok(())
1300    }
1301
1302    #[test]
1303    fn from_shape_fn_1d() -> Result<(), TensorError> {
1304        let alloc = CpuAllocator;
1305        let t = Tensor::from_shape_fn([3, 3], alloc, |[i, j]| ((1 + i) * (1 + j)) as u8);
1306        assert_eq!(t.as_slice(), vec![1, 2, 3, 2, 4, 6, 3, 6, 9]);
1307        Ok(())
1308    }
1309
1310    #[test]
1311    fn from_shape_fn_2d() -> Result<(), TensorError> {
1312        let alloc = CpuAllocator;
1313        let t = Tensor::from_shape_fn([3, 3], alloc, |[i, j]| ((1 + i) * (1 + j)) as f32);
1314        assert_eq!(
1315            t.as_slice(),
1316            vec![1.0, 2.0, 3.0, 2.0, 4.0, 6.0, 3.0, 6.0, 9.0]
1317        );
1318        Ok(())
1319    }
1320
1321    #[test]
1322    fn from_shape_fn_3d() -> Result<(), TensorError> {
1323        let alloc = CpuAllocator;
1324        let t = Tensor::from_shape_fn([2, 3, 3], alloc, |[x, y, c]| {
1325            ((1 + x) * (1 + y) * (1 + c)) as i16
1326        });
1327        assert_eq!(
1328            t.as_slice(),
1329            vec![1, 2, 3, 2, 4, 6, 3, 6, 9, 2, 4, 6, 4, 8, 12, 6, 12, 18]
1330        );
1331        Ok(())
1332    }
1333
1334    #[test]
1335    fn view_1d() -> Result<(), TensorError> {
1336        let alloc = CpuAllocator;
1337        let data: Vec<u8> = vec![1, 2, 3, 4];
1338        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, alloc)?;
1339        let view = t.view();
1340
1341        // check that the view has the same data
1342        assert_eq!(view.as_slice(), t.as_slice());
1343
1344        // check that the data pointer is the same
1345        assert!(std::ptr::eq(view.as_ptr(), t.as_ptr()));
1346
1347        Ok(())
1348    }
1349
1350    // New tests for added functionality
1351
1352    #[test]
1353    fn powi_and_abs() -> Result<(), TensorError> {
1354        let data: Vec<f32> = vec![-1.0, 2.0, -3.0, 4.0];
1355        let t = Tensor::<f32, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
1356
1357        let t_powi = t.powi(2);
1358        assert_eq!(t_powi.as_slice(), &[1.0, 4.0, 9.0, 16.0]);
1359
1360        let t_abs = t.abs();
1361        assert_eq!(t_abs.as_slice(), &[1.0, 2.0, 3.0, 4.0]);
1362
1363        Ok(())
1364    }
1365
1366    #[test]
1367    fn from_slice() -> Result<(), TensorError> {
1368        let data: [u8; 4] = [1, 2, 3, 4];
1369        let t = Tensor::<u8, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;
1370
1371        assert_eq!(t.shape, [2, 2]);
1372        assert_eq!(t.as_slice(), &[1, 2, 3, 4]);
1373
1374        Ok(())
1375    }
1376
1377    #[test]
1378    fn display_2d() -> Result<(), TensorError> {
1379        let data: [u8; 4] = [1, 2, 3, 4];
1380        let t = Tensor::<u8, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;
1381        let disp = t.to_string();
1382        let lines = disp.lines().collect::<Vec<_>>();
1383
1384        #[rustfmt::skip]
1385        assert_eq!(lines.as_slice(),
1386        ["[[1,2],",
1387         " [3,4]]"]);
1388        Ok(())
1389    }
1390
1391    #[test]
1392    fn display_3d() -> Result<(), TensorError> {
1393        let data: [u8; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
1394        let t = Tensor::<u8, 3, _>::from_shape_slice([2, 3, 2], &data, CpuAllocator)?;
1395        let disp = t.to_string();
1396        let lines = disp.lines().collect::<Vec<_>>();
1397
1398        #[rustfmt::skip]
1399        assert_eq!(lines.as_slice(),
1400        ["[[[ 1, 2],",
1401         "  [ 3, 4],",
1402         "  [ 5, 6]],",
1403         "",
1404         " [[ 7, 8],",
1405         "  [ 9,10],",
1406         "  [11,12]]]"]);
1407        Ok(())
1408    }
1409
1410    #[test]
1411    fn display_float() -> Result<(), TensorError> {
1412        let data: [f32; 4] = [1.00001, 1.00009, 0.99991, 0.99999];
1413        let t = Tensor::<f32, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;
1414        let disp = t.to_string();
1415        let lines = disp.lines().collect::<Vec<_>>();
1416
1417        #[rustfmt::skip]
1418        assert_eq!(lines.as_slice(),
1419        ["[[1.0000,1.0001],",
1420         " [0.9999,1.0000]]"]);
1421        Ok(())
1422    }
1423
1424    #[test]
1425    fn display_big_float() -> Result<(), TensorError> {
1426        let data: [f32; 4] = [1000.00001, 1.00009, 0.99991, 0.99999];
1427        let t = Tensor::<f32, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;
1428        let disp = t.to_string();
1429        let lines = disp.lines().collect::<Vec<_>>();
1430
1431        #[rustfmt::skip]
1432        assert_eq!(lines.as_slice(),
1433        ["[[1.0000e+03,1.0001e+00],",
1434         " [9.9991e-01,9.9999e-01]]"]);
1435        Ok(())
1436    }
1437
1438    #[test]
1439    fn display_big_tensor() -> Result<(), TensorError> {
1440        let data: [u8; 1000] = [0; 1000];
1441        let t = Tensor::<u8, 3, _>::from_shape_slice([10, 10, 10], &data, CpuAllocator)?;
1442        let disp = t.to_string();
1443        let lines = disp.lines().collect::<Vec<_>>();
1444
1445        #[rustfmt::skip]
1446        assert_eq!(lines.as_slice(),
1447        ["[[[0,0,0,...,0],",
1448         "  [0,0,0,...,0],",
1449         "  [0,0,0,...,0],",
1450         "  ...",
1451         "  [0,0,0,...,0]],",
1452         "",
1453         " [[0,0,0,...,0],",
1454         "  [0,0,0,...,0],",
1455         "  [0,0,0,...,0],",
1456         "  ...",
1457         "  [0,0,0,...,0]],",
1458         "",
1459         " [[0,0,0,...,0],",
1460         "  [0,0,0,...,0],",
1461         "  [0,0,0,...,0],",
1462         "  ...",
1463         "  [0,0,0,...,0]],",
1464         "",
1465         " ...",
1466         "",
1467         " [[0,0,0,...,0],",
1468         "  [0,0,0,...,0],",
1469         "  [0,0,0,...,0],",
1470         "  ...",
1471         "  [0,0,0,...,0]]]"]);
1472        Ok(())
1473    }
1474
1475    #[test]
1476    fn get_index_unchecked_1d() -> Result<(), TensorError> {
1477        let data: Vec<u8> = vec![1, 2, 3, 4];
1478        let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator)?;
1479        assert_eq!(t.get_index_unchecked(0), [0]);
1480        assert_eq!(t.get_index_unchecked(1), [1]);
1481        assert_eq!(t.get_index_unchecked(2), [2]);
1482        assert_eq!(t.get_index_unchecked(3), [3]);
1483        Ok(())
1484    }
1485
1486    #[test]
1487    fn get_index_unchecked_2d() -> Result<(), TensorError> {
1488        let data: Vec<u8> = vec![1, 2, 3, 4];
1489        let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator)?;
1490        assert_eq!(t.get_index_unchecked(0), [0, 0]);
1491        assert_eq!(t.get_index_unchecked(1), [0, 1]);
1492        assert_eq!(t.get_index_unchecked(2), [1, 0]);
1493        assert_eq!(t.get_index_unchecked(3), [1, 1]);
1494        Ok(())
1495    }
1496
1497    #[test]
1498    fn get_index_unchecked_3d() -> Result<(), TensorError> {
1499        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
1500        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
1501        assert_eq!(t.get_index_unchecked(0), [0, 0, 0]);
1502        assert_eq!(t.get_index_unchecked(1), [0, 0, 1]);
1503        assert_eq!(t.get_index_unchecked(2), [0, 0, 2]);
1504        assert_eq!(t.get_index_unchecked(3), [0, 1, 0]);
1505        assert_eq!(t.get_index_unchecked(4), [0, 1, 1]);
1506        assert_eq!(t.get_index_unchecked(5), [0, 1, 2]);
1507        assert_eq!(t.get_index_unchecked(6), [1, 0, 0]);
1508        assert_eq!(t.get_index_unchecked(7), [1, 0, 1]);
1509        assert_eq!(t.get_index_unchecked(8), [1, 0, 2]);
1510        assert_eq!(t.get_index_unchecked(9), [1, 1, 0]);
1511        assert_eq!(t.get_index_unchecked(10), [1, 1, 1]);
1512        assert_eq!(t.get_index_unchecked(11), [1, 1, 2]);
1513        Ok(())
1514    }
1515
1516    #[test]
1517    fn get_index_to_offset_and_back() -> Result<(), TensorError> {
1518        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
1519        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
1520        for offset in 0..12 {
1521            assert_eq!(
1522                t.get_iter_offset_unchecked(t.get_index_unchecked(offset)),
1523                offset
1524            );
1525        }
1526        Ok(())
1527    }
1528
1529    #[test]
1530    fn get_offset_to_index_and_back() -> Result<(), TensorError> {
1531        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
1532        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
1533        for ind in [
1534            [0, 0, 0],
1535            [0, 0, 1],
1536            [0, 0, 2],
1537            [0, 1, 0],
1538            [0, 1, 1],
1539            [0, 1, 2],
1540            [1, 0, 0],
1541            [1, 0, 1],
1542            [1, 0, 2],
1543            [1, 1, 0],
1544            [1, 1, 1],
1545            [1, 1, 2],
1546        ] {
1547            assert_eq!(t.get_index_unchecked(t.get_iter_offset_unchecked(ind)), ind);
1548        }
1549        Ok(())
1550    }
1551
1552    #[test]
1553    fn get_index_1d() -> Result<(), TensorError> {
1554        let data: Vec<u8> = vec![1, 2, 3, 4];
1555        let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator)?;
1556        assert_eq!(t.get_index(3), Ok([3]));
1557        assert!(t
1558            .get_index(4)
1559            .is_err_and(|x| x == TensorError::IndexOutOfBounds(4)));
1560        Ok(())
1561    }
1562
1563    #[test]
1564    fn get_index_2d() -> Result<(), TensorError> {
1565        let data: Vec<u8> = vec![1, 2, 3, 4];
1566        let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator)?;
1567        assert_eq!(t.get_index_unchecked(3), [1, 1]);
1568        assert!(t
1569            .get_index(4)
1570            .is_err_and(|x| x == TensorError::IndexOutOfBounds(4)));
1571        Ok(())
1572    }
1573
1574    #[test]
1575    fn get_index_3d() -> Result<(), TensorError> {
1576        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
1577        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
1578        assert_eq!(t.get_index_unchecked(11), [1, 1, 2]);
1579        assert!(t
1580            .get_index(12)
1581            .is_err_and(|x| x == TensorError::IndexOutOfBounds(12)));
1582        Ok(())
1583    }
1584}