// kornia_tensor/tensor.rs

1use thiserror::Error;
2
3use super::{
4    allocator::{CpuAllocator, TensorAllocator, TensorAllocatorError},
5    storage::TensorStorage,
6    view::TensorView,
7};
8
/// An error type for tensor operations.
#[derive(Error, Debug, PartialEq)]
pub enum TensorError {
    /// Error when the cast operation fails.
    #[error("Failed to cast data")]
    CastError,

    /// The number of elements in the data does not match the shape of the tensor.
    ///
    /// The payload is the expected element count (the product of the shape dimensions).
    #[error("The number of elements in the data does not match the shape of the tensor: {0}")]
    InvalidShape(usize),

    /// Index out of bounds.
    ///
    /// The payload is the offending linear offset.
    #[error("Index out of bounds. The index {0} is out of bounds.")]
    IndexOutOfBounds(usize),

    /// Error with the tensor storage.
    #[error("Error with the tensor storage: {0}")]
    StorageError(#[from] TensorAllocatorError),

    /// Dimension mismatch for operations requiring compatible shapes.
    #[error("Dimension mismatch: {0}")]
    DimensionMismatch(String),

    /// Unsupported operation for the given data type or tensor configuration.
    #[error("Unsupported operation: {0}")]
    UnsupportedOperation(String),
}
36
/// Compute the row-major (C-contiguous) strides from the shape of a tensor.
///
/// # Arguments
///
/// * `shape` - The shape of the tensor.
///
/// # Returns
///
/// * `strides` - The strides of the tensor; the innermost dimension has stride 1.
pub fn get_strides_from_shape<const N: usize>(shape: [usize; N]) -> [usize; N] {
    let mut strides: [usize; N] = [0; N];
    let mut acc = 1;
    // Walk from the innermost axis outwards, accumulating how many elements a
    // single step along each axis spans.
    for (stride_slot, &dim) in strides.iter_mut().zip(shape.iter()).rev() {
        *stride_slot = acc;
        acc *= dim;
    }
    strides
}
55
/// A data structure to represent a multi-dimensional tensor.
///
/// NOTE: Internally, the data is held in a [`TensorStorage`], a contiguous
/// memory region managed by the allocator `A`.
///
/// # Attributes
///
/// * `storage` - The storage of the tensor.
/// * `shape` - The shape of the tensor.
/// * `strides` - The strides of the tensor data in memory.
///
/// # Example
///
/// ```
/// use kornia_tensor::{Tensor, CpuAllocator};
///
/// let data: Vec<u8> = vec![1, 2, 3, 4];
/// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
/// assert_eq!(t.shape, [2, 2]);
/// ```
pub struct Tensor<T, const N: usize, A: TensorAllocator> {
    /// The storage of the tensor.
    pub storage: TensorStorage<T, A>,
    /// The shape of the tensor.
    pub shape: [usize; N],
    /// The strides of the tensor data in memory (row-major by construction,
    /// but public and therefore mutable by users; see `is_standard_layout`).
    pub strides: [usize; N],
}
84
85impl<T, const N: usize, A: TensorAllocator> Tensor<T, N, A> {
    /// Get the data of the tensor as a slice.
    ///
    /// # Returns
    ///
    /// A slice containing the data of the tensor.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        self.storage.as_slice()
    }

    /// Get the data of the tensor as a mutable slice.
    ///
    /// # Returns
    ///
    /// A mutable slice containing the data of the tensor.
    #[inline]
    pub fn as_slice_mut(&mut self) -> &mut [T] {
        self.storage.as_mut_slice()
    }

    /// Get the data of the tensor as a pointer.
    ///
    /// # Returns
    ///
    /// A pointer to the data of the tensor.
    #[inline]
    pub fn as_ptr(&self) -> *const T {
        self.storage.as_ptr()
    }

    /// Get the data of the tensor as a mutable pointer.
    ///
    /// # Returns
    ///
    /// A mutable pointer to the data of the tensor.
    #[inline]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.storage.as_mut_ptr()
    }

    /// Consumes the tensor and returns the underlying vector.
    ///
    /// This method destroys the tensor and returns ownership of the underlying data.
    /// The returned vector will have a length equal to the total number of elements in the tensor.
    #[inline]
    pub fn into_vec(self) -> Vec<T> {
        self.storage.into_vec()
    }
135
136    /// Creates a new `Tensor` with the given shape and data.
137    ///
138    /// # Arguments
139    ///
140    /// * `shape` - An array containing the shape of the tensor.
141    /// * `data` - A vector containing the data of the tensor.
142    /// * `alloc` - The allocator to use.
143    ///
144    /// # Returns
145    ///
146    /// A new `Tensor` instance.
147    ///
148    /// # Errors
149    ///
150    /// If the number of elements in the data does not match the shape of the tensor, an error is returned.
151    ///
152    /// # Example
153    ///
154    /// ```
155    /// use kornia_tensor::{Tensor, CpuAllocator};
156    ///
157    /// let data: Vec<u8> = vec![1, 2, 3, 4];
158    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
159    /// assert_eq!(t.shape, [2, 2]);
160    /// ```
161    pub fn from_shape_vec(shape: [usize; N], data: Vec<T>, alloc: A) -> Result<Self, TensorError> {
162        let numel = shape.iter().product::<usize>();
163        if numel != data.len() {
164            return Err(TensorError::InvalidShape(numel));
165        }
166        let storage = TensorStorage::from_vec(data, alloc);
167        let strides = get_strides_from_shape(shape);
168        Ok(Self {
169            storage,
170            shape,
171            strides,
172        })
173    }
174
175    /// Creates a new `Tensor` with the given shape and slice of data.
176    ///
177    /// # Arguments
178    ///
179    /// * `shape` - An array containing the shape of the tensor.
180    /// * `data` - A slice containing the data of the tensor.
181    /// * `alloc` - The allocator to use.
182    ///
183    /// # Returns
184    ///
185    /// A new `Tensor` instance.
186    ///
187    /// # Errors
188    ///
189    /// If the number of elements in the data does not match the shape of the tensor, an error is returned.
190    pub fn from_shape_slice(shape: [usize; N], data: &[T], alloc: A) -> Result<Self, TensorError>
191    where
192        T: Clone,
193    {
194        let numel = shape.iter().product::<usize>();
195        if numel != data.len() {
196            return Err(TensorError::InvalidShape(numel));
197        }
198        let storage = TensorStorage::from_vec(data.to_vec(), alloc);
199        let strides = get_strides_from_shape(shape);
200        Ok(Self {
201            storage,
202            shape,
203            strides,
204        })
205    }
206
    /// Creates a new `Tensor` with the given shape and raw parts.
    ///
    /// # Arguments
    ///
    /// * `shape` - An array containing the shape of the tensor.
    /// * `data` - A pointer to the data of the tensor.
    /// * `len` - The length of the data, in elements.
    /// * `alloc` - The allocator to use.
    ///
    /// # Safety
    ///
    /// The pointer must be non-null and valid for reads of `len` elements of `T`,
    /// and the memory must satisfy whatever contract `TensorStorage::from_raw_parts`
    /// requires for ownership/deallocation.
    ///
    /// NOTE(review): unlike `from_shape_vec`, `len` is not validated against the
    /// element count implied by `shape` — confirm whether that is intentional.
    pub unsafe fn from_raw_parts(
        shape: [usize; N],
        data: *const T,
        len: usize,
        alloc: A,
    ) -> Result<Self, TensorError>
    where
        T: Clone,
    {
        let storage = TensorStorage::from_raw_parts(data, len, alloc);
        let strides = get_strides_from_shape(shape);
        Ok(Self {
            storage,
            shape,
            strides,
        })
    }
236
    /// Creates a new `Tensor` with the given shape and a default value.
    ///
    /// # Arguments
    ///
    /// * `shape` - An array containing the shape of the tensor.
    /// * `value` - The default value to fill the tensor with.
    /// * `alloc` - The allocator to use.
    ///
    /// # Returns
    ///
    /// A new `Tensor` instance.
    ///
    /// # Example
    ///
    /// ```
    /// use kornia_tensor::{Tensor, CpuAllocator};
    ///
    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_val([4], 0, CpuAllocator);
    /// assert_eq!(t.as_slice(), vec![0, 0, 0, 0]);
    ///
    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_val([2, 2], 1, CpuAllocator);
    /// assert_eq!(t.as_slice(), vec![1, 1, 1, 1]);
    ///
    /// let t = Tensor::<u8, 3, CpuAllocator>::from_shape_val([2, 1, 3], 2, CpuAllocator);
    /// assert_eq!(t.as_slice(), vec![2, 2, 2, 2, 2, 2]);
    /// ```
    pub fn from_shape_val(shape: [usize; N], value: T, alloc: A) -> Self
    where
        T: Clone,
    {
        let numel = shape.iter().product::<usize>();
        let data = vec![value; numel];
        let storage = TensorStorage::from_vec(data, alloc);
        let strides = get_strides_from_shape(shape);
        Self {
            storage,
            shape,
            strides,
        }
    }
277
278    /// Create a new `Tensor` with the given shape and a function to generate the data.
279    ///
280    /// The function `f` is called with the index of the element to generate.
281    ///
282    /// # Arguments
283    ///
284    /// * `shape` - An array containing the shape of the tensor.
285    /// * `f` - The function to generate the data.
286    ///
287    /// # Returns
288    ///
289    /// A new `Tensor` instance.
290    ///
291    /// # Example
292    ///
293    /// ```
294    /// use kornia_tensor::{Tensor, CpuAllocator};
295    ///
296    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_fn([4], CpuAllocator, |[i]| i as u8);
297    /// assert_eq!(t.as_slice(), vec![0, 1, 2, 3]);
298    ///
299    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_fn([2, 2], CpuAllocator, |[i, j]| (i * 2 + j) as u8);
300    /// assert_eq!(t.as_slice(), vec![0, 1, 2, 3]);
301    /// ```
302    pub fn from_shape_fn<F>(shape: [usize; N], alloc: A, f: F) -> Self
303    where
304        F: Fn([usize; N]) -> T,
305    {
306        let numel = shape.iter().product::<usize>();
307        let data: Vec<T> = (0..numel)
308            .map(|i| {
309                let mut index = [0; N];
310                let mut j = i;
311                for k in (0..N).rev() {
312                    index[k] = j % shape[k];
313                    j /= shape[k];
314                }
315                f(index)
316            })
317            .collect();
318        let storage = TensorStorage::from_vec(data, alloc);
319        let strides = get_strides_from_shape(shape);
320        Self {
321            storage,
322            shape,
323            strides,
324        }
325    }
326
327    /// Returns the number of elements in the tensor.
328    ///
329    /// # Returns
330    ///
331    /// The number of elements in the tensor.
332    #[inline]
333    pub fn numel(&self) -> usize {
334        self.storage.len() / std::mem::size_of::<T>()
335    }
336
337    /// Get the offset of the element at the given index.
338    ///
339    /// # Arguments
340    ///
341    /// * `index` - The list of indices to get the element from.
342    ///
343    /// # Returns
344    ///
345    /// The offset of the element at the given index.
346    pub fn get_iter_offset(&self, index: [usize; N]) -> Option<usize> {
347        let mut offset = 0;
348        for ((&idx, dim_size), stride) in index.iter().zip(self.shape).zip(self.strides) {
349            if idx >= dim_size {
350                return None;
351            }
352            offset += idx * stride;
353        }
354        Some(offset)
355    }
356
357    /// Get the offset of the element at the given index without checking dim sizes.
358    ///
359    /// # Arguments
360    ///
361    /// * `index` - The list of indices to get the element from.
362    ///
363    /// # Returns
364    ///
365    /// The offset of the element at the given index.
366    pub fn get_iter_offset_unchecked(&self, index: [usize; N]) -> usize {
367        let mut offset = 0;
368        for (&idx, stride) in index.iter().zip(self.strides) {
369            offset += idx * stride;
370        }
371        offset
372    }
373
374    /// Get the index of the element at the given offset without checking dim sizes. The reverse of `Self::get_iter_offset_unchecked`.
375    ///
376    /// # Arguments
377    ///
378    /// * `offset` - The offset of the element at the given index.
379    ///
380    /// # Returns
381    ///
382    /// The array of indices to get the element from.
383    pub fn get_index_unchecked(&self, offset: usize) -> [usize; N] {
384        let mut idx = [0; N];
385        let mut rem = offset;
386        for (dim_i, s) in self.strides.iter().enumerate() {
387            idx[dim_i] = rem / s;
388            rem = offset % s;
389        }
390
391        idx
392    }
393
394    /// Get the index of the element at the given offset. The reverse of `Self::get_iter_offset`.
395    ///
396    /// # Arguments
397    ///
398    /// * `offset` - The offset of the element at the given index.
399    ///
400    /// # Returns
401    ///
402    /// The array of indices to get the element from.
403    ///
404    /// # Errors
405    ///
406    /// If the offset is out of bounds (>= numel), an error is returned.
407    pub fn get_index(&self, offset: usize) -> Result<[usize; N], TensorError> {
408        if offset >= self.numel() {
409            return Err(TensorError::IndexOutOfBounds(offset));
410        }
411        let idx = self.get_index_unchecked(offset);
412
413        Ok(idx)
414    }
415
416    /// Get the element at the given index without checking if the index is out of bounds.
417    ///
418    /// # Arguments
419    ///
420    /// * `index` - The list of indices to get the element from.
421    ///
422    /// # Returns
423    ///
424    /// A reference to the element at the given index.
425    ///
426    /// # Example
427    ///
428    /// ```
429    /// use kornia_tensor::{Tensor, CpuAllocator};
430    ///
431    /// let data: Vec<u8> = vec![1, 2, 3, 4];
432    ///
433    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
434    /// assert_eq!(*t.get_unchecked([0, 0]), 1);
435    /// assert_eq!(*t.get_unchecked([0, 1]), 2);
436    /// assert_eq!(*t.get_unchecked([1, 0]), 3);
437    /// assert_eq!(*t.get_unchecked([1, 1]), 4);
438    /// ```
439    pub fn get_unchecked(&self, index: [usize; N]) -> &T {
440        let offset = self.get_iter_offset_unchecked(index);
441        unsafe { self.storage.as_slice().get_unchecked(offset) }
442    }
443
444    /// Get the element at the given index, checking if the index is out of bounds.
445    ///
446    /// # Arguments
447    ///
448    /// * `index` - The list of indices to get the element from.
449    ///
450    /// # Returns
451    ///
452    /// A reference to the element at the given index.
453    ///
454    /// # Errors
455    ///
456    /// If the index is out of bounds, an error is returned.
457    ///
458    /// # Example
459    ///
460    /// ```
461    /// use kornia_tensor::{Tensor, CpuAllocator};
462    ///
463    /// let data: Vec<u8> = vec![1, 2, 3, 4];
464    ///
465    /// let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator).unwrap();
466    ///
467    /// assert_eq!(t.get([0, 0]), Some(&1));
468    /// assert_eq!(t.get([0, 1]), Some(&2));
469    /// assert_eq!(t.get([1, 0]), Some(&3));
470    /// assert_eq!(t.get([1, 1]), Some(&4));
471    ///
472    /// assert!(t.get([2, 0]).is_none());
473    /// ```
474    pub fn get(&self, index: [usize; N]) -> Option<&T> {
475        self.get_iter_offset(index)
476            .and_then(|i| self.storage.as_slice().get(i))
477    }
478
479    /// Reshape the tensor to a new shape.
480    ///
481    /// # Arguments
482    ///
483    /// * `shape` - The new shape of the tensor.
484    ///
485    /// # Returns
486    ///
487    /// A new `TensorView` instance.
488    ///
489    /// # Errors
490    ///
491    /// If the number of elements in the new shape does not match the number of elements in the tensor, an error is returned.
492    ///
493    /// # Example
494    ///
495    /// ```
496    /// use kornia_tensor::{Tensor, CpuAllocator};
497    ///
498    /// let data: Vec<u8> = vec![1, 2, 3, 4];
499    ///
500    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator).unwrap();
501    /// let t2 = t.reshape([2, 2]).unwrap();
502    /// assert_eq!(t2.shape, [2, 2]);
503    /// assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
504    /// assert_eq!(t2.strides, [2, 1]);
505    /// assert_eq!(t2.numel(), 4);
506    /// ```
507    pub fn reshape<const M: usize>(
508        &self,
509        shape: [usize; M],
510    ) -> Result<TensorView<'_, T, M, A>, TensorError> {
511        let numel = shape.iter().product::<usize>();
512        if numel != self.storage.len() {
513            return Err(TensorError::DimensionMismatch(format!(
514                "Cannot reshape tensor of shape {:?} with {} elements to shape {:?} with {} elements",
515                self.shape, self.storage.len(), shape, numel
516            )));
517        }
518
519        let strides = get_strides_from_shape(shape);
520
521        Ok(TensorView {
522            storage: &self.storage,
523            shape,
524            strides,
525        })
526    }
527
528    /// Permute the dimensions of the tensor.
529    ///
530    /// The permutation is given as an array of indices, where the value at each index is the new index of the dimension.
531    /// The data is not moved, only the order of the dimensions is changed.
532    ///
533    /// # Arguments
534    ///
535    /// * `axes` - The new order of the dimensions.
536    ///
537    /// # Returns
538    ///
539    /// A view of the tensor with the dimensions permuted.
540    pub fn permute_axes(&self, axes: [usize; N]) -> TensorView<'_, T, N, A> {
541        let mut new_shape = [0; N];
542        let mut new_strides = [0; N];
543        for (i, &axis) in axes.iter().enumerate() {
544            new_shape[i] = self.shape[axis];
545            new_strides[i] = self.strides[axis];
546        }
547
548        TensorView {
549            storage: &self.storage,
550            shape: new_shape,
551            strides: new_strides,
552        }
553    }
554
    /// Return a view of the tensor.
    ///
    /// The view is a reference to the tensor storage with the same shape and strides;
    /// no data is copied.
    ///
    /// # Returns
    ///
    /// A `TensorView` instance.
    pub fn view(&self) -> TensorView<'_, T, N, A> {
        TensorView {
            storage: &self.storage,
            shape: self.shape,
            strides: self.strides,
        }
    }
569
    /// Create a new tensor with all elements set to zero.
    ///
    /// # Arguments
    ///
    /// * `shape` - The shape of the tensor.
    /// * `alloc` - The allocator to use.
    ///
    /// # Returns
    ///
    /// A new `Tensor` with every element equal to `T::zero()`.
    pub fn zeros(shape: [usize; N], alloc: A) -> Tensor<T, N, A>
    where
        T: Clone + num_traits::Zero,
    {
        Self::from_shape_val(shape, T::zero(), alloc)
    }
585
586    /// Apply a function to each element of the tensor.
587    ///
588    /// # Arguments
589    ///
590    /// * `f` - The function to apply to each element.
591    ///
592    /// # Returns
593    ///
594    /// A new `Tensor` instance.
595    ///
596    /// # Example
597    ///
598    /// ```
599    /// use kornia_tensor::{Tensor, CpuAllocator};
600    ///
601    /// let data: Vec<u8> = vec![1, 2, 3, 4];
602    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator).unwrap();
603    ///
604    /// let t2 = t.map(|x| *x + 1);
605    /// assert_eq!(t2.as_slice(), vec![2, 3, 4, 5]);
606    /// ```
607    pub fn map<U, F>(&self, f: F) -> Tensor<U, N, A>
608    where
609        F: Fn(&T) -> U,
610    {
611        let data: Vec<U> = self.as_slice().iter().map(f).collect();
612        let storage = TensorStorage::from_vec(data, self.storage.alloc().clone());
613
614        Tensor {
615            storage,
616            shape: self.shape,
617            strides: self.strides,
618        }
619    }
620
621    /// Checks if the Tensor has a standard contiguous layout according to its `shape` and `strides`.
622    ///
623    /// # Returns
624    ///
625    /// boolean, true if contiguous and false if not
626    ///
627    /// # Examples
628    ///
629    /// ```
630    /// use kornia_tensor::{Tensor, CpuAllocator};
631    /// let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
632    /// let mut t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator).unwrap();
633    /// // arbitrary incorrect stride
634    /// t.strides = [10, 5, 1];
635    /// assert!(!t.is_standard_layout());
636    /// ```
637    pub fn is_standard_layout(&self) -> bool {
638        let mut expected_stride: usize = 1;
639        for (&dim, &stride) in self.shape.iter().rev().zip(self.strides.iter().rev()) {
640            if stride != expected_stride {
641                return false;
642            }
643            expected_stride = expected_stride.saturating_mul(dim);
644        }
645        true
646    }
647
    /// Copy Tensor storage data into contiguous memory if not already
    ///
    /// # Returns
    ///
    /// A new Tensor with contiguous storage
    ///
    /// # Examples
    ///
    /// ```
    /// use kornia_tensor::{Tensor, CpuAllocator};
    /// let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
    /// let mut t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data.clone(), CpuAllocator).unwrap();
    /// // altering strides
    /// t.strides = [1, 6, 2];
    /// assert!(!t.is_standard_layout());
    /// match t.to_standard_layout(CpuAllocator) {
    ///     Ok(t2) => {
    ///         assert!(t2.is_standard_layout());
    ///     }
    ///     Err(e) => {
    ///         eprintln!("to_standard_layout failed: {}", e);
    ///     }
    /// }
    /// ```
    pub fn to_standard_layout(&self, alloc: A) -> Result<Self, TensorError>
    where
        // NOTE(review): the `std::fmt::Debug` bound appears unused in this body;
        // confirm before relaxing, since removing it would be backward-compatible.
        T: Clone + std::fmt::Debug,
    {
        // Fast path: already contiguous, just clone.
        if self.is_standard_layout() {
            return Ok(self.clone());
        }

        let total_elems: usize = self.shape.iter().product();
        let mut flat = Vec::with_capacity(total_elems);
        let mut idx = [0; N];
        let slice = self.storage.as_slice();

        // Walk all logical indices in row-major order, resolving each through
        // the (possibly non-standard) strides, and collect into a flat buffer.
        for _ in 0..total_elems {
            let offset = idx
                .iter()
                .zip(self.strides.iter())
                .map(|(&i, &s)| i * s)
                .sum::<usize>();

            flat.push(slice[offset].clone());

            // Increment the multi-dimensional index like an odometer,
            // innermost axis first.
            for dim in (0..N).rev() {
                idx[dim] += 1;
                if idx[dim] < self.shape[dim] {
                    break;
                } else {
                    idx[dim] = 0;
                }
            }
        }

        // `flat.len() == total_elems` by construction, so this error path is
        // not expected to trigger; it is kept defensively.
        Tensor::from_shape_vec(self.shape, flat, alloc).map_err(|_| {
            TensorError::DimensionMismatch(format!(
                "Cannot construct tensor of shape {:?} with {:?} elements",
                self.shape, total_elems,
            ))
        })
    }
713
714    /// Cast the tensor to a new type.
715    ///
716    /// # Returns
717    ///
718    /// A new `Tensor` instance.
719    ///
720    /// # Example
721    ///
722    /// ```
723    /// use kornia_tensor::{Tensor, CpuAllocator};
724    ///
725    /// let data: Vec<u8> = vec![1, 2, 3, 4];
726    /// let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator).unwrap();
727    ///
728    /// let t2 = t.cast::<f32>();
729    /// assert_eq!(t2.as_slice(), vec![1.0, 2.0, 3.0, 4.0]);
730    /// ```
731    pub fn cast<U>(&self) -> Tensor<U, N, CpuAllocator>
732    where
733        U: From<T>,
734        T: Clone,
735    {
736        let mut data: Vec<U> = Vec::with_capacity(self.storage.len());
737        self.as_slice().iter().for_each(|x| {
738            data.push(U::from(x.clone()));
739        });
740        let storage = TensorStorage::from_vec(data, CpuAllocator);
741        Tensor {
742            storage,
743            shape: self.shape,
744            strides: self.strides,
745        }
746    }
747
748    /// Perform an element-wise operation on two tensors.
749    ///
750    /// # Arguments
751    ///
752    /// * `other` - The other tensor to perform the operation with.
753    /// * `op` - The operation to perform.
754    ///
755    /// # Returns
756    ///
757    /// A new `Tensor` instance.
758    ///
759    /// # Example
760    ///
761    /// ```
762    /// use kornia_tensor::{Tensor, CpuAllocator};
763    ///
764    /// let data1: Vec<u8> = vec![1, 2, 3, 4];
765    /// let t1 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data1, CpuAllocator).unwrap();
766    ///
767    /// let data2: Vec<u8> = vec![1, 2, 3, 4];
768    /// let t2 = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data2, CpuAllocator).unwrap();
769    ///
770    /// let t3 = t1.element_wise_op(&t2, |a, b| *a + *b).unwrap();
771    /// assert_eq!(t3.as_slice(), vec![2, 4, 6, 8]);
772    ///
773    /// let t4 = t1.element_wise_op(&t2, |a, b| *a - *b).unwrap();
774    /// assert_eq!(t4.as_slice(), vec![0, 0, 0, 0]);
775    ///
776    /// let t5 = t1.element_wise_op(&t2, |a, b| *a * *b).unwrap();
777    /// assert_eq!(t5.as_slice(), vec![1, 4, 9, 16]);
778    ///
779    /// let t6 = t1.element_wise_op(&t2, |a, b| *a / *b).unwrap();
780    /// assert_eq!(t6.as_slice(), vec![1, 1, 1, 1]);
781    /// ```
782    pub fn element_wise_op<F>(
783        &self,
784        other: &Tensor<T, N, CpuAllocator>,
785        op: F,
786    ) -> Result<Tensor<T, N, CpuAllocator>, TensorError>
787    where
788        F: Fn(&T, &T) -> T,
789    {
790        if self.shape != other.shape {
791            return Err(TensorError::DimensionMismatch(format!(
792                "Shapes {:?} and {:?} are not compatible for element-wise operations",
793                self.shape, other.shape
794            )));
795        }
796
797        let data = self
798            .as_slice()
799            .iter()
800            .zip(other.as_slice().iter())
801            .map(|(a, b)| op(a, b))
802            .collect();
803
804        let storage = TensorStorage::from_vec(data, CpuAllocator);
805
806        Ok(Tensor {
807            storage,
808            shape: self.shape,
809            strides: self.strides,
810        })
811    }
812}
813
/// Deep-clones the tensor: the storage is cloned (data copied), while the
/// fixed-size `shape`/`strides` arrays are `Copy`.
impl<T, const N: usize, A> Clone for Tensor<T, N, A>
where
    T: Clone,
    A: TensorAllocator + Clone,
{
    fn clone(&self) -> Self {
        Self {
            storage: self.storage.clone(),
            shape: self.shape,
            strides: self.strides,
        }
    }
}
827
/// Pretty-prints the tensor with nested brackets, fixed-width or scientific
/// element formatting, and `...` elision for dimensions longer than 8.
///
/// NOTE(review): `.max().unwrap()` below panics for a zero-element tensor —
/// confirm whether empty tensors are expected to be displayable.
impl<T, const N: usize, A> std::fmt::Display for Tensor<T, N, A>
where
    T: std::fmt::Display + std::fmt::LowerExp,
    A: TensorAllocator,
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Widest element when rendered with 4 decimal places; used for alignment.
        let width = self
            .storage
            .as_slice()
            .iter()
            .map(|v| format!("{v:.4}").len())
            .max()
            .unwrap();

        // Fall back to scientific notation when fixed-point gets too wide.
        let scientific = width > 8;

        // Dimensions longer than 8 get elided with `...` after the first 3 entries.
        let should_mask: [bool; N] = self.shape.map(|s| s > 8);
        let mut skip_until = 0;

        for (i, v) in self.storage.as_slice().iter().enumerate() {
            // Skip elements hidden by a previously emitted `...`.
            if i < skip_until {
                continue;
            }
            let mut value = String::new();
            let mut prefix = String::new();
            let mut suffix = String::new();
            let mut separator = ",".to_string();
            let mut last_size = 1;
            // Walk dimensions innermost-first to decide brackets/elision for
            // this flat position.
            for (dim, (&size, maskable)) in self.shape.iter().zip(should_mask).enumerate().rev() {
                let prod = size * last_size;
                if i % prod == (3 * last_size) && maskable {
                    // Fourth entry of a maskable dimension: emit `...` and jump
                    // ahead so only the tail of the dimension is printed.
                    let pad = if dim == (N - 1) { 0 } else { dim + 1 };
                    value = format!("{}...", " ".repeat(pad));
                    skip_until = i + (size - 4) * last_size;
                    prefix = "".to_string();
                    if dim != (N - 1) {
                        separator = "\n".repeat(N - 1 - dim);
                    }
                    break;
                } else if i % prod == 0 {
                    // First element along this dimension: open a bracket.
                    prefix.push('[');
                } else if (i + 1) % prod == 0 {
                    // Last element along this dimension: close a bracket.
                    suffix.push(']');
                    separator.push('\n');
                    if dim == 0 {
                        separator = "".to_string();
                    }
                } else {
                    break;
                }
                last_size = prod;
            }
            if !prefix.is_empty() {
                prefix = format!("{prefix:>N$}");
            }

            if value.is_empty() {
                value = if scientific {
                    // Normalize the exponent to a signed, zero-padded two-digit form.
                    let num = format!("{v:.4e}");
                    let (before, after) = num.split_once('e').unwrap();
                    let after = if let Some(stripped) = after.strip_prefix('-') {
                        format!("-{:0>2}", &stripped)
                    } else {
                        format!("+{:0>2}", &after)
                    };
                    format!("{before}e{after}")
                } else {
                    let rounded = format!("{v:.4}");
                    format!("{rounded:>width$}")
                }
            };
            write!(f, "{prefix}{value}{suffix}{separator}",)?;
        }
        Ok(())
    }
}
904
#[cfg(test)]
mod tests {
    //! Unit tests for `Tensor`: construction, indexing, reshaping,
    //! permutation, layout checks, casting, and `Display` formatting.
    use crate::allocator::CpuAllocator;
    use crate::tensor::{Tensor, TensorError};

    #[test]
    fn constructor_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1];
        let t = Tensor::<u8, 1, _>::from_shape_vec([1], data, CpuAllocator)?;
        assert_eq!(t.shape, [1]);
        assert_eq!(t.as_slice(), vec![1]);
        assert_eq!(t.strides, [1]);
        assert_eq!(t.numel(), 1);
        Ok(())
    }

    #[test]
    fn constructor_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2];
        let t = Tensor::<u8, 2, _>::from_shape_vec([1, 2], data, CpuAllocator)?;
        assert_eq!(t.shape, [1, 2]);
        assert_eq!(t.as_slice(), vec![1, 2]);
        assert_eq!(t.strides, [2, 1]);
        assert_eq!(t.numel(), 2);
        Ok(())
    }

    #[test]
    fn get_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        assert_eq!(t.get([0]), Some(&1));
        assert_eq!(t.get([1]), Some(&2));
        assert_eq!(t.get([2]), Some(&3));
        assert_eq!(t.get([3]), Some(&4));
        // out-of-bounds access must return None, not panic
        assert!(t.get([4]).is_none());
        Ok(())
    }

    #[test]
    fn get_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        assert_eq!(t.get([0, 0]), Some(&1));
        assert_eq!(t.get([0, 1]), Some(&2));
        assert_eq!(t.get([1, 0]), Some(&3));
        assert_eq!(t.get([1, 1]), Some(&4));
        // each axis is bounds-checked independently
        assert!(t.get([2, 0]).is_none());
        assert!(t.get([0, 2]).is_none());
        Ok(())
    }

    #[test]
    fn get_3d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6];
        let t = Tensor::<u8, 3, _>::from_shape_vec([2, 1, 3], data, CpuAllocator)?;
        assert_eq!(t.get([0, 0, 0]), Some(&1));
        assert_eq!(t.get([0, 0, 1]), Some(&2));
        assert_eq!(t.get([0, 0, 2]), Some(&3));
        assert_eq!(t.get([1, 0, 0]), Some(&4));
        assert_eq!(t.get([1, 0, 1]), Some(&5));
        assert_eq!(t.get([1, 0, 2]), Some(&6));
        assert!(t.get([2, 0, 0]).is_none());
        assert!(t.get([0, 1, 0]).is_none());
        assert!(t.get([0, 0, 3]).is_none());
        Ok(())
    }

    #[test]
    fn get_unchecked_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        assert_eq!(*t.get_unchecked([0]), 1);
        assert_eq!(*t.get_unchecked([1]), 2);
        assert_eq!(*t.get_unchecked([2]), 3);
        assert_eq!(*t.get_unchecked([3]), 4);
        Ok(())
    }

    #[test]
    fn get_unchecked_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        assert_eq!(*t.get_unchecked([0, 0]), 1);
        assert_eq!(*t.get_unchecked([0, 1]), 2);
        assert_eq!(*t.get_unchecked([1, 0]), 3);
        assert_eq!(*t.get_unchecked([1, 1]), 4);
        Ok(())
    }

    #[test]
    fn reshape_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;

        let view = t.reshape([2, 2])?;

        assert_eq!(view.shape, [2, 2]);
        assert_eq!(view.as_slice(), vec![1, 2, 3, 4]);
        assert_eq!(view.strides, [2, 1]);
        assert_eq!(view.numel(), 4);
        assert_eq!(view.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn reshape_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        let t2 = t.reshape([4])?;

        assert_eq!(t2.shape, [4]);
        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
        assert_eq!(t2.strides, [1]);
        assert_eq!(t2.numel(), 4);
        assert_eq!(t2.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn reshape_get_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        let view = t.reshape([2, 2])?;
        assert_eq!(*view.get_unchecked([0, 0]), 1);
        assert_eq!(*view.get_unchecked([0, 1]), 2);
        assert_eq!(*view.get_unchecked([1, 0]), 3);
        assert_eq!(*view.get_unchecked([1, 1]), 4);
        assert_eq!(view.numel(), 4);
        assert_eq!(view.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn permute_axes_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        // permuting a 1-D tensor with the identity permutation is a no-op
        let t2 = t.permute_axes([0]);
        assert_eq!(t2.shape, [4]);
        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
        assert_eq!(t2.strides, [1]);
        assert_eq!(t2.as_contiguous().as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn permute_axes_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        // [1, 0] transposes the matrix: strides are swapped, data is not moved
        let view = t.permute_axes([1, 0]);
        assert_eq!(view.shape, [2, 2]);
        assert_eq!(*view.get_unchecked([0, 0]), 1u8);
        assert_eq!(*view.get_unchecked([1, 0]), 2u8);
        assert_eq!(*view.get_unchecked([0, 1]), 3u8);
        assert_eq!(*view.get_unchecked([1, 1]), 4u8);
        assert_eq!(view.strides, [1, 2]);
        assert_eq!(view.as_contiguous().as_slice(), vec![1, 3, 2, 4]);
        Ok(())
    }

    #[test]
    fn contiguous_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 3], data, CpuAllocator)?;

        let view = t.permute_axes([1, 0]);

        // as_contiguous materializes the transposed view in standard layout
        let contiguous = view.as_contiguous();

        assert_eq!(contiguous.shape, [3, 2]);
        assert_eq!(contiguous.strides, [2, 1]);
        assert_eq!(contiguous.as_slice(), vec![1, 4, 2, 5, 3, 6]);

        Ok(())
    }

    #[test]
    fn zeros_1d() -> Result<(), TensorError> {
        let t = Tensor::<u8, 1, _>::zeros([4], CpuAllocator);
        assert_eq!(t.as_slice(), vec![0, 0, 0, 0]);
        Ok(())
    }

    #[test]
    fn zeros_2d() -> Result<(), TensorError> {
        let t = Tensor::<u8, 2, _>::zeros([2, 2], CpuAllocator);
        assert_eq!(t.as_slice(), vec![0, 0, 0, 0]);
        Ok(())
    }

    #[test]
    fn map_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        let t2 = t.map(|x| *x + 1);
        assert_eq!(t2.as_slice(), vec![2, 3, 4, 5]);
        Ok(())
    }

    #[test]
    fn map_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        let t2 = t.map(|x| *x + 1);
        assert_eq!(t2.as_slice(), vec![2, 3, 4, 5]);
        Ok(())
    }

    #[test]
    fn from_shape_val_1d() -> Result<(), TensorError> {
        let t = Tensor::<u8, 1, _>::from_shape_val([4], 0, CpuAllocator);
        assert_eq!(t.as_slice(), vec![0, 0, 0, 0]);
        Ok(())
    }

    #[test]
    fn from_shape_val_2d() -> Result<(), TensorError> {
        let t = Tensor::<u8, 2, _>::from_shape_val([2, 2], 1, CpuAllocator);
        assert_eq!(t.as_slice(), vec![1, 1, 1, 1]);
        Ok(())
    }

    #[test]
    fn from_shape_val_3d() -> Result<(), TensorError> {
        let t = Tensor::<u8, 3, _>::from_shape_val([2, 1, 3], 2, CpuAllocator);
        assert_eq!(t.as_slice(), vec![2, 2, 2, 2, 2, 2]);
        Ok(())
    }

    #[test]
    fn cast_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, CpuAllocator)?;
        let t2 = t.cast::<u16>();
        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn cast_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_vec([2, 2], data, CpuAllocator)?;
        let t2 = t.cast::<u16>();
        assert_eq!(t2.as_slice(), vec![1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn from_shape_fn_2d_u8() -> Result<(), TensorError> {
        let alloc = CpuAllocator;
        // multiplication-table fill: element [i, j] = (1+i) * (1+j)
        let t = Tensor::from_shape_fn([3, 3], alloc, |[i, j]| ((1 + i) * (1 + j)) as u8);
        assert_eq!(t.as_slice(), vec![1, 2, 3, 2, 4, 6, 3, 6, 9]);
        Ok(())
    }

    #[test]
    fn from_shape_fn_2d_f32() -> Result<(), TensorError> {
        let alloc = CpuAllocator;
        let t = Tensor::from_shape_fn([3, 3], alloc, |[i, j]| ((1 + i) * (1 + j)) as f32);
        assert_eq!(
            t.as_slice(),
            vec![1.0, 2.0, 3.0, 2.0, 4.0, 6.0, 3.0, 6.0, 9.0]
        );
        Ok(())
    }

    #[test]
    fn from_shape_fn_3d() -> Result<(), TensorError> {
        let alloc = CpuAllocator;
        let t = Tensor::from_shape_fn([2, 3, 3], alloc, |[x, y, c]| {
            ((1 + x) * (1 + y) * (1 + c)) as i16
        });
        assert_eq!(
            t.as_slice(),
            vec![1, 2, 3, 2, 4, 6, 3, 6, 9, 2, 4, 6, 4, 8, 12, 6, 12, 18]
        );
        Ok(())
    }

    #[test]
    fn view_1d() -> Result<(), TensorError> {
        let alloc = CpuAllocator;
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, _>::from_shape_vec([4], data, alloc)?;
        let view = t.view();

        // check that the view has the same data
        assert_eq!(view.as_slice(), t.as_slice());

        // check that the data pointer is the same (zero-copy view)
        assert!(std::ptr::eq(view.as_ptr(), t.as_ptr()));

        Ok(())
    }

    #[test]
    fn from_slice() -> Result<(), TensorError> {
        let data: [u8; 4] = [1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;

        assert_eq!(t.shape, [2, 2]);
        assert_eq!(t.as_slice(), &[1, 2, 3, 4]);

        Ok(())
    }

    #[test]
    fn display_2d() -> Result<(), TensorError> {
        let data: [u8; 4] = [1, 2, 3, 4];
        let t = Tensor::<u8, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;
        let disp = t.to_string();
        let lines = disp.lines().collect::<Vec<_>>();

        #[rustfmt::skip]
        assert_eq!(lines.as_slice(),
        ["[[1,2],",
         " [3,4]]"]);
        Ok(())
    }

    #[test]
    fn display_3d() -> Result<(), TensorError> {
        let data: [u8; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t = Tensor::<u8, 3, _>::from_shape_slice([2, 3, 2], &data, CpuAllocator)?;
        let disp = t.to_string();
        let lines = disp.lines().collect::<Vec<_>>();

        // values are right-aligned to the widest element ("12" -> width 2)
        #[rustfmt::skip]
        assert_eq!(lines.as_slice(),
        ["[[[ 1, 2],",
         "  [ 3, 4],",
         "  [ 5, 6]],",
         "",
         " [[ 7, 8],",
         "  [ 9,10],",
         "  [11,12]]]"]);
        Ok(())
    }

    #[test]
    fn display_float() -> Result<(), TensorError> {
        let data: [f32; 4] = [1.00001, 1.00009, 0.99991, 0.99999];
        let t = Tensor::<f32, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;
        let disp = t.to_string();
        let lines = disp.lines().collect::<Vec<_>>();

        // floats close to 1.0 are rendered with 4 decimal places
        #[rustfmt::skip]
        assert_eq!(lines.as_slice(),
        ["[[1.0000,1.0001],",
         " [0.9999,1.0000]]"]);
        Ok(())
    }

    #[test]
    fn display_big_float() -> Result<(), TensorError> {
        let data: [f32; 4] = [1000.00001, 1.00009, 0.99991, 0.99999];
        let t = Tensor::<f32, 2, _>::from_shape_slice([2, 2], &data, CpuAllocator)?;
        let disp = t.to_string();
        let lines = disp.lines().collect::<Vec<_>>();

        // a large magnitude switches the whole tensor to scientific notation
        #[rustfmt::skip]
        assert_eq!(lines.as_slice(),
        ["[[1.0000e+03,1.0001e+00],",
         " [9.9991e-01,9.9999e-01]]"]);
        Ok(())
    }

    #[test]
    fn display_big_tensor() -> Result<(), TensorError> {
        let data: [u8; 1000] = [0; 1000];
        let t = Tensor::<u8, 3, _>::from_shape_slice([10, 10, 10], &data, CpuAllocator)?;
        let disp = t.to_string();
        let lines = disp.lines().collect::<Vec<_>>();

        // large tensors are elided with "..." on every axis
        #[rustfmt::skip]
        assert_eq!(lines.as_slice(),
        ["[[[0,0,0,...,0],",
         "  [0,0,0,...,0],",
         "  [0,0,0,...,0],",
         "  ...",
         "  [0,0,0,...,0]],",
         "",
         " [[0,0,0,...,0],",
         "  [0,0,0,...,0],",
         "  [0,0,0,...,0],",
         "  ...",
         "  [0,0,0,...,0]],",
         "",
         " [[0,0,0,...,0],",
         "  [0,0,0,...,0],",
         "  [0,0,0,...,0],",
         "  ...",
         "  [0,0,0,...,0]],",
         "",
         " ...",
         "",
         " [[0,0,0,...,0],",
         "  [0,0,0,...,0],",
         "  [0,0,0,...,0],",
         "  ...",
         "  [0,0,0,...,0]]]"]);
        Ok(())
    }

    #[test]
    fn get_index_unchecked_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator)?;
        assert_eq!(t.get_index_unchecked(0), [0]);
        assert_eq!(t.get_index_unchecked(1), [1]);
        assert_eq!(t.get_index_unchecked(2), [2]);
        assert_eq!(t.get_index_unchecked(3), [3]);
        Ok(())
    }

    #[test]
    fn get_index_unchecked_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator)?;
        assert_eq!(t.get_index_unchecked(0), [0, 0]);
        assert_eq!(t.get_index_unchecked(1), [0, 1]);
        assert_eq!(t.get_index_unchecked(2), [1, 0]);
        assert_eq!(t.get_index_unchecked(3), [1, 1]);
        Ok(())
    }

    #[test]
    fn get_index_unchecked_3d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
        assert_eq!(t.get_index_unchecked(0), [0, 0, 0]);
        assert_eq!(t.get_index_unchecked(1), [0, 0, 1]);
        assert_eq!(t.get_index_unchecked(2), [0, 0, 2]);
        assert_eq!(t.get_index_unchecked(3), [0, 1, 0]);
        assert_eq!(t.get_index_unchecked(4), [0, 1, 1]);
        assert_eq!(t.get_index_unchecked(5), [0, 1, 2]);
        assert_eq!(t.get_index_unchecked(6), [1, 0, 0]);
        assert_eq!(t.get_index_unchecked(7), [1, 0, 1]);
        assert_eq!(t.get_index_unchecked(8), [1, 0, 2]);
        assert_eq!(t.get_index_unchecked(9), [1, 1, 0]);
        assert_eq!(t.get_index_unchecked(10), [1, 1, 1]);
        assert_eq!(t.get_index_unchecked(11), [1, 1, 2]);
        Ok(())
    }

    #[test]
    fn get_index_to_offset_and_back() -> Result<(), TensorError> {
        // offset -> index -> offset must round-trip for every element
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
        for offset in 0..12 {
            assert_eq!(
                t.get_iter_offset_unchecked(t.get_index_unchecked(offset)),
                offset
            );
        }
        Ok(())
    }

    #[test]
    fn get_offset_to_index_and_back() -> Result<(), TensorError> {
        // index -> offset -> index must round-trip for every element
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
        for ind in [
            [0, 0, 0],
            [0, 0, 1],
            [0, 0, 2],
            [0, 1, 0],
            [0, 1, 1],
            [0, 1, 2],
            [1, 0, 0],
            [1, 0, 1],
            [1, 0, 2],
            [1, 1, 0],
            [1, 1, 1],
            [1, 1, 2],
        ] {
            assert_eq!(t.get_index_unchecked(t.get_iter_offset_unchecked(ind)), ind);
        }
        Ok(())
    }

    #[test]
    fn get_index_1d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 1, CpuAllocator>::from_shape_vec([4], data, CpuAllocator)?;
        assert_eq!(t.get_index(3), Ok([3]));
        assert!(t
            .get_index(4)
            .is_err_and(|x| x == TensorError::IndexOutOfBounds(4)));
        Ok(())
    }

    #[test]
    fn get_index_2d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let t = Tensor::<u8, 2, CpuAllocator>::from_shape_vec([2, 2], data, CpuAllocator)?;
        assert_eq!(t.get_index_unchecked(3), [1, 1]);
        assert!(t
            .get_index(4)
            .is_err_and(|x| x == TensorError::IndexOutOfBounds(4)));
        Ok(())
    }

    #[test]
    fn get_index_3d() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
        assert_eq!(t.get_index_unchecked(11), [1, 1, 2]);
        assert!(t
            .get_index(12)
            .is_err_and(|x| x == TensorError::IndexOutOfBounds(12)));
        Ok(())
    }

    #[test]
    fn from_raw_parts() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4];
        let (ptr, len) = (data.as_ptr(), data.len());
        // Transfer ownership of the allocation to the tensor: forget the Vec
        // *before* the fallible constructor, so an early return via `?` cannot
        // drop `data` and free a buffer the tensor already owns (a leak on the
        // error path is safer than a double free).
        std::mem::forget(data);
        let t = unsafe { Tensor::from_raw_parts([2, 2], ptr, len, CpuAllocator)? };
        assert_eq!(t.shape, [2, 2]);
        assert_eq!(t.as_slice(), &[1, 2, 3, 4]);
        Ok(())
    }

    #[test]
    fn contiguous_tensor_is_standard_layout_true() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
        assert!(t.is_standard_layout());
        Ok(())
    }

    #[test]
    fn broken_stride_is_standard_layout_false() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let mut t = Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data, CpuAllocator)?;
        // arbitrary incorrect stride
        t.strides = [10, 5, 1];
        assert!(!t.is_standard_layout());
        Ok(())
    }

    #[test]
    fn contiguous_tensor_roundtrip() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let t =
            Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data.clone(), CpuAllocator)?;
        assert!(t.is_standard_layout());
        match t.to_standard_layout(CpuAllocator) {
            Ok(t2) => {
                assert!(t2.is_standard_layout());
                // an already-standard tensor must round-trip with identical data
                assert_eq!(t2.storage.as_slice(), data.as_slice());
            }
            Err(e) => return Err(e),
        }
        Ok(())
    }

    #[test]
    fn non_contiguous_to_standard_layout() -> Result<(), TensorError> {
        let data: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let mut t =
            Tensor::<u8, 3, CpuAllocator>::from_shape_vec([2, 2, 3], data.clone(), CpuAllocator)?;
        // altering strides to a non-standard (but in-bounds) layout
        t.strides = [1, 6, 2];
        assert!(!t.is_standard_layout());
        match t.to_standard_layout(CpuAllocator) {
            Ok(t2) => {
                assert!(t2.is_standard_layout());
            }
            Err(e) => return Err(e),
        }
        Ok(())
    }
}