
Struct Tensor 

Source
pub struct Tensor<T, B = Cpu, const DEVICE_ID: usize = 0, A = HptAllocator<B>>
where B: BackendTy + Buffer, A: Allocator,
{ /* private fields */ }

Tensor is an alias for an N-dimensional array.

§Properties

  • data: The pointer to the underlying data.
  • layout: The layout of the tensor; the strides, shape, ndim, and size can be obtained from it.
  • parent: The parent of this tensor. parent is always the root tensor (never a view).
  • mem_layout: A std::alloc::Layout, used to deallocate the memory and to look up cached allocations in the allocator.

Implementations§

Source§

impl<T, A2, const DEVICE: usize> Tensor<T, Cpu, DEVICE, A2>
where T: CommonBounds + Eval, <T as Eval>::Output: CommonBounds, T::Vec: Eval<Output = <<T as Eval>::Output as TypeCommon>::Vec>, A2: Allocator, A2::Output: AllocatorOutputRetrive,

Source

pub fn is_inf( &self, ) -> Result<Tensor<<T as Eval>::Output, Cpu, DEVICE, A2>, TensorError>

Checks for infinity (inf) values in the tensor.

This method returns a new tensor where each element indicates whether the corresponding element in the input tensor is an infinity value (+inf or -inf). The output tensor will contain boolean-like values (1 for inf, 0 for non-inf).

§Returns

This function returns a Result containing a tensor of type Tensor<<T as Eval>::Output>, where each element is 1 if the corresponding input element is inf and 0 otherwise.
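§Example

A minimal sketch, assuming the crate is imported as hpt and that Vec<f32> converts into a Tensor via Tensor::new (the conversion is an assumption, not documented on this page):

use hpt::Tensor;

let a = Tensor::<f32>::new(vec![1.0, f32::INFINITY, f32::NEG_INFINITY]);
let mask = a.is_inf().expect("is_inf failed"); // 1 where the element is +inf or -inf, 0 elsewhere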

Source

pub fn is_nan( &self, ) -> Result<Tensor<<T as Eval>::Output, Cpu, DEVICE, A2>, TensorError>

Checks for NaN (Not-a-Number) values in the tensor.

This method returns a new tensor where each element indicates whether the corresponding element in the input tensor is a NaN value. The output tensor will contain boolean-like values (1 for NaN, 0 for non-NaN).

§Returns

This function returns a Result containing a tensor of type Tensor<<T as Eval>::Output>, where each element is 1 if the corresponding input element is NaN and 0 otherwise.
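§Example

As with is_inf, a minimal sketch under the same construction assumptions:

let a = Tensor::<f32>::new(vec![1.0, f32::NAN, 3.0]);
let mask = a.is_nan().expect("is_nan failed"); // 1 where the element is NaN, 0 elsewhere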

Source§

impl<T: CommonBounds, const DEVICE: usize, A> Tensor<T, Cpu, DEVICE, A>

Source

pub unsafe fn from_raw<S: Into<Shape>>( data: *mut T, shape: S, ) -> Result<Self, TensorError>

Creates a new tensor from a raw pointer and a shape.

§Note

It is safer to reclaim the pointer by calling forget, because forget tracks the reference count.

§Safety
  • The pointer must be valid for the lifetime of the tensor.
  • The pointer must be aligned and properly sized.
  • The shape must be valid.
§Note

It is the user’s responsibility to manage the lifetime of the data. Hpt won’t drop the data even if the tensor is dropped.
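§Example

A sketch of wrapping an externally owned buffer; passing [2, 3] as the shape relies on the S: Into<Shape> bound and is an assumption, and the buffer must stay alive for as long as the tensor does:

let mut buf: Vec<f32> = vec![0.0; 6];
let t = unsafe { Tensor::<f32>::from_raw(buf.as_mut_ptr(), [2, 3]) }.expect("from_raw failed");
// Hpt will not free `buf` when `t` is dropped; the caller keeps ownership of the memory.
drop(t);
drop(buf);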

Source

pub fn astype<U>(&self) -> Result<Tensor<U, Cpu, DEVICE, A>, TensorError>
where U: CommonBounds, T: Cast<U>,

Casts the tensor to the new element type U.
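§Example

A short sketch (construction assumed as in the earlier examples):

let a = Tensor::<i32>::new(vec![1, 2, 3]);
let b = a.astype::<f64>().expect("astype failed"); // b: Tensor<f64>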

Source

pub fn allclose( &self, other: &Tensor<T, Cpu, DEVICE, A>, rtol: T, atol: T, ) -> bool
where T: Eval<Output = bool> + Cmp<Output = bool>,

Checks whether two tensors are element-wise close to each other within the given relative (rtol) and absolute (atol) tolerances.
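§Example

A sketch; the exact closeness formula is an assumption modeled on the usual |a - b| <= atol + rtol * |b| convention:

let a = Tensor::<f32>::new(vec![1.0, 2.0]);
let b = Tensor::<f32>::new(vec![1.000001, 2.0]);
assert!(a.allclose(&b, 1e-5, 1e-8));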

Source

pub unsafe fn forget(self) -> Result<(*mut u8, Layout), TensorError>

Forgets the tensor and returns the raw pointer to its data together with its memory Layout.

§Safety
  • The user must ensure the tensor is not used after it has been forgotten.
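§Example

A sketch of handing ownership of the buffer back to the caller; how the returned allocation must eventually be freed depends on the allocator, so this snippet only inspects it:

let t = Tensor::<f32>::new(vec![1.0, 2.0, 3.0]);
let (ptr, layout) = unsafe { t.forget() }.expect("forget failed");
// `t` is consumed and must not be used again; `layout` describes the allocation at `ptr`.
println!("{} bytes at {:p}", layout.size(), ptr);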
Source

pub unsafe fn forget_copy(&self) -> Result<(*mut u8, Layout), TensorError>

Clones the tensor's data and returns the raw pointer and memory Layout of the copy; the original tensor remains usable.
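§Example

A sketch contrasting forget_copy with forget: only a copy of the data is handed out, so the original tensor stays usable (construction assumed as above):

let t = Tensor::<f32>::new(vec![1.0, 2.0]);
let (copy_ptr, copy_layout) = unsafe { t.forget_copy() }.expect("forget_copy failed");
// `t` is still valid here; `copy_ptr`/`copy_layout` describe an independent allocation the caller now owns.
let _mask = t.is_nan().expect("is_nan failed");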

Source§

impl<T, const DEVICE: usize, Al> Tensor<T, Cpu, DEVICE, Al>
where Al: Allocator,

Source

pub fn new<A>(data: A) -> Self
where A: Into<Tensor<T, Cpu, DEVICE, Al>>,

Creates a new tensor from the provided data.
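§Example

A minimal construction sketch; exactly which types implement Into<Tensor> (Vec, fixed-size arrays, nested arrays, ...) is not documented on this page and is assumed here:

use hpt::Tensor;

let from_vec = Tensor::<f32>::new(vec![1.0, 2.0, 3.0]);
let from_array = Tensor::<i64>::new([1i64, 2, 3]);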

Trait Implementations§

Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for Complex32

Source§

type Output = Tensor<<Complex<f32> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for Complex64

Source§

type Output = Tensor<<Complex<f64> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for bf16

Source§

type Output = Tensor<<bf16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for f16

Source§

type Output = Tensor<<f16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for f32

Source§

type Output = Tensor<<f32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for f64

Source§

type Output = Tensor<<f64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<&Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Add<&Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>
where T: CommonBounds + NormalOut<U>, U: CommonBounds, <T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output>, T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>, A: Allocator, A::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Add<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>
where T: CommonBounds + NormalOut<U>, U: CommonBounds, <T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output>, T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>, A: Allocator, A::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Complex<f32>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f32>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Complex32) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Complex<f32>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f32>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Complex32) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Complex<f64>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f64>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Complex64) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Complex<f64>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f64>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Complex64) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for Complex32

Source§

type Output = Tensor<<Complex<f32> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for Complex64

Source§

type Output = Tensor<<Complex<f64> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for bf16

Source§

type Output = Tensor<<bf16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for f16

Source§

type Output = Tensor<<f16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for f32

Source§

type Output = Tensor<<f32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for f64

Source§

type Output = Tensor<<f64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Add<Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>
where T: CommonBounds + NormalOut<U>, U: CommonBounds, <T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output>, T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>, A: Allocator, A::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Add<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>
where T: CommonBounds + NormalOut<U>, U: CommonBounds, <T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output>, T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>, A: Allocator, A::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the + operation. Read more
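Element-wise + between tensors follows from the impls above; a sketch (adding tensors of different element types relies on the crate's NormalOut type promotion, whose concrete result types are not listed here, and construction is assumed as in the earlier examples):

let a = Tensor::<f32>::new(vec![1.0, 2.0, 3.0]);
let b = Tensor::<f32>::new(vec![10.0, 20.0, 30.0]);
let c = &a + &b; // by reference: both operands stay usable
let d = a + b;   // by value: both operands are consumed
// c and d both hold [11.0, 22.0, 33.0]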
Source§

impl<T, const DEVICE: usize, A> Add<bf16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bf16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: bf16) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<bf16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bf16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: bf16) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<bool> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: bool) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<bool> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: bool) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<f16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: f16) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<f16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: f16) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<f32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: f32) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<f32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: f32) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<f64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: f64) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<f64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: f64) -> Self::Output

Performs the + operation. Read more
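Scalars can appear on either side of +, as the impls above show; a sketch:

let a = Tensor::<f64>::new(vec![1.0, 2.0]);
let b = &a + 1.5; // tensor + scalar
let c = 1.5 + a;  // scalar + tensor (consumes `a`)
// b and c both hold [2.5, 3.5]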
Source§

impl<T, const DEVICE: usize, A> Add<i16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: i16) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<i16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: i16) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<i32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: i32) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<i32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: i32) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<i64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: i64) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<i64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: i64) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<i8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: i8) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<i8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: i8) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<u16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: u16) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<u16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: u16) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<u32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: u32) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<u32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: u32) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<u64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: u64) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<u64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: u64) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<u8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: u8) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, const DEVICE: usize, A> Add<u8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: u8) -> Self::Output

Performs the + operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> AddAssign<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

fn add_assign(&mut self, rhs: &Tensor<U, Cpu, DEVICE, A>)

Performs the += operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> AddAssign<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

fn add_assign(&mut self, rhs: Tensor<U, Cpu, DEVICE, A>)

Performs the += operation. Read more
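+= updates the left-hand tensor in place; a sketch for matching element types (the rules for mixed T and U follow the crate's type promotion and are not spelled out here):

let mut a = Tensor::<f32>::new(vec![1.0, 2.0]);
let b = Tensor::<f32>::new(vec![0.5, 0.5]);
a += &b; // borrow the right-hand side
a += b;  // or consume it
// a now holds [2.0, 3.0]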
Source§

impl<T, const DEVICE: usize, Al> AdvancedOps for Tensor<T, Cpu, DEVICE, Al>

Source§

type Meta = T

The type of the meta data
Source§

type Output = Tensor<T, Cpu, DEVICE, Al>

The type of the output tensor
Source§

type IndexOutput = Tensor<i64, Cpu, DEVICE, Al>

The type of the index tensor
Source§

fn pad( &self, pads: &[(i64, i64)], val: Self::Meta, ) -> Result<Self::Output, TensorError>

Pads a tensor with a given constant value. For each dimension, adds padding at the start and end as specified by pads. Read more
Source§

fn topk( &self, k: i64, dim: i64, largest: bool, sorted: bool, ) -> Result<(Self::IndexOutput, Self::Output), TensorError>

Returns the k largest or smallest elements along a specified dimension, and their indices. Read more
Source§

fn onehot( &self, depth: usize, axis: i64, true_val: Self::Meta, false_val: Self::Meta, ) -> Result<Self::Output, TensorError>

Creates a one-hot tensor from the input tensor. Read more
Source§

fn scatter( &self, indices: &Self::IndexOutput, axis: i64, src: &Self::Output, ) -> Result<Self::Output, TensorError>

Writes values from src tensor into a new tensor at the indices specified by indices along dimension axis. Read more
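A sketch of pad and topk on a small 1-D tensor; onehot and scatter follow the same Result-returning pattern. The concrete values are only illustrative, and construction is assumed as in the earlier examples:

let a = Tensor::<f32>::new(vec![3.0, 1.0, 2.0]);
// pad: one (before, after) pair per dimension, filled with the given value
let padded = a.pad(&[(1, 1)], 0.0).expect("pad failed"); // [0, 3, 1, 2, 0]
// topk: the k largest values along dim 0, plus their i64 indices
let (indices, values) = a.topk(2, 0, true, true).expect("topk failed"); // values = [3, 2]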
Source§

impl<T, const DEVICE: usize, A> BitAnd<&Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<&Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<&Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<&Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<&Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<&Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<&Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<&Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<&Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitAnd<&Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitAnd<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitAnd<Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitAnd<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the & operation. Read more
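&, |, and ^ apply element-wise through BitWiseOut, for tensor-tensor and tensor-scalar operands alike (the BitOr and BitXor impls are listed further down); a sketch with integer tensors, construction assumed as above:

let a = Tensor::<i32>::new(vec![0b1100, 0b1010]);
let b = Tensor::<i32>::new(vec![0b1010, 0b0110]);
let and = &a & &b;   // [0b1000, 0b0010]
let or = &a | &b;    // [0b1110, 0b1110]
let xor = &a ^ 1i32; // tensor ^ scalar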
Source§

impl<T, const DEVICE: usize, A> BitAnd<bool> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: bool) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<bool> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: bool) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<i16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: i16) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<i16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: i16) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<i32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: i32) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<i32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: i32) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<i64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: i64) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<i64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: i64) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<i8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: i8) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<i8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: i8) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<u16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: u16) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<u16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: u16) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<u32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: u32) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<u32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: u32) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<u64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: u64) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<u64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: u64) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<u8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: u8) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitAnd<u8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: u8) -> Self::Output

Performs the & operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<&Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<&Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<&Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<&Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<&Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<&Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<&Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<&Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<&Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitOr<&Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitOr<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitOr<Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitOr<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<bool> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: bool) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<bool> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: bool) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<i16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: i16) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<i16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: i16) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<i32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: i32) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<i32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: i32) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<i64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: i64) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<i64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: i64) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<i8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: i8) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<i8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: i8) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<u16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: u16) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<u16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: u16) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<u32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: u32) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<u32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: u32) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<u64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: u64) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<u64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: u64) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<u8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: u8) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitOr<u8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: u8) -> Self::Output

Performs the | operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<&Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<&Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<&Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<&Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<&Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<&Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<&Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<&Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<&Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitXor<&Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitXor<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitXor<Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> BitXor<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<bool> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: bool) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<bool> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: bool) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<i16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: i16) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<i16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: i16) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<i32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: i32) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<i32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: i32) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<i64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: i64) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<i64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: i64) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<i8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: i8) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<i8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: i8) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<u16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: u16) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<u16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: u16) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<u32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: u32) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<u32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: u32) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<u64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: u64) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<u64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: u64) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<u8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: u8) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T, const DEVICE: usize, A> BitXor<u8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: u8) -> Self::Output

Performs the ^ operation. Read more
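
The scalar impls above mean `^` can be used directly between tensors and primitive integers, in either order, with the element type resolved through `BitWiseOut`. A minimal sketch, assuming `hpt` re-exports `Tensor` at the crate root and that the lower-dimensional `From` impls behind `Tensor::new` exist (both are assumptions here):

```rust
use hpt::Tensor;

fn main() {
    // 1-D construction through `Tensor::new` (assumes a 1-D `From` impl).
    let a = Tensor::<i32>::new(&[0b1010i32, 0b0110]);
    let b = &a ^ 0b0011i32; // tensor ^ scalar
    let d = &a ^ &b;        // tensor ^ tensor, borrowed operands
    let c = 0b0011i32 ^ a;  // scalar ^ tensor, consumes `a`
    println!("{}\n{}\n{}", b, c, d);
}
```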
Source§

impl<T, B, const DEVICE: usize, A> CPUTensorCreator for Tensor<T, B, DEVICE, A>
where T: CommonBounds + AnyBitPattern, B: BackendTy + Buffer, A: Allocator, Tensor<T, Cpu, DEVICE, A::CpuAllocator>: TensorCreator<Output = Tensor<T, Cpu, DEVICE, A::CpuAllocator>>,

Source§

type Output = Tensor<T, Cpu, DEVICE, <A as Allocator>::CpuAllocator>

the output type of the creator
Source§

type Meta = T

the meta type of the tensor
Source§

fn empty<S: Into<Shape>>(shape: S) -> Result<Self::Output, TensorError>

Creates a tensor with uninitialized elements of the specified shape. Read more
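
A minimal sketch of `empty`, assuming the creation trait is importable as shown (the module path and the `i64` shape-literal type are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::TensorCreator; // assumed path; brings `empty` into scope

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // 2 x 3 f32 tensor with uninitialized elements; write to it before reading.
    let _t = Tensor::<f32>::empty([2i64, 3])?;
    Ok(())
}
```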
Source§

impl<T: Clone, B, const DEVICE_ID: usize, A> Clone for Tensor<T, B, DEVICE_ID, A>
where B: BackendTy + Buffer + Clone, A: Allocator + Clone,

Source§

fn clone(&self) -> Tensor<T, B, DEVICE_ID, A>

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl<T: CommonBounds, const DEVICE: usize, Al> Concat for Tensor<T, Cpu, DEVICE, Al>
where Al: Allocator + Send + Sync + Clone + 'static, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<T, Cpu, DEVICE, Al>

the output type of concat
Source§

fn concat( tensors: Vec<Self>, axis: usize, keepdims: bool, ) -> Result<Self::Output, TensorError>

Concatenates a sequence of tensors along the specified axis. Read more
Source§

fn vstack(tensors: Vec<Self>) -> Result<Self::Output, TensorError>

Stacks tensors vertically (along axis 0). This is equivalent to concatenation along the first axis. Read more
Source§

fn hstack(tensors: Vec<Self>) -> Result<Self::Output, TensorError>

Stacks tensors horizontally (along axis 1 for 2D+ tensors, or axis 0 for 1D tensors). Read more
Source§

fn dstack(tensors: Vec<Self>) -> Result<Self::Output, TensorError>

Stacks tensors along the third axis (depth). Input tensors are promoted to 3D if necessary. Read more
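
A short sketch of `concat` and `vstack` (the trait path and the 2-D `From` impl behind `Tensor::new` are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::Concat; // assumed path

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let a = Tensor::<f32>::new(&[[1.0f32, 2.0], [3.0, 4.0]]);
    let b = Tensor::<f32>::new(&[[5.0f32, 6.0], [7.0, 8.0]]);
    // Concatenate along axis 0; `vstack` is the axis-0 shorthand.
    let c = Tensor::concat(vec![a.clone(), b.clone()], 0, false)?;
    let v = Tensor::vstack(vec![a, b])?;
    println!("{}\n{}", c, v);
    Ok(())
}
```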
Source§

impl<T, const DEVICE: usize, Al> Contiguous for Tensor<T, Cpu, DEVICE, Al>

Source§

fn contiguous(&self) -> Result<Self, TensorError>

Returns the tensor as a contiguous tensor. Read more
Source§

impl<T, const DEVICE: usize> Conv<T> for Tensor<T, Cpu, DEVICE>
where T: CommonBounds + Cast<T> + NormalOut<Output = T> + Conv2dMicroKernel + MatmulMicroKernel + Cast<<T as NormalOutPromote>::Intermediate>, <T as NormalOutPromote>::Intermediate: CommonBounds + Cast<T>, T::Vec: VecTrait<T> + Copy + Send + Sync + NormalOut<Output = T::Vec>, bool: Cast<T>,

Source§

type Output = Tensor<T, Cpu, DEVICE>

the output type of the conv operation
Source§

fn conv2d( &self, kernels: &Self::Output, bias: Option<&Self::Output>, steps: [i64; 2], padding: [(i64, i64); 2], dilation: [i64; 2], post_scalar: Option<fn(T) -> T>, post_vec: Option<fn(T::Vec) -> T::Vec>, ) -> Result<Self::Output, TensorError>

Performs a 2D convolution operation with support for stride, padding, dilation, and activation functions. Read more
Source§

fn conv2d_group( &self, kernels: &Self::Output, bias: Option<&Self::Output>, steps: [i64; 2], padding: [(i64, i64); 2], dilation: [i64; 2], groups: i64, post_scalar: Option<fn(T) -> T>, post_vec: Option<fn(T::Vec) -> T::Vec>, ) -> Result<Self::Output, TensorError>

Performs a grouped 2D convolution operation, which divides input channels into groups and performs separate convolutions on each group. Read more
Source§

fn dwconv2d( &self, kernels: &Self::Output, bias: Option<&Self::Output>, steps: [i64; 2], padding: [(i64, i64); 2], dilation: [i64; 2], post_scalar: Option<fn(T) -> T>, post_vec: Option<fn(T::Vec) -> T::Vec>, ) -> Result<Self::Output, TensorError>

Performs a depthwise 2D convolution operation with support for stride, padding, dilation, and activation functions. Read more
Source§

fn conv2d_transpose( &self, kernels: &Self::Output, steps: [i64; 2], padding: [(i64, i64); 2], output_padding: [i64; 2], dilation: [i64; 2], post_scalar: Option<fn(T) -> T>, post_vec: Option<fn(T::Vec) -> T::Vec>, ) -> Result<Self::Output, TensorError>

Performs a transposed 2D convolution operation with support for stride, padding, dilation, and activation functions. Read more
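
A hedged sketch of `conv2d`. The NHWC input layout, the `[kh, kw, in, out]` kernel layout, the `zeros` constructor, and the import paths are all assumptions; only the method signature comes from the listing above:

```rust
use hpt::Tensor;
use hpt::ops::{Conv, TensorCreator}; // assumed paths

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Input: batch = 1, height = 8, width = 8, channels = 3 (layout assumed).
    let x = Tensor::<f32>::zeros([1i64, 8, 8, 3])?; // `zeros` is assumed to exist
    // Kernels: 3 x 3, 3 input channels, 4 output channels (layout assumed).
    let w = Tensor::<f32>::zeros([3i64, 3, 3, 4])?;
    let y = x.conv2d(
        &w,
        None,             // bias
        [1, 1],           // steps (stride)
        [(1, 1), (1, 1)], // padding (before, after) per spatial axis
        [1, 1],           // dilation
        None,             // post_scalar activation
        None,             // post_vec activation
    )?;
    println!("{:?}", y);
    Ok(())
}
```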
Source§

impl<T, const DEVICE: usize, A> ConvBatchNorm<T> for Tensor<T, Cpu, DEVICE, A>
where T: CommonBounds + Conv2dMicroKernel + MatmulMicroKernel + FloatOutBinary<Output = T> + FloatOutUnary<Output = T>, T::Vec: FloatOutBinary<Output = T::Vec> + FloatOutUnary<Output = T::Vec>, A: Allocator + Send + Sync, A::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<T, Cpu, DEVICE, A>

the output type of the conv operation
Source§

fn batchnorm_conv2d( &self, kernels: &Self::Output, mean: &Self::Output, var: &Self::Output, gamma: &Self::Output, beta: &Self::Output, bias: Option<&Self::Output>, eps: T, steps: [i64; 2], padding: [(i64, i64); 2], dilation: [i64; 2], post_scalar: Option<fn(T) -> T>, post_vec: Option<fn(T::Vec) -> T::Vec>, ) -> Result<Self::Output, TensorError>

Performs a 2D convolution operation followed by batch normalization in a single fused operation for improved performance. Read more
Source§

impl<T: CommonBounds, const DEVICE: usize, Al> CumulativeOps for Tensor<T, Cpu, DEVICE, Al>

Source§

fn cumsum<A: Into<Option<i64>>>(&self, axis: A) -> Result<Self, TensorError>

Computes the cumulative sum of tensor elements along a specified axis. Read more
Source§

fn cumprod<A: Into<Option<i64>>>(&self, axis: A) -> Result<Self, TensorError>

Computes the cumulative product of tensor elements along a specified axis. Read more
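
A small sketch of the cumulative ops (the trait path and the 1-D `From` impl are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::CumulativeOps; // assumed path

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let a = Tensor::<i64>::new(&[1i64, 2, 3, 4]);
    let s = a.cumsum(0i64)?;  // running sums:     [1, 3, 6, 10]
    let p = a.cumprod(0i64)?; // running products: [1, 2, 6, 24]
    println!("{}\n{}", s, p);
    Ok(())
}
```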
Source§

impl<T, B: BackendTy + Buffer, const DEVICE: usize, Al> Debug for Tensor<T, B, DEVICE, Al>

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl<T, const DEVICE: usize, Al> Display for Tensor<T, Cpu, DEVICE, Al>

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for Complex32

Source§

type Output = Tensor<<Complex<f32> as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for Complex64

Source§

type Output = Tensor<<Complex<f64> as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for bf16

Source§

type Output = Tensor<<bf16 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for f16

Source§

type Output = Tensor<<f16 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for f32

Source§

type Output = Tensor<<f32 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for f64

Source§

type Output = Tensor<<f64 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<&Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Div<&Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Div<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Complex<f32>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<Complex<f32>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Complex32) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Complex<f32>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<Complex<f32>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Complex32) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Complex<f64>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<Complex<f64>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Complex64) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Complex<f64>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<Complex<f64>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Complex64) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for Complex32

Source§

type Output = Tensor<<Complex<f32> as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for Complex64

Source§

type Output = Tensor<<Complex<f64> as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for bf16

Source§

type Output = Tensor<<bf16 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for f16

Source§

type Output = Tensor<<f16 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for f32

Source§

type Output = Tensor<<f32 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for f64

Source§

type Output = Tensor<<f64 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as FloatOutBinary<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Div<Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Div<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<bf16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<bf16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: bf16) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<bf16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<bf16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: bf16) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<bool> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: bool) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<bool> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: bool) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<f16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<f16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: f16) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<f16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<f16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: f16) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<f32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<f32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: f32) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<f32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<f32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: f32) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<f64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<f64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: f64) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<f64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<f64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: f64) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<i16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: i16) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<i16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: i16) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<i32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: i32) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<i32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: i32) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<i64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: i64) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<i64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: i64) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<i8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: i8) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<i8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: i8) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<u16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: u16) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<u16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: u16) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<u32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: u32) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<u32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: u32) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<u64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: u64) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<u64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: u64) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<u8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: u8) -> Self::Output

Performs the / operation. Read more
Source§

impl<T, const DEVICE: usize, A> Div<u8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: u8) -> Self::Output

Performs the / operation. Read more
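
As with `^`, the `/` impls cover scalar-on-either-side and tensor/tensor forms, with the result type resolved through `FloatOutBinary`. A minimal sketch (crate-root re-export and the 1-D `From` impl are assumed):

```rust
use hpt::Tensor;

fn main() {
    let a = Tensor::<f32>::new(&[1.0f32, 2.0, 4.0]);
    let half = &a / 2.0f32;  // tensor / scalar
    let recip = 1.0f32 / &a; // scalar / tensor
    let ratio = &a / &half;  // tensor / tensor, borrowed operands
    println!("{}\n{}\n{}", half, recip, ratio);
}
```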
Source§

impl<T, const DEVICE: usize, Al> EvalReduce for Tensor<T, Cpu, DEVICE, Al>
where Al: Allocator + Send + Sync + 'static, Al::Output: AllocatorOutputRetrive, T: CommonBounds, _Tensor<T, Cpu, DEVICE, Al>: EvalReduce<BoolOutput = _Tensor<bool, Cpu, DEVICE, Al>>,

Source§

type BoolOutput = Tensor<bool, Cpu, DEVICE, Al>

The boolean tensor type.
Source§

fn all<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::BoolOutput, TensorError>

Test if all elements are true along the specified dimensions Read more
Source§

fn any<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::BoolOutput, TensorError>

Test if any elements are true along the specified dimensions Read more
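
A sketch of `all`/`any` reductions on a boolean tensor (the trait path, the integer-to-`Axis` conversion, and the 2-D `From` impl are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::EvalReduce; // assumed path

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let m = Tensor::<bool>::new(&[[true, false], [true, true]]);
    let all_rows = m.all(1i64, false)?; // per-row "all true": [false, true]
    let any_rows = m.any(1i64, false)?; // per-row "any true": [true, true]
    println!("{}\n{}", all_rows, any_rows);
    Ok(())
}
```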
Source§

impl FFTOps for Tensor<Complex32>

Source§

fn fft( &self, n: usize, axis: i64, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the Fast Fourier Transform (FFT) of the tensor along a specified axis. Read more
Source§

fn ifft( &self, n: usize, axis: i64, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the inverse Fast Fourier Transform (IFFT) of the tensor along a specified axis. Read more
Source§

fn fft2<S: Into<Shape>>( &self, s: S, axis1: i64, axis2: i64, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the 2D Fast Fourier Transform (FFT2) of the tensor. Read more
Source§

fn ifft2<S: Into<Shape>>( &self, s: S, axis1: i64, axis2: i64, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the 2D inverse Fast Fourier Transform (IFFT2) of the tensor. Read more
Source§

fn fftn<A: Into<Axis>, S: Into<Shape>>( &self, s: S, axes: A, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the N-dimensional Fast Fourier Transform (FFTN) of the tensor. Read more
Source§

fn ifftn<A: Into<Axis>, S: Into<Shape>>( &self, s: S, axes: A, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the N-dimensional inverse Fast Fourier Transform (IFFTN) of the tensor. Read more
Source§

impl FFTOps for Tensor<Complex64>

Source§

fn fft( &self, n: usize, axis: i64, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the Fast Fourier Transform (FFT) of the tensor along a specified axis. Read more
Source§

fn ifft( &self, n: usize, axis: i64, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the inverse Fast Fourier Transform (IFFT) of the tensor along a specified axis. Read more
Source§

fn fft2<S: Into<Shape>>( &self, s: S, axis1: i64, axis2: i64, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the 2D Fast Fourier Transform (FFT2) of the tensor. Read more
Source§

fn ifft2<S: Into<Shape>>( &self, s: S, axis1: i64, axis2: i64, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the 2D inverse Fast Fourier Transform (IFFT2) of the tensor. Read more
Source§

fn fftn<A: Into<Axis>, S: Into<Shape>>( &self, s: S, axes: A, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the N-dimensional Fast Fourier Transform (FFTN) of the tensor. Read more
Source§

fn ifftn<A: Into<Axis>, S: Into<Shape>>( &self, s: S, axes: A, norm: Option<&str>, ) -> Result<Self, TensorError>

Computes the N-dimensional inverse Fast Fourier Transform (IFFTN) of the tensor. Read more
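
A round-trip sketch of `fft`/`ifft` on a `Complex32` tensor. The `num_complex` import, the trait path, and the 1-D `From` impl are assumptions:

```rust
use hpt::Tensor;
use hpt::ops::FFTOps; // assumed path
use num_complex::Complex32;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let x = Tensor::<Complex32>::new(&[
        Complex32::new(1.0, 0.0),
        Complex32::new(0.0, 0.0),
        Complex32::new(-1.0, 0.0),
        Complex32::new(0.0, 0.0),
    ]);
    let spec = x.fft(4, 0, None)?;     // length-4 FFT along axis 0, default norm
    let back = spec.ifft(4, 0, None)?; // inverse transform
    println!("{}\n{}", spec, back);
    Ok(())
}
```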
Source§

impl<T, B, const DEVICE: usize, Al> FloatBinOps<Tensor<B, Cpu, DEVICE, Al>> for Tensor<T, Cpu, DEVICE, Al>

Source§

type Output = Tensor<<T as FloatOutBinary<B>>::Output, Cpu, DEVICE, Al>

The output tensor type.
Source§

type OutputMeta = <T as FloatOutBinary<B>>::Output

The output tensor data type.
Source§

type InplaceOutput = Tensor<<T as FloatOutBinary<B>>::Output, Cpu, DEVICE, Al>

The inplace output tensor type.
Source§

fn hypot<C>(&self, rhs: C) -> Result<Self::Output, TensorError>
where C: Into<Tensor<B, Cpu, DEVICE, Al>>,

Compute sqrt(x^2 + y^2) for all elements Read more
Source§

fn hypot_<C, U>(&self, rhs: C, out: U) -> Result<Self::Output, TensorError>
where C: Into<Tensor<B, Cpu, DEVICE, Al>>, U: BorrowMut<Self::InplaceOutput>,

hypot with specified output tensor Read more
Source§

fn div_<C, U>(&self, rhs: C, out: U) -> Result<Self::Output, TensorError>
where C: Into<Tensor<B, Cpu, DEVICE, Al>>, U: BorrowMut<Self::InplaceOutput>,

division with specified output tensor Read more
Source§

fn pow<C>(&self, rhs: C) -> Result<Self::Output, TensorError>
where C: Into<Tensor<B, Cpu, DEVICE, Al>>,

Power of self and rhs Read more
Source§

fn pow_<C, U>(&self, rhs: C, out: U) -> Result<Self::Output, TensorError>
where C: Into<Tensor<B, Cpu, DEVICE, Al>>, U: BorrowMut<Self::InplaceOutput>,

Power of self and rhs with specified output tensor Read more
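
A sketch of `hypot` and `pow` with tensor right-hand sides (trait path and 1-D `From` impls assumed; scalar right-hand sides would go through the same `Into` bound):

```rust
use hpt::Tensor;
use hpt::ops::FloatBinOps; // assumed path

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let x = Tensor::<f32>::new(&[3.0f32, 5.0, 8.0]);
    let y = Tensor::<f32>::new(&[4.0f32, 12.0, 15.0]);
    let e = Tensor::<f32>::new(&[2.0f32, 2.0, 2.0]);
    let h = x.hypot(y)?; // sqrt(x^2 + y^2) -> [5, 13, 17]
    let p = x.pow(e)?;   // element-wise power -> [9, 25, 64]
    println!("{}\n{}", h, p);
    Ok(())
}
```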
Source§

impl<T, const DEVICE: usize, A> FloatOutPooling for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary>::Output, Cpu, DEVICE, A>

the output tensor type (the input element type promoted through FloatOutBinary)
Source§

fn avgpool2d<S: Into<Shape>>( &self, kernels_shape: S, steps: [i64; 2], padding: [(i64, i64); 2], dilation: [i64; 2], ) -> Result<Self::Output, TensorError>

Performs a 2D average pooling operation on the input tensor, computing the average value from each window. Read more
Source§

fn adaptive_avgpool2d( &self, output_size: [i64; 2], ) -> Result<Self::Output, TensorError>

Performs an adaptive average pooling operation on the input tensor, automatically determining the kernel size and stride to produce the specified output dimensions. Read more
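
A pooling sketch; the NHWC layout, the `zeros` constructor, and the import paths are assumptions:

```rust
use hpt::Tensor;
use hpt::ops::{FloatOutPooling, TensorCreator}; // assumed paths

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let x = Tensor::<f32>::zeros([1i64, 8, 8, 3])?; // batch, h, w, channels (assumed)
    let pooled = x.avgpool2d(
        [2i64, 2],        // kernel shape
        [2, 2],           // steps (stride)
        [(0, 0), (0, 0)], // padding
        [1, 1],           // dilation
    )?;
    let adaptive = x.adaptive_avgpool2d([4, 4])?;
    println!("{:?}\n{:?}", pooled, adaptive);
    Ok(())
}
```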
Source§

impl<T, const DEVICE: usize, Al> FloatReduce<T> for Tensor<T, Cpu, DEVICE, Al>
where T: CommonBounds, _Tensor<T, Cpu, DEVICE, Al>: FloatReduce<T, Output = _Tensor<<T as FloatOutBinary>::Output, Cpu, DEVICE, Al>>, Al: Allocator + Send + Sync + 'static, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<<T as FloatOutBinary>::Output, Cpu, DEVICE, Al>

The output tensor type.
Source§

fn mean<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Compute the mean of elements along the specified dimensions Read more
Source§

fn reducel2<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Compute the L2 norm (Euclidean norm) along the specified dimensions Read more
Source§

fn reducel3<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Compute the L3 norm along the specified dimensions Read more
Source§

fn logsumexp<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Compute log(sum(exp(x_i))) along the specified dimensions. Read more
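
A reduction sketch (the trait path, the integer-to-`Axis` conversion, and the 2-D `From` impl are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::FloatReduce; // assumed path

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let x = Tensor::<f32>::new(&[[1.0f32, 2.0], [3.0, 4.0]]);
    let mean_rows = x.mean(1i64, false)?;     // [1.5, 3.5]
    let l2_rows = x.reducel2(1i64, false)?;   // row-wise Euclidean norms
    let lse_rows = x.logsumexp(1i64, false)?; // log(sum(exp(row)))
    println!("{}\n{}\n{}", mean_rows, l2_rows, lse_rows);
    Ok(())
}
```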
Source§

impl<T, const DEVICE: usize, Al> FloatUnaryOps for Tensor<T, Cpu, DEVICE, Al>

Source§

type Output = Tensor<<T as FloatOutUnary>::Output, Cpu, DEVICE, Al>

output tensor type
Source§

type InplaceOutput = Tensor<<T as FloatOutUnary>::Output, Cpu, DEVICE, Al>

output tensor type for inplace operation
Source§

type OutputMeta = <T as FloatOutUnary>::Output

output tensor data type
Source§

fn sin(&self) -> Result<Self::Output, TensorError>

Computes sine element-wise. Read more
Source§

fn cos(&self) -> Result<Self::Output, TensorError>

Computes cosine element-wise. Read more
Source§

fn tan(&self) -> Result<Self::Output, TensorError>

Computes tangent element-wise. Read more
Source§

fn asin(&self) -> Result<Self::Output, TensorError>

Computes arcsine element-wise. Read more
Source§

fn acos(&self) -> Result<Self::Output, TensorError>

Computes arccosine element-wise. Read more
Source§

fn atan(&self) -> Result<Self::Output, TensorError>

Computes arctangent element-wise. Read more
Source§

fn sinh(&self) -> Result<Self::Output, TensorError>

Computes hyperbolic sine element-wise. Read more
Source§

fn cosh(&self) -> Result<Self::Output, TensorError>

Computes hyperbolic cosine element-wise. Read more
Source§

fn tanh(&self) -> Result<Self::Output, TensorError>

Computes hyperbolic tangent element-wise. Read more
Source§

fn asinh(&self) -> Result<Self::Output, TensorError>

Computes the element-wise asinh of the tensor. Read more
Source§

fn acosh(&self) -> Result<Self::Output, TensorError>

Computes the element-wise acosh of the tensor. Read more
Source§

fn atanh(&self) -> Result<Self::Output, TensorError>

Computes the element-wise atanh of the tensor. Read more
Source§

fn sin_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

sin method with output tensor, this method will write the result to the output tensor Read more
Source§

fn cos_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

cos method with output tensor, this method will write the result to the output tensor Read more
Source§

fn tan_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

tan method with output tensor, this method will write the result to the output tensor Read more
Source§

fn asin_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

asin method with output tensor, this method will write the result to the output tensor Read more
Source§

fn acos_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

acos method with output tensor, this method will write the result to the output tensor Read more
Source§

fn atan_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

atan method with output tensor, this method will write the result to the output tensor Read more
Source§

fn sinh_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

sinh method with output tensor, this method will write the result to the output tensor Read more
Source§

fn cosh_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

cosh method with output tensor, this method will write the result to the output tensor Read more
Source§

fn tanh_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

tanh method with output tensor, this method will write the result to the output tensor Read more
Source§

fn asinh_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

asinh method with output tensor, this method will write the result to the output tensor Read more
Source§

fn acosh_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

acosh method with output tensor, this method will write the result to the output tensor Read more
Source§

fn atanh_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

atanh method with output tensor, this method will write the result to the output tensor Read more
Source§

fn exp(&self) -> Result<Self::Output, TensorError>

Computes the element-wise exponential of the tensor.
Source§

fn exp_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

exp method with output tensor, this method will write the result to the output tensor Read more
Source§

fn exp2(&self) -> Result<Self::Output, TensorError>

Computes the element-wise base-2 exponential of the tensor.
Source§

fn exp2_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

exp2 method with output tensor, this method will write the result to the output tensor Read more
Source§

fn sqrt(&self) -> Result<Self::Output, TensorError>

Computes the element-wise square root of the tensor. Read more
Source§

fn sqrt_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

sqrt method with output tensor, this method will write the result to the output tensor Read more
Source§

fn recip(&self) -> Result<Self::Output, TensorError>

Computes the element-wise reciprocal of the tensor. Read more
Source§

fn recip_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

recip method with output tensor, this method will write the result to the output tensor Read more
Source§

fn ln(&self) -> Result<Self::Output, TensorError>

Computes the element-wise natural logarithm of the tensor. Read more
Source§

fn ln_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

ln method with output tensor, this method will write the result to the output tensor Read more
Source§

fn log2(&self) -> Result<Self::Output, TensorError>

Computes the element-wise base-2 logarithm of the tensor. Read more
Source§

fn log2_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

log2 method with output tensor, this method will write the result to the output tensor Read more
Source§

fn log10(&self) -> Result<Self::Output, TensorError>

Computes the element-wise base-10 logarithm of the tensor. Read more
Source§

fn log10_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

log10 method with output tensor, this method will write the result to the output tensor Read more
Source§

fn celu<V: Cast<Self::OutputMeta>>( &self, alpha: V, ) -> Result<Self::Output, TensorError>

Computes the element-wise Continuously Differentiable Exponential Linear Unit (CELU) activation function. Read more
Source§

fn celu_<V, U>(&self, alpha: V, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>, V: Cast<Self::OutputMeta>,

celu method with output tensor, this method will write the result to the output tensor Read more
Source§

fn sigmoid(&self) -> Result<Self::Output, TensorError>

Computes the element-wise sigmoid activation function of the tensor. Read more
Source§

fn sigmoid_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

sigmoid method with output tensor, this method will write the result to the output tensor Read more
Source§

fn elu<V: Cast<Self::OutputMeta>>( &self, alpha: V, ) -> Result<Self::Output, TensorError>

Computes the element-wise Exponential Linear Unit (ELU) activation function. Read more
Source§

fn elu_<V, U>(&self, alpha: V, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>, V: Cast<Self::OutputMeta>,

elu method with output tensor, this method will write the result to the output tensor Read more
Source§

fn erf(&self) -> Result<Self::Output, TensorError>

Computes the element-wise error function (erf) of the tensor. Read more
Source§

fn gelu(&self) -> Result<Self::Output, TensorError>

Computes the element-wise Gaussian Error Linear Unit (GELU) activation function. Read more
Source§

fn gelu_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

gelu method with output tensor, this method will write the result to the output tensor Read more
Source§

fn selu(&self) -> Result<Self::Output, TensorError>

Computes the element-wise Scaled Exponential Linear Unit (SELU) activation function. Read more
Source§

fn selu_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

selu method with output tensor, this method will write the result to the output tensor Read more
Source§

fn hard_sigmoid(&self) -> Result<Self::Output, TensorError>

Computes the element-wise Hard Sigmoid activation function. Read more
Source§

fn hard_sigmoid_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

hard_sigmoid method with output tensor, this method will write the result to the output tensor Read more
Source§

fn hard_swish(&self) -> Result<Self::Output, TensorError>

Computes the element-wise Hard Swish activation function. Read more
Source§

fn hard_swish_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

hard_swish method with output tensor, this method will write the result to the output tensor Read more
Source§

fn softplus(&self) -> Result<Self::Output, TensorError>

Computes the element-wise Softplus activation function. Read more
Source§

fn softplus_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

softplus method with output tensor, this method will write the result to the output tensor Read more
Source§

fn softsign(&self) -> Result<Self::Output, TensorError>

Computes the element-wise Softsign activation function. Read more
Source§

fn softsign_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

softsign method with output tensor, this method will write the result to the output tensor Read more
Source§

fn mish(&self) -> Result<Self::Output, TensorError>

Computes the element-wise Mish activation function. Read more
Source§

fn mish_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

mish method with output tensor, this method will write the result to the output tensor Read more
Source§

fn cbrt(&self) -> Result<Self::Output, TensorError>

Computes the element-wise cube root of the tensor. Read more
Source§

fn cbrt_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

cbrt method with output tensor, this method will write the result to the output tensor Read more
Source§

fn sincos(&self) -> Result<(Self::Output, Self::Output), TensorError>

Computes sine and cosine element-wise. Read more
Source§

fn exp10(&self) -> Result<Self::Output, TensorError>

Computes the element-wise base-10 exponential of the tensor. Read more
Source§

fn exp10_<U>(&self, out: U) -> Result<Self::InplaceOutput, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

exp10 method with output tensor, this method will write the result to the output tensor Read more
Source§

fn sincos_<U, O>( &self, outs: (U, O), ) -> Result<(Self::Output, Self::Output), TensorError>
where U: BorrowMut<Self::InplaceOutput>, O: BorrowMut<Self::InplaceOutput>,

sincos method with output tensor, this method will write the result to the output tensor Read more
Source§

fn erf_<U>(&self, out: U) -> Result<Self::InplaceOutput, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

erf method with output tensor, this method will write the result to the output tensor Read more
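
A sketch of a few element-wise float ops, including an `_`-suffixed variant that writes into a preallocated output (trait path and the 1-D `From` impl assumed):

```rust
use hpt::Tensor;
use hpt::ops::FloatUnaryOps; // assumed path

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let x = Tensor::<f32>::new(&[0.0f32, 0.5, 1.0]);
    let s = x.sin()?;
    let g = x.gelu()?;
    // Reuse an existing tensor of the right shape/type as the output buffer.
    let mut out = s.clone();
    x.cos_(&mut out)?;
    println!("{}\n{}\n{}", s, g, out);
    Ok(())
}
```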
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[Complex<f32>; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[Complex32; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
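
The nested-array `From` impls (listed here for 7-D and 8-D inputs, and assumed to exist for lower ranks as well) are what let array literals convert into tensors, either via `.into()` or through `Tensor::new`:

```rust
use hpt::Tensor;

fn main() {
    // Assumes a 2-D `From<&[[f32; M]; N]>` impl exists alongside the
    // higher-dimensional ones listed here.
    let a: Tensor<f32> = (&[[1.0f32, 2.0], [3.0, 4.0]]).into();
    let b = Tensor::<i32>::new(&[[1i32, 2, 3], [4, 5, 6]]);
    println!("{}\n{}", a, b);
}
```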
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize> From<&[[[[[[[[Complex<f64>; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE>

Source§

fn from(data: &[[[[[[[[Complex64; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[bool; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[bool; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[f16; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[f16; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[f32; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[f32; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[f64; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[f64; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[i16; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[i16; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[i32; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[i32; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[i64; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[i64; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[i8; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[i8; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[u16; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[u16; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[u32; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[u32; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[u64; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[u64; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<&[[[[[[[[u8; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[[u8; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[Complex<f32>; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[Complex32; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize> From<&[[[[[[[Complex<f64>; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE>

Source§

fn from(data: &[[[[[[[Complex64; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[bool; S]; R]; Q]; P]; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[bool; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[f16; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[f16; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[f32; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[f32; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[f64; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[f64; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[i16; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[i16; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[i32; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[i32; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[i64; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[i64; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[i8; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[i8; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[u16; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[u16; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[u32; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[u32; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[u64; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[u64; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<&[[[[[[[u8; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[[u8; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[Complex<f32>; R]; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[Complex32; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize> From<&[[[[[[Complex<f64>; R]; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE>

Source§

fn from(data: &[[[[[[Complex64; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[bool; R]; Q]; P]; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[bool; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[f16; R]; Q]; P]; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[f16; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[f32; R]; Q]; P]; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[f32; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[f64; R]; Q]; P]; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[f64; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[i16; R]; Q]; P]; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[i16; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[i32; R]; Q]; P]; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[i32; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[i64; R]; Q]; P]; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[i64; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[i8; R]; Q]; P]; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[i8; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[u16; R]; Q]; P]; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[u16; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[u32; R]; Q]; P]; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[u32; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[u64; R]; Q]; P]; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[u64; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<&[[[[[[u8; R]; Q]; P]; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[[u8; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[Complex<f32>; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[Complex32; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize> From<&[[[[[Complex<f64>; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE>

Source§

fn from(data: &[[[[[Complex64; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[bool; Q]; P]; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[bool; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[f16; Q]; P]; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[f16; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[f32; Q]; P]; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[f32; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[f64; Q]; P]; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[f64; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[i16; Q]; P]; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[i16; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[i32; Q]; P]; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[i32; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[i64; Q]; P]; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[i64; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[i8; Q]; P]; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[i8; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[u16; Q]; P]; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[u16; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[u32; Q]; P]; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[u32; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[u64; Q]; P]; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[u64; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<&[[[[[u8; Q]; P]; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[[u8; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[Complex<f32>; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[Complex32; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize> From<&[[[[Complex<f64>; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE>

Source§

fn from(data: &[[[[Complex64; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[bool; P]; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[bool; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[f16; P]; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[f16; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[f32; P]; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[f32; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[f64; P]; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[f64; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[i16; P]; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[i16; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[i32; P]; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[i32; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[i64; P]; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[i64; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[i8; P]; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[i8; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[u16; P]; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[u16; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[u32; P]; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[u32; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[u64; P]; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[u64; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<&[[[[u8; P]; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[[u8; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[Complex<f32>; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[Complex32; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize> From<&[[[Complex<f64>; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE>

Source§

fn from(data: &[[[Complex64; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[bool; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: &[[[bool; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[f16; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[f16; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[f32; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[f32; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[f64; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[f64; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[i16; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[i16; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[i32; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[i32; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[i64; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[i64; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[i8; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[i8; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[u16; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: &[[[u16; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[u32; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: &[[[u32; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[u64; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: &[[[u64; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<&[[[u8; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: &[[[u8; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[Complex<f32>; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: &[[Complex32; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize> From<&[[Complex<f64>; M]; N]> for Tensor<Complex64, Cpu, DEVICE>

Source§

fn from(data: &[[Complex64; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[bool; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: &[[bool; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[f16; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: &[[f16; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[f32; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: &[[f32; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[f64; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: &[[f64; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[i16; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: &[[i16; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[i32; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: &[[i32; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[i64; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: &[[i64; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[i8; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: &[[i8; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[u16; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: &[[u16; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[u32; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: &[[u32; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[u64; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: &[[u64; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<&[[u8; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: &[[u8; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[Complex<f32>; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: &[Complex32; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize> From<&[Complex<f64>; N]> for Tensor<Complex64, Cpu, DEVICE>

Source§

fn from(data: &[Complex64; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[bool; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: &[bool; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[f16; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: &[f16; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[f32; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: &[f32; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[f64; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: &[f64; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[i16; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: &[i16; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[i32; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: &[i32; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[i64; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: &[i64; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[i8; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: &[i8; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[u16; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: &[u16; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[u32; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: &[u32; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[u64; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: &[u64; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<&[u8; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: &[u8; N]) -> Self

Converts to this type from the input type.
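Because each `From` impl also provides the matching `Into` implementation, a tensor can presumably be built either with an explicit `From` call or through an `Into`-bounded constructor such as `Tensor::new`. A hedged sketch under that assumption:

// A minimal sketch, assuming the crate-root re-export and that `Tensor::new`
// accepts anything convertible via these `From`/`Into` impls.
use hpt::Tensor;

fn from_1d_array_ref() {
    let row = [1_i32, 2, 3, 4];
    // Explicit `From` impl for `&[i32; N]` documented above.
    let a: Tensor<i32> = Tensor::from(&row);
    // Equivalent via the `Into`-bounded constructor (assumption noted above).
    let b: Tensor<i32> = Tensor::new(&row);
    let _ = (a, b);
}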
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[Complex<f32>; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[Complex32; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[Complex<f64>; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[Complex64; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[bool; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[bool; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[f16; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[f16; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[f32; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[f32; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[f32; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[f32; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[f64; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[f64; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[f64; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[f64; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[i16; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[i16; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[i32; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[i32; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[i64; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[i64; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[i8; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[i8; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[u16; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[u16; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[u32; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[u32; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[u64; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[u64; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const T: usize, const DEVICE: usize, A> From<[[[[[[[[u8; T]; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[[u8; T]; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[Complex<f32>; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[Complex32; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[Complex<f64>; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[Complex64; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[bool; S]; R]; Q]; P]; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[bool; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[f16; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[f16; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[f32; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[f32; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[f32; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[f32; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[f64; S]; R]; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[f64; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[f64; S]; R]; Q]; P]; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[f64; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[i16; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[i16; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[i32; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[i32; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[i64; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[i64; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[i8; S]; R]; Q]; P]; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[i8; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[u16; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[u16; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[u32; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[u32; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[u64; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[u64; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const S: usize, const DEVICE: usize, A> From<[[[[[[[u8; S]; R]; Q]; P]; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[[u8; S]; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[Complex<f32>; R]; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[Complex32; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[Complex<f64>; R]; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[Complex64; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[bool; R]; Q]; P]; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[bool; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[f16; R]; Q]; P]; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[f16; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[f32; R]; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[f32; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[f32; R]; Q]; P]; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[f32; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[f64; R]; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[f64; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[f64; R]; Q]; P]; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[f64; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[i16; R]; Q]; P]; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[i16; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[i32; R]; Q]; P]; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[i32; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[i64; R]; Q]; P]; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[i64; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[i8; R]; Q]; P]; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[i8; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[u16; R]; Q]; P]; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[u16; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[u32; R]; Q]; P]; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[u32; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[u64; R]; Q]; P]; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[u64; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const R: usize, const DEVICE: usize, A> From<[[[[[[u8; R]; Q]; P]; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[[u8; R]; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[Complex<f32>; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[Complex32; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[Complex<f64>; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[Complex64; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[bool; Q]; P]; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[bool; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[f16; Q]; P]; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[f16; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[f32; Q]; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[f32; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[f32; Q]; P]; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[f32; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[f64; Q]; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[f64; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[f64; Q]; P]; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[f64; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[i16; Q]; P]; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[i16; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[i32; Q]; P]; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[i32; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[i64; Q]; P]; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[i64; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[i8; Q]; P]; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[i8; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[u16; Q]; P]; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[u16; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[u32; Q]; P]; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[u32; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[u64; Q]; P]; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[u64; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const Q: usize, const DEVICE: usize, A> From<[[[[[u8; Q]; P]; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: [[[[[u8; Q]; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[Complex<f32>; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[Complex32; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[Complex<f64>; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[Complex64; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[bool; P]; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: [[[[bool; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[f16; P]; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[f16; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[f32; P]; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[f32; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[f32; P]; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[f32; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[f64; P]; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[f64; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[f64; P]; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[f64; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[i16; P]; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[i16; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[i32; P]; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[i32; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[i64; P]; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[i64; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[i8; P]; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: [[[[i8; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[u16; P]; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: [[[[u16; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[u32; P]; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: [[[[u32; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[u64; P]; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: [[[[u64; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const P: usize, const DEVICE: usize, A> From<[[[[u8; P]; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: [[[[u8; P]; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[Complex<f32>; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[Complex32; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[Complex<f64>; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[Complex64; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[bool; O]; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: [[[bool; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[f16; O]; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: [[[f16; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[f32; O]; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[[f32; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[f32; O]; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: [[[f32; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[f64; O]; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[[f64; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[f64; O]; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: [[[f64; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[i16; O]; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: [[[i16; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[i32; O]; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: [[[i32; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[i64; O]; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: [[[i64; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[i8; O]; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: [[[i8; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[u16; O]; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: [[[u16; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[u32; O]; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: [[[u32; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[u64; O]; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: [[[u64; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const O: usize, const DEVICE: usize, A> From<[[[u8; O]; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: [[[u8; O]; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[Complex<f32>; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[Complex32; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[Complex<f64>; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[Complex64; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[bool; M]; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: [[bool; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[f16; M]; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: [[f16; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[f32; M]; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [[f32; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[f32; M]; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: [[f32; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[f64; M]; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [[f64; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[f64; M]; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: [[f64; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[i16; M]; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: [[i16; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[i32; M]; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: [[i32; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[i64; M]; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: [[i64; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[i8; M]; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: [[i8; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[u16; M]; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: [[u16; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[u32; M]; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: [[u32; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[u64; M]; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: [[u64; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const M: usize, const DEVICE: usize, A> From<[[u8; M]; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: [[u8; M]; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[Complex<f32>; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [Complex32; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[Complex<f64>; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [Complex64; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[bool; N]> for Tensor<bool, Cpu, DEVICE, A>

Source§

fn from(data: [bool; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[f16; N]> for Tensor<f16, Cpu, DEVICE, A>

Source§

fn from(data: [f16; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[f32; N]> for Tensor<Complex32, Cpu, DEVICE, A>

Source§

fn from(data: [f32; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[f32; N]> for Tensor<f32, Cpu, DEVICE, A>

Source§

fn from(data: [f32; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[f64; N]> for Tensor<Complex64, Cpu, DEVICE, A>

Source§

fn from(data: [f64; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[f64; N]> for Tensor<f64, Cpu, DEVICE, A>

Source§

fn from(data: [f64; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[i16; N]> for Tensor<i16, Cpu, DEVICE, A>

Source§

fn from(data: [i16; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[i32; N]> for Tensor<i32, Cpu, DEVICE, A>

Source§

fn from(data: [i32; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[i64; N]> for Tensor<i64, Cpu, DEVICE, A>

Source§

fn from(data: [i64; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[i8; N]> for Tensor<i8, Cpu, DEVICE, A>

Source§

fn from(data: [i8; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[u16; N]> for Tensor<u16, Cpu, DEVICE, A>

Source§

fn from(data: [u16; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[u32; N]> for Tensor<u32, Cpu, DEVICE, A>

Source§

fn from(data: [u32; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[u64; N]> for Tensor<u64, Cpu, DEVICE, A>

Source§

fn from(data: [u64; N]) -> Self

Converts to this type from the input type.
Source§

impl<const N: usize, const DEVICE: usize, A> From<[u8; N]> for Tensor<u8, Cpu, DEVICE, A>

Source§

fn from(data: [u8; N]) -> Self

Converts to this type from the input type.
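Taken together, the fixed-size array `From` impls above let nested Rust arrays convert directly into tensors, which is also what `Tensor::new` relies on. A minimal sketch, assuming the crate-root re-export `hpt::Tensor` (the import path is an assumption):

```rust
use hpt::Tensor; // re-export path is an assumption

fn main() {
    // 1-D tensor from a fixed-size array via `From<[f32; N]>`.
    let a: Tensor<f32> = [1.0f32, 2.0, 3.0].into();
    // 2-D tensor from a nested array; `Tensor::new` accepts any `Into<Tensor>` value.
    let b = Tensor::<f32>::new([[1.0f32, 2.0], [3.0, 4.0]]);
    let _ = (a, b);
}
```

Note that the float-to-complex impls (for example `From<[f32; N]> for Tensor<Complex32, ...>`) allow a real-valued array to be converted directly into a complex tensor in the same way.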
Source§

impl<const DEVICE: usize> From<Vec<Complex<f32>>> for Tensor<Complex32, Cpu, DEVICE>

Source§

fn from(data: Vec<Complex32>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<Complex<f64>>> for Tensor<Complex64, Cpu, DEVICE>

Source§

fn from(data: Vec<Complex64>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<bool>> for Tensor<bool, Cpu, DEVICE>

Source§

fn from(data: Vec<bool>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<f16>> for Tensor<f16, Cpu, DEVICE>

Source§

fn from(data: Vec<f16>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<f32>> for Tensor<f32, Cpu, DEVICE>

Source§

fn from(data: Vec<f32>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<f64>> for Tensor<f64, Cpu, DEVICE>

Source§

fn from(data: Vec<f64>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<i16>> for Tensor<i16, Cpu, DEVICE>

Source§

fn from(data: Vec<i16>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<i32>> for Tensor<i32, Cpu, DEVICE>

Source§

fn from(data: Vec<i32>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<i64>> for Tensor<i64, Cpu, DEVICE>

Source§

fn from(data: Vec<i64>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<i8>> for Tensor<i8, Cpu, DEVICE>

Source§

fn from(data: Vec<i8>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<u16>> for Tensor<u16, Cpu, DEVICE>

Source§

fn from(data: Vec<u16>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<u32>> for Tensor<u32, Cpu, DEVICE>

Source§

fn from(data: Vec<u32>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<u64>> for Tensor<u64, Cpu, DEVICE>

Source§

fn from(data: Vec<u64>) -> Self

Converts to this type from the input type.
Source§

impl<const DEVICE: usize> From<Vec<u8>> for Tensor<u8, Cpu, DEVICE>

Source§

fn from(data: Vec<u8>) -> Self

Converts to this type from the input type.
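The `Vec` impls target the default allocator and device, so a plain `Vec<T>` becomes a 1-D tensor on device 0. A hedged sketch (import path assumed):

```rust
use hpt::Tensor; // re-export path is an assumption

fn main() {
    let v: Vec<f32> = (0..6).map(|i| i as f32).collect();
    // `From<Vec<f32>>` consumes the Vec and produces a 1-D tensor with 6 elements.
    let t: Tensor<f32> = v.into();
    let _ = t;
}
```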
Source§

impl<T: CommonBounds, const DEVICE: usize> FromSafeTensors for Tensor<T, Cpu, DEVICE>

Source§

fn from_safe_tensors(data: &SafeTensors<'_>, tensor_name: &str) -> Self
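A hedged sketch of loading a named tensor from a safetensors buffer. `SafeTensors::deserialize` is the standard entry point of the `safetensors` crate, while the trait import path, the file name, and the tensor name `"weight"` are placeholders:

```rust
use hpt::Tensor;                 // re-export path is an assumption
use hpt::utils::FromSafeTensors; // trait path is an assumption
use safetensors::SafeTensors;

fn main() {
    // "model.safetensors" and "weight" are hypothetical names.
    let bytes = std::fs::read("model.safetensors").expect("failed to read file");
    let st = SafeTensors::deserialize(&bytes).expect("not a valid safetensors buffer");
    let w: Tensor<f32> = Tensor::from_safe_tensors(&st, "weight");
    let _ = w;
}
```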

Source§

impl<A, B, A2, const DEVICE: usize> Gemm<&Tensor<B, Cpu, DEVICE, A2>> for Tensor<A, Cpu, DEVICE, A2>

Source§

type Output = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, A2>

The output tensor type.
Source§

type OutputMeta = <A as NormalOut<B>>::Output

The output tensor data type.
Source§

type InplaceOutput = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, A2>

The inplace output tensor type.
Source§

fn gemm( &self, rhs: &Tensor<B, Cpu, DEVICE, A2>, alpha: Self::OutputMeta, beta: Self::OutputMeta, conj_dst: bool, conj_lhs: bool, conj_rhs: bool, ) -> Result<Self::Output, TensorError>

Perform gemm (general matrix multiplication) of two tensors. The behavior depends on the dimensions of the input tensors: Read more
Source§

fn gemm_<U>( &self, rhs: &Tensor<B, Cpu, DEVICE, A2>, alpha: Self::OutputMeta, beta: Self::OutputMeta, conj_dst: bool, conj_lhs: bool, conj_rhs: bool, out: U, ) -> Result<Self::Output, TensorError>
where U: Borrow<Self::InplaceOutput> + BorrowMut<Self::InplaceOutput>,

gemm (general matrix multiplication) with specified output tensor Read more
Source§

impl<A, B, A2, const DEVICE: usize> Gemm<Tensor<B, Cpu, DEVICE, A2>> for Tensor<A, Cpu, DEVICE, A2>

Source§

type Output = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, A2>

The output tensor type.
Source§

type OutputMeta = <A as NormalOut<B>>::Output

The output tensor data type.
Source§

type InplaceOutput = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, A2>

The inplace output tensor type.
Source§

fn gemm( &self, rhs: Tensor<B, Cpu, DEVICE, A2>, alpha: Self::OutputMeta, beta: Self::OutputMeta, conj_dst: bool, conj_lhs: bool, conj_rhs: bool, ) -> Result<Self::Output, TensorError>

Perform gemm (general matrix multiplication) of two tensors. The behavior depends on the dimensions of the input tensors: Read more
Source§

fn gemm_<U>( &self, rhs: Tensor<B, Cpu, DEVICE, A2>, alpha: Self::OutputMeta, beta: Self::OutputMeta, conj_dst: bool, conj_lhs: bool, conj_rhs: bool, out: U, ) -> Result<Self::Output, TensorError>
where U: Borrow<Self::InplaceOutput> + BorrowMut<Self::InplaceOutput>,

gemm (general matrix multiplication) with specified output tensor Read more
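A minimal sketch of a scaled product through `gemm`, assuming the conventional GEMM meaning of `alpha`/`beta` (the result is `alpha · lhs · rhs`, plus `beta` times any existing output) and an assumed `hpt::ops::Gemm` trait path:

```rust
use hpt::ops::Gemm; // trait path is an assumption
use hpt::Tensor;    // re-export path is an assumption

fn main() {
    let a: Tensor<f32> = [[1.0f32, 2.0], [3.0, 4.0]].into();
    let b: Tensor<f32> = [[5.0f32, 6.0], [7.0, 8.0]].into();
    // alpha = 2.0 scales the product, beta = 0.0 discards any prior output contents,
    // and the conjugation flags stay false for real-valued data.
    let c = a.gemm(&b, 2.0f32, 0.0f32, false, false, false).expect("gemm failed");
    let _ = c;
}
```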
Source§

impl<T, const DEVICE: usize, Al> HardMax<T> for Tensor<T, Cpu, DEVICE, Al>
where T: CommonBounds + Cmp<Output = bool>, <T as TypeCommon>::Vec: SimdCmp, <T::Vec as SimdCmp>::Output: IntoVec<T::Vec>, bool: NormalOut<T> + Cast<T>, Al: Allocator + Send + Sync + 'static, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<T, Cpu, DEVICE, Al>

The type of the output tensor
Source§

fn hardmax(&self, axis: i64) -> Result<Self::Output, TensorError>

Applies the hardmax function to the input tensor along the specified dimension. Read more
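A short sketch: `hardmax` produces a one-hot encoding of the maximum along the given axis (the trait path is an assumption):

```rust
use hpt::ops::HardMax; // trait path is an assumption
use hpt::Tensor;

fn main() {
    let x: Tensor<f32> = [[0.1f32, 2.0, 0.3]].into();
    // Along axis 1, only the position of the row maximum should be 1, the rest 0.
    let one_hot = x.hardmax(1).expect("hardmax failed");
    let _ = one_hot;
}
```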
Source§

impl<T: CommonBounds + NormalOut<Output = T> + Cmp<T, Output = bool>, const DEVICE: usize> IndexReduce for Tensor<T, Cpu, DEVICE>

Source§

type Output = Tensor<i64, Cpu, DEVICE>

The output tensor type.
Source§

fn argmax<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Return the indices of the maximum values along the specified dimensions Read more
Source§

fn argmin<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Return the indices of the minimum values along the specified dimensions Read more
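A sketch of index reduction; the trait path and the integer-to-`Axis` conversion are assumptions:

```rust
use hpt::ops::IndexReduce; // trait path is an assumption
use hpt::Tensor;

fn main() {
    let x: Tensor<f32> = [[1.0f32, 5.0], [7.0, 3.0]].into();
    // Indices of the per-row maxima; the result is an i64 tensor (expected: [1, 0]).
    let idx = x.argmax(1, false).expect("argmax failed");
    let _ = idx;
}
```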
Source§

impl<const DEVICE: usize, A> Into<Tensor<Complex<f32>, Cpu, DEVICE, A>> for Complex32

Source§

fn into(self) -> Tensor<Complex32, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<Complex<f64>, Cpu, DEVICE, A>> for Complex64

Source§

fn into(self) -> Tensor<Complex64, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<T, const DEVICE: usize, A> Into<Tensor<T, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>
where T: CommonBounds, A: Allocator,

Source§

fn into(self) -> Tensor<T, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<bf16, Cpu, DEVICE, A>> for bf16

Source§

fn into(self) -> Tensor<bf16, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<bool, Cpu, DEVICE, A>> for bool

Source§

fn into(self) -> Tensor<bool, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<f16, Cpu, DEVICE, A>> for f16

Source§

fn into(self) -> Tensor<f16, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<f32, Cpu, DEVICE, A>> for f32

Source§

fn into(self) -> Tensor<f32, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<f64, Cpu, DEVICE, A>> for f64

Source§

fn into(self) -> Tensor<f64, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<i16, Cpu, DEVICE, A>> for i16

Source§

fn into(self) -> Tensor<i16, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<i32, Cpu, DEVICE, A>> for i32

Source§

fn into(self) -> Tensor<i32, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<i64, Cpu, DEVICE, A>> for i64

Source§

fn into(self) -> Tensor<i64, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<i8, Cpu, DEVICE, A>> for i8

Source§

fn into(self) -> Tensor<i8, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<u16, Cpu, DEVICE, A>> for u16

Source§

fn into(self) -> Tensor<u16, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<u32, Cpu, DEVICE, A>> for u32

Source§

fn into(self) -> Tensor<u32, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<u64, Cpu, DEVICE, A>> for u64

Source§

fn into(self) -> Tensor<u64, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
Source§

impl<const DEVICE: usize, A> Into<Tensor<u8, Cpu, DEVICE, A>> for u8

Source§

fn into(self) -> Tensor<u8, Cpu, DEVICE, A>

Converts this type into the (usually inferred) input type.
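These scalar `Into` impls let a bare number be promoted to a tensor, for example to feed APIs that expect tensor operands; the exact shape of the result (zero-dimensional versus single-element) is not restated here. A one-line sketch:

```rust
use hpt::Tensor; // re-export path is an assumption

fn main() {
    // A plain f32 promoted to a tensor via the `Into` impl above.
    let s: Tensor<f32> = 3.5f32.into();
    let _ = s;
}
```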
Source§

impl<T, const DEVICE: usize, Al> Matmul<&Tensor<T, Cpu, DEVICE, Al>> for Tensor<T, Cpu, DEVICE, Al>
where T: CommonBounds + MatmulMicroKernel, Al: Allocator, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<T, Cpu, DEVICE, Al>

The output tensor type.
Source§

type OutputMeta = T

The output tensor data type.
Source§

type InplaceOutput = Tensor<T, Cpu, DEVICE, Al>

The inplace output tensor type.
Source§

fn matmul( &self, rhs: &Tensor<T, Cpu, DEVICE, Al>, ) -> Result<Self::Output, TensorError>

Perform matrix multiplication of two tensors. The behavior depends on the dimensions of the input tensors: Read more
Source§

fn matmul_<U>( &self, rhs: &Tensor<T, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: Borrow<Self::InplaceOutput> + BorrowMut<Self::InplaceOutput>,

matrix multiplication with specified output tensor Read more
Source§

impl<T, const DEVICE: usize, Al> Matmul for Tensor<T, Cpu, DEVICE, Al>
where T: CommonBounds + MatmulMicroKernel, Al: Allocator, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<T, Cpu, DEVICE, Al>

The output tensor type.
Source§

type OutputMeta = T

The output tensor data type.
Source§

type InplaceOutput = Tensor<T, Cpu, DEVICE, Al>

The inplace output tensor type.
Source§

fn matmul( &self, rhs: Tensor<T, Cpu, DEVICE, Al>, ) -> Result<Self::Output, TensorError>

Perform matrix multiplication of two tensors. The behavior depends on the dimensions of the input tensors: Read more
Source§

fn matmul_<U>( &self, rhs: Tensor<T, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: Borrow<Self::InplaceOutput> + BorrowMut<Self::InplaceOutput>,

matrix multiplication with specified output tensor Read more
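A minimal matmul sketch (trait path assumed); the two impls above cover both by-reference and by-value right-hand sides:

```rust
use hpt::ops::Matmul; // trait path is an assumption
use hpt::Tensor;

fn main() {
    let a: Tensor<f32> = [[1.0f32, 2.0], [3.0, 4.0]].into();
    let b: Tensor<f32> = [[5.0f32, 6.0], [7.0, 8.0]].into();
    // 2x2 · 2x2 -> 2x2; shape mismatches surface as a TensorError.
    let c = a.matmul(&b).expect("matmul failed");
    let _ = c;
}
```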
Source§

impl<T, A, const DEVICE: usize> MatmulPost<&Tensor<T, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>
where T: CommonBounds + MatmulMicroKernel, A: Allocator, A::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<T, Cpu, DEVICE, A>

The output tensor type.
Source§

type OutputMeta = T

The output tensor data type.
Source§

type InplaceOutput = Tensor<T, Cpu, DEVICE, A>

The inplace output tensor type.
Source§

fn matmul_post( &self, rhs: &Tensor<T, Cpu, DEVICE, A>, post_op: fn(T) -> T, post_op_vec: fn(T::Vec) -> T::Vec, ) -> Result<Self::Output, TensorError>

Same as matmul, but performs the post operation before writing the final result to memory. Read more

Source§

fn matmul_post_<U>( &self, rhs: &Tensor<T, Cpu, DEVICE, A>, post_op: fn(T) -> T, post_op_vec: fn(T::Vec) -> T::Vec, out: U, ) -> Result<Self::InplaceOutput, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

matrix multiplication with specified output tensor and post operation Read more
Source§

impl<T, A, const DEVICE: usize> MatmulPost for Tensor<T, Cpu, DEVICE, A>
where T: CommonBounds + MatmulMicroKernel, A: Allocator, A::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<T, Cpu, DEVICE, A>

The output tensor type.
Source§

type OutputMeta = T

The output tensor data type.
Source§

type InplaceOutput = Tensor<T, Cpu, DEVICE, A>

The inplace output tensor type.
Source§

fn matmul_post( &self, rhs: Tensor<T, Cpu, DEVICE, A>, post_op: fn(T) -> T, post_op_vec: fn(T::Vec) -> T::Vec, ) -> Result<Self::Output, TensorError>

Same as matmul, but performs the post operation before writing the final result to memory. Read more
Source§

fn matmul_post_<U>( &self, rhs: Tensor<T, Cpu, DEVICE, A>, post_op: fn(T) -> T, post_op_vec: fn(T::Vec) -> T::Vec, out: U, ) -> Result<Self::InplaceOutput, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

matrix multiplication with specified output tensor and post operation Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for Complex32

Source§

type Output = Tensor<<Complex<f32> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for Complex64

Source§

type Output = Tensor<<Complex<f64> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for bf16

Source§

type Output = Tensor<<bf16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for f16

Source§

type Output = Tensor<<f16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for f32

Source§

type Output = Tensor<<f32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for f64

Source§

type Output = Tensor<<f64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<&Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Mul<&Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Mul<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Complex<f32>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f32>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Complex32) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Complex<f32>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f32>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Complex32) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Complex<f64>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f64>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Complex64) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Complex<f64>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f64>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Complex64) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for Complex32

Source§

type Output = Tensor<<Complex<f32> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for Complex64

Source§

type Output = Tensor<<Complex<f64> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for bf16

Source§

type Output = Tensor<<bf16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for f16

Source§

type Output = Tensor<<f16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for f32

Source§

type Output = Tensor<<f32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for f64

Source§

type Output = Tensor<<f64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Mul<Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Mul<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<bf16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bf16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: bf16) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<bf16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bf16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: bf16) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<bool> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: bool) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<bool> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: bool) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<f16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: f16) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<f16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: f16) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<f32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: f32) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<f32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: f32) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<f64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: f64) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<f64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: f64) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<i16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: i16) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<i16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: i16) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<i32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: i32) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<i32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: i32) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<i64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: i64) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<i64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: i64) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<i8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: i8) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<i8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: i8) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<u16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: u16) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<u16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: u16) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<u32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: u32) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<u32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: u32) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<u64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: u64) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<u64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: u64) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<u8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: u8) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, const DEVICE: usize, A> Mul<u8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: u8) -> Self::Output

Performs the * operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> MulAssign<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

fn mul_assign(&mut self, rhs: &Tensor<U, Cpu, DEVICE, A>)

Performs the *= operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> MulAssign<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

fn mul_assign(&mut self, rhs: Tensor<U, Cpu, DEVICE, A>)

Performs the *= operation. Read more
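All of these impls add up to ordinary operator syntax: tensors multiply element-wise with other tensors or with scalars on either side, and `*=` updates in place. A hedged sketch (import path assumed):

```rust
use hpt::Tensor; // re-export path is an assumption

fn main() {
    let x: Tensor<f32> = [1.0f32, 2.0, 3.0].into();
    let y: Tensor<f32> = [4.0f32, 5.0, 6.0].into();
    let z = &x * &y;     // element-wise product; x and y remain usable
    let s = 2.0f32 * &x; // scalar on the left works too
    let mut w: Tensor<f32> = [7.0f32, 8.0, 9.0].into();
    w *= y;              // in-place update via MulAssign
    let _ = (z, s, w);
}
```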
Source§

impl<T, const DEVICE: usize, Al> Neg for &Tensor<T, Cpu, DEVICE, Al>

Source§

type Output = Tensor<<T as NormalOut>::Output, Cpu, DEVICE, Al>

The resulting type after applying the - operator.
Source§

fn neg(self) -> Self::Output

Performs the unary - operation. Read more
Source§

impl<T, const DEVICE: usize, Al> Neg for Tensor<T, Cpu, DEVICE, Al>

Source§

type Output = Tensor<<T as NormalOut>::Output, Cpu, DEVICE, Al>

The resulting type after applying the - operator.
Source§

fn neg(self) -> Self::Output

Performs the unary - operation. Read more
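Unary minus is available on both owned and borrowed tensors; a one-line sketch:

```rust
use hpt::Tensor; // re-export path is an assumption

fn main() {
    let x: Tensor<f32> = [1.0f32, -2.0, 3.0].into();
    let y = -&x; // element-wise negation without consuming x
    let _ = y;
}
```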
Source§

impl<A, B, const DEVICE: usize, Al> NormalBinOps<&Tensor<B, Cpu, DEVICE, Al>> for &Tensor<A, Cpu, DEVICE, Al>
where A: CommonBounds + NormalOut<B>, B: CommonBounds, <A as NormalOut<B>>::Output: CommonBounds + Cast<<A as NormalOut<B>>::Output>, A::Vec: NormalOut<B::Vec, Output = <<A as NormalOut<B>>::Output as TypeCommon>::Vec>, Al: Allocator, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>

The output tensor type.
Source§

type OutputMeta = <A as NormalOut<B>>::Output

The output tensor data type.
Source§

type InplaceOutput = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>

The inplace output tensor type.
Source§

fn add_<U>( &self, rhs: &Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

add with specified output tensor Read more
Source§

fn sub_<U>( &self, rhs: &Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

subtract with specified output tensor Read more
Source§

fn mul_<U>( &self, rhs: &Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

multiply with specified output tensor Read more
Source§

fn rem_<U>( &self, rhs: &Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

rem with specified output tensor Read more
Source§

impl<A, B, const DEVICE: usize, Al> NormalBinOps<&Tensor<B, Cpu, DEVICE, Al>> for Tensor<A, Cpu, DEVICE, Al>
where A: CommonBounds + NormalOut<B>, B: CommonBounds, <A as NormalOut<B>>::Output: CommonBounds + Cast<<A as NormalOut<B>>::Output>, A::Vec: NormalOut<B::Vec, Output = <<A as NormalOut<B>>::Output as TypeCommon>::Vec>, Al: Allocator, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>

The output tensor type.
Source§

type OutputMeta = <A as NormalOut<B>>::Output

The output tensor data type.
Source§

type InplaceOutput = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>

The inplace output tensor type.
Source§

fn add_<U>( &self, rhs: &Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

add with specified output tensor Read more
Source§

fn sub_<U>( &self, rhs: &Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

subtract with specified output tensor Read more
Source§

fn mul_<U>( &self, rhs: &Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

multiply with specified output tensor Read more
Source§

fn rem_<U>( &self, rhs: &Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

rem with specified output tensor Read more
Source§

impl<A, B, const DEVICE: usize, Al> NormalBinOps<Tensor<B, Cpu, DEVICE, Al>> for &Tensor<A, Cpu, DEVICE, Al>
where A: CommonBounds + NormalOut<B>, B: CommonBounds, <A as NormalOut<B>>::Output: CommonBounds + Cast<<A as NormalOut<B>>::Output>, A::Vec: NormalOut<B::Vec, Output = <<A as NormalOut<B>>::Output as TypeCommon>::Vec>, Al: Allocator, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>

The output tensor type.
Source§

type OutputMeta = <A as NormalOut<B>>::Output

The output tensor data type.
Source§

type InplaceOutput = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>

The inplace output tensor type.
Source§

fn add_<U>( &self, rhs: Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

add with specified output tensor Read more
Source§

fn sub_<U>( &self, rhs: Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

subtract with specified output tensor Read more
Source§

fn mul_<U>( &self, rhs: Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

multiply with specified output tensor Read more
Source§

fn rem_<U>( &self, rhs: Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

rem with specified output tensor Read more
Source§

impl<A, B, const DEVICE: usize, Al> NormalBinOps<Tensor<B, Cpu, DEVICE, Al>> for Tensor<A, Cpu, DEVICE, Al>
where A: CommonBounds + NormalOut<B>, B: CommonBounds, <A as NormalOut<B>>::Output: CommonBounds + Cast<<A as NormalOut<B>>::Output>, A::Vec: NormalOut<B::Vec, Output = <<A as NormalOut<B>>::Output as TypeCommon>::Vec>, Al: Allocator, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>

The output tensor type.
Source§

type OutputMeta = <A as NormalOut<B>>::Output

The output tensor data type.
Source§

type InplaceOutput = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>

The inplace output tensor type.
Source§

fn add_<U>( &self, rhs: Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

add with specified output tensor Read more
Source§

fn sub_<U>( &self, rhs: Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

subtract with specified output tensor Read more
Source§

fn mul_<U>( &self, rhs: Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

multiply with specified output tensor Read more
Source§

fn rem_<U>( &self, rhs: Tensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

rem with specified output tensor Read more
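The `*_` variants write into a caller-supplied output tensor instead of allocating a new one. A sketch that reuses an existing, correctly shaped tensor as the destination (trait path assumed):

```rust
use hpt::ops::NormalBinOps; // trait path is an assumption
use hpt::Tensor;

fn main() {
    let a: Tensor<f32> = [1.0f32, 2.0, 3.0].into();
    let b: Tensor<f32> = [4.0f32, 5.0, 6.0].into();
    // Any value that borrows the inplace-output type works; here a same-shaped tensor.
    let mut buf: Tensor<f32> = [0.0f32, 0.0, 0.0].into();
    let sum = a.add_(&b, &mut buf).expect("add_ failed"); // result written into buf's storage
    let _ = sum;
}
```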
Source§

impl<T, const DEVICE: usize, Al> NormalEvalReduce<T> for Tensor<T, Cpu, DEVICE, Al>
where T: CommonBounds + Eval<Output = bool> + Cast<bool>, T::Vec: Eval, <T::Vec as Eval>::Output: SimdSelect<T::Vec>, Al: Allocator + Send + Sync + 'static, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<T, Cpu, DEVICE, Al>

The output tensor type.
Source§

fn nansum<S: Into<Axis>>( &self, axes: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Compute the sum of elements along the specified dimensions, treating NaNs as zero Read more
Source§

fn nansum_<S: Into<Axis>, O>( &self, axes: S, keep_dims: bool, init_out: bool, out: O, ) -> Result<Self::Output, TensorError>
where O: BorrowMut<Self::Output>,

Compute the sum of elements along the specified dimensions, treating NaNs as zero, writing into the specified output tensor Read more
Source§

fn nanprod<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Compute the product of elements along the specified dimensions, treating NaNs as one Read more
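A sketch of the NaN-aware reductions (trait path and integer-to-`Axis` conversion assumed):

```rust
use hpt::ops::NormalEvalReduce; // trait path is an assumption
use hpt::Tensor;

fn main() {
    let x: Tensor<f32> = [1.0f32, f32::NAN, 3.0].into();
    // NaN counts as 0 for the sum and as 1 for the product.
    let s = x.nansum(0, false).expect("nansum failed");   // expected 4.0
    let p = x.nanprod(0, false).expect("nanprod failed"); // expected 3.0
    let _ = (s, p);
}
```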
Source§

impl<T, const DEVICE: usize, A> NormalPooling for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<T, Cpu, DEVICE, A>

The output type is the same as the input type.
Source§

fn maxpool2d<S: Into<Shape>>( &self, kernels_shape: S, steps: [i64; 2], padding: [(i64, i64); 2], dilation: [i64; 2], ) -> Result<Self::Output, TensorError>

Performs a 2D max pooling operation on the input tensor, selecting the maximum value from each window. Read more
Source§

fn adaptive_maxpool2d( &self, output_size: [i64; 2], ) -> Result<Self::Output, TensorError>

Performs an adaptive max pooling operation on the input tensor, automatically determining the kernel size and stride to produce the specified output dimensions. Read more
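A hedged pooling sketch; the array-to-`Shape` conversions and the expected memory layout of the 4-D input are assumptions, and `randn` (documented further below) is only used here to get data of the right rank:

```rust
use hpt::ops::{NormalPooling, Random}; // trait paths are assumptions
use hpt::Tensor;

fn main() {
    // A 4-D input; the exact layout convention (e.g. NHWC vs. NCHW) is not restated here.
    let x = Tensor::<f32>::randn([1i64, 8, 8, 3]).expect("randn failed");
    // 2x2 windows, stride 2, no padding, no dilation.
    let pooled = x
        .maxpool2d([2i64, 2], [2, 2], [(0, 0), (0, 0)], [1, 1])
        .expect("maxpool2d failed");
    // Adaptive variant: pick the output size and let the kernel/stride be derived.
    let small = x.adaptive_maxpool2d([4, 4]).expect("adaptive_maxpool2d failed");
    let _ = (pooled, small);
}
```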
Source§

impl<T: CommonBounds, const DEVICE: usize, Al> NormalReduce<T> for Tensor<T, Cpu, DEVICE, Al>
where Al: Allocator + Send + Sync + 'static, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<T, Cpu, DEVICE, Al>

The output tensor type.
Source§

fn sum<S: Into<Axis>>( &self, axes: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Compute the sum of elements along the specified dimensions Read more
Source§

fn sum_<S: Into<Axis>, O>( &self, axes: S, keep_dims: bool, init_out: bool, out: O, ) -> Result<Self::Output, TensorError>
where O: BorrowMut<Self::Output>,

Compute the sum of elements along the specified dimensions with specified output tensor Read more
Source§

fn prod<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Compute the product of elements along the specified dimensions Read more
Source§

fn min<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self, TensorError>

Find the minimum element along the specified dimensions Read more
Source§

fn max<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self, TensorError>

Find the maximum element along the specified dimensions Read more
Source§

fn reducel1<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Compute the L1 norm along the specified dimensions Read more
Source§

fn sum_square<S: Into<Axis>>( &self, axis: S, keep_dims: bool, ) -> Result<Self::Output, TensorError>

Compute the sum of squares of elements along the specified dimensions Read more
Source§

impl<T, const DEVICE: usize, Al> NormalUaryOps for Tensor<T, Cpu, DEVICE, Al>
where T: CommonBounds, <T as NormalOut>::Output: CommonBounds, T::Vec: NormalOutUnary, _Tensor<<T as NormalOut>::Output, Cpu, DEVICE, Al>: TensorLike<<T as NormalOut>::Output>, Al: Allocator, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<<T as NormalOut>::Output, Cpu, DEVICE, Al>

The output type of the unary operation.
Source§

type InplaceOutput = Tensor<<T as NormalOut>::Output, Cpu, DEVICE, Al>

The output type of the inplace unary operation.
Source§

type OutputMeta = <T as NormalOut>::Output

The output tensor data type.
Source§

fn floor(&self) -> Result<Self::Output, TensorError>

Computes the element-wise floor of the tensor. Read more
Source§

fn floor_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

Floor method with output tensor; this method will write the result to the output tensor Read more
Source§

fn square(&self) -> Result<Self::Output, TensorError>

Computes the element-wise square of the tensor. Read more
Source§

fn square_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

Square method with output tensor; this method will write the result to the output tensor Read more
Source§

fn abs(&self) -> Result<Self::Output, TensorError>

Computes the element-wise absolute value of the tensor. Read more
Source§

fn abs_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

abs method with output tensor; this method will write the result to the output tensor Read more
Source§

fn ceil(&self) -> Result<Self::Output, TensorError>

Computes the element-wise ceiling of the tensor. Read more
Source§

fn ceil_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

Ceil method with output tensor; this method will write the result to the output tensor Read more
Source§

fn sign(&self) -> Result<Self::Output, TensorError>

Computes the element-wise sign of the tensor. Read more
Source§

fn sign_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

sign method with output tensor; this method will write the result to the output tensor Read more
Source§

fn clamp( &self, min: Self::OutputMeta, max: Self::OutputMeta, ) -> Result<Self::Output, TensorError>

Clamps (limits) the values of the tensor between the specified min and max. Read more
Source§

fn clamp_<U>( &self, min: Self::OutputMeta, max: Self::OutputMeta, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

clamp method with output tensor; this method will write the result to the output tensor Read more
Source§

fn round(&self) -> Result<Self::Output, TensorError>

Computes the element-wise rounding of the tensor. Read more
Source§

fn round_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

round method with output tensor; this method will write the result to the output tensor Read more
Source§

fn neg(&self) -> Result<Self::Output, TensorError>

Computes the element-wise negation (multiplying by -1) of the tensor. Read more
Source§

fn neg_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

In-place version of neg. Read more
Source§

fn relu(&self) -> Result<Self::Output, TensorError>

Computes the element-wise Rectified Linear Unit (ReLU) activation function. Read more
Source§

fn relu_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

relu method with output tensor; this method will write the result to the output tensor Read more
Source§

fn leaky_relu( &self, alpha: Self::OutputMeta, ) -> Result<Self::Output, TensorError>

Computes the element-wise Leaky Rectified Linear Unit (Leaky ReLU) activation function. Read more
Source§

fn leaky_relu_<U>( &self, alpha: Self::OutputMeta, out: U, ) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

leaky_relu method with output tensor; this method will write the result to the output tensor Read more
Source§

fn relu6(&self) -> Result<Self::Output, TensorError>

Computes the element-wise Rectified Linear Unit 6 (ReLU6) activation function. Read more
Source§

fn relu6_<U>(&self, out: U) -> Result<Self::Output, TensorError>
where U: BorrowMut<Self::InplaceOutput>,

relu6 method with output tensor; this method will write the result to the output tensor Read more
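A sketch of a few of the element-wise unary operations above (trait path assumed):

```rust
use hpt::ops::NormalUaryOps; // trait path is an assumption
use hpt::Tensor;

fn main() {
    let x: Tensor<f32> = [-1.5f32, 0.0, 2.5].into();
    let r = x.relu().expect("relu failed");            // expected [0.0, 0.0, 2.5]
    let c = x.clamp(-1.0, 1.0).expect("clamp failed"); // expected [-1.0, 0.0, 1.0]
    let a = x.abs().expect("abs failed");              // expected [1.5, 0.0, 2.5]
    let _ = (r, c, a);
}
```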
Source§

impl<T, const DEVICE: usize, A> NormalizationOps for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as FloatOutBinary>::Output, Cpu, DEVICE, A>

The type of the output tensor
Source§

type OutputMeta = <T as FloatOutBinary>::Output

The output tensor data type.
Source§

fn layernorm<S: Into<Shape>>( &self, normalized_shape: S, gamma: Option<&Self::Output>, beta: Option<&Self::Output>, eps: Self::OutputMeta, ) -> Result<Self::Output, TensorError>
where usize: Cast<Self::OutputMeta>,

Applies Layer Normalization over the specified axes. Read more
Source§

fn softmax(&self, axis: i64) -> Result<Self::Output, TensorError>

Applies the softmax function to the input tensor along the specified dimension. The softmax function normalizes the input to a probability distribution, such that each element is in the range [0, 1] and all elements sum to 1. Read more
Source§

fn log_softmax(&self, axis: i64) -> Result<Self::Output, TensorError>

Applies the log-softmax function to the input tensor along the specified dimension. The log-softmax function is equivalent to applying the logarithm to the output of the softmax function, but is more numerically stable when computed directly. Read more
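A sketch of softmax and log-softmax over the last axis (trait path assumed):

```rust
use hpt::ops::NormalizationOps; // trait path is an assumption
use hpt::Tensor;

fn main() {
    let logits: Tensor<f32> = [[1.0f32, 2.0, 3.0]].into();
    let probs = logits.softmax(1).expect("softmax failed");        // each row sums to 1
    let logp = logits.log_softmax(1).expect("log_softmax failed"); // log of the same distribution
    let _ = (probs, logp);
}
```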
Source§

impl<T, const DEVICE: usize, Al> Not for &Tensor<T, Cpu, DEVICE, Al>

Source§

type Output = Tensor<<T as BitWiseOut>::Output, Cpu, DEVICE, Al>

The resulting type after applying the ! operator.
Source§

fn not(self) -> Self::Output

Performs the unary ! operation. Read more
Source§

impl<T, const DEVICE: usize, Al> Not for Tensor<T, Cpu, DEVICE, Al>

Source§

type Output = Tensor<<T as BitWiseOut>::Output, Cpu, DEVICE, Al>

The resulting type after applying the ! operator.
Source§

fn not(self) -> Self::Output

Performs the unary ! operation. Read more
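A one-line sketch of the `!` operator on a boolean tensor; that `bool` satisfies the hidden `BitWiseOut` bound is an assumption:

```rust
use hpt::Tensor; // re-export path is an assumption

fn main() {
    let mask: Tensor<bool> = [true, false, true].into();
    let inverted = !&mask; // element-wise NOT without consuming mask
    let _ = inverted;
}
```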
Source§

impl<T, const DEVICE: usize, Al> PartialEq for Tensor<T, Cpu, DEVICE, Al>
where T: Eval<Output = bool> + Cmp<Output = bool> + CommonBounds, Al: Allocator, Al::Output: AllocatorOutputRetrive,

Source§

fn eq(&self, other: &Tensor<T, Cpu, DEVICE, Al>) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · Source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, and should not be overridden without very good reason.
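Whole-tensor equality collapses an element-wise comparison to a single bool; a short sketch (exact comparison semantics assumed to require matching shapes and elements):

```rust
use hpt::Tensor; // re-export path is an assumption

fn main() {
    let a: Tensor<f32> = [1.0f32, 2.0].into();
    let b: Tensor<f32> = [1.0f32, 2.0].into();
    assert!(a == b); // expected to hold: same shape, same elements
}
```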
Source§

impl<T, const DEVICE: usize, Al> Random for Tensor<T, Cpu, DEVICE, Al>

Source§

type Meta = T

Associated type for meta-information or parameters relevant to distributions.
Source§

fn randn<S: Into<Shape>>(shape: S) -> Result<Self, TensorError>

Create a Tensor with data drawn from a normal distribution with mean = 0.0 and std_dev = 1.0. Read more
Source§

fn randn_like(&self) -> Result<Self, TensorError>

Same as randn but the shape will be based on x. Read more
Source§

fn rand<S: Into<Shape>>( shape: S, low: Self::Meta, high: Self::Meta, ) -> Result<Self, TensorError>

Create a Tensor with data uniformly distributed between low and high. Read more
Source§

fn rand_like( &self, low: Self::Meta, high: Self::Meta, ) -> Result<Self, TensorError>

Same as rand but the shape will be based on x. Read more
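A sketch of the basic random constructors (trait path and array-to-`Shape` conversion assumed); the distribution-specific constructors that follow work the same way, taking their parameters first and the shape last:

```rust
use hpt::ops::Random; // trait path is an assumption
use hpt::Tensor;

fn main() {
    let gauss = Tensor::<f32>::randn([2i64, 3]).expect("randn failed");          // N(0, 1)
    let unif = Tensor::<f32>::rand([2i64, 3], -1.0, 1.0).expect("rand failed");  // U(-1, 1)
    let like = gauss.randn_like().expect("randn_like failed");                   // same shape as gauss
    let _ = (unif, like);
}
```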
Source§

fn beta<S: Into<Shape>>( a: Self::Meta, b: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from a beta distribution with parameters alpha and beta. The beta distribution is a continuous probability distribution defined on the interval [0, 1]. Read more
Source§

fn beta_like(&self, a: Self::Meta, b: Self::Meta) -> Result<Self, TensorError>

Same as beta but the shape will be based on x. Creates a Tensor with values drawn from a beta distribution with parameters alpha and beta. Read more
Source§

fn chisquare<S: Into<Shape>>( df: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from a chi-square distribution with df degrees of freedom. The chi-square distribution is a continuous probability distribution of the sum of squares of df independent standard normal random variables. Read more
Source§

fn chisquare_like(&self, df: Self::Meta) -> Result<Self, TensorError>

Same as chisquare but the shape will be based on x. Creates a Tensor with values drawn from a chi-square distribution with df degrees of freedom. Read more
Source§

fn exponential<S: Into<Shape>>( lambda: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from an exponential distribution with rate parameter lambda. The exponential distribution describes the time between events in a Poisson point process. Read more
Source§

fn exponential_like(&self, lambda: Self::Meta) -> Result<Self, TensorError>

Same as exponential but the shape will be based on x. Creates a Tensor with values drawn from an exponential distribution with rate parameter lambda. Read more
Source§

fn gamma<S: Into<Shape>>( gamm_shape: Self::Meta, scale: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from a gamma distribution with shape parameter gamm_shape (often denoted as k or α) and scale parameter scale (often denoted as θ). The gamma distribution is a continuous probability distribution that generalizes the exponential distribution. Read more
Source§

fn gamma_like( &self, shape: Self::Meta, scale: Self::Meta, ) -> Result<Self, TensorError>

Same as gamma but the shape will be based on x. Creates a Tensor with values drawn from a gamma distribution with shape parameter shape_param and scale parameter scale. Read more
Source§

fn gumbel<S: Into<Shape>>( mu: Self::Meta, beta: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from a Gumbel distribution (also known as the Extreme Value Type I distribution) with location parameter mu and scale parameter beta. The Gumbel distribution is commonly used to model the distribution of extreme values. Read more
Source§

fn gumbel_like( &self, mu: Self::Meta, beta: Self::Meta, ) -> Result<Self, TensorError>

Same as gumbel but the shape will be based on x. Creates a Tensor with values drawn from a Gumbel distribution with location parameter mu and scale parameter beta. Read more
Source§

fn lognormal<S: Into<Shape>>( mean: Self::Meta, std: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from a log-normal distribution. A random variable is log-normally distributed if the logarithm of the random variable is normally distributed. The parameters mean and std are the mean and standard deviation of the underlying normal distribution. Read more
Source§

fn lognormal_like( &self, mean: Self::Meta, std: Self::Meta, ) -> Result<Self, TensorError>

Same as lognormal but the shape will be based on x. Creates a Tensor with values drawn from a log-normal distribution with parameters mean and std of the underlying normal distribution. Read more
Source§

fn normal_gaussian<S: Into<Shape>>( mean: Self::Meta, std: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from a normal (Gaussian) distribution with specified mean and standard deviation. The normal distribution is a continuous probability distribution that is symmetric around its mean, showing the familiar bell-shaped curve. Read more
Source§

fn normal_gaussian_like( &self, mean: Self::Meta, std: Self::Meta, ) -> Result<Self, TensorError>

Same as normal_gaussian but the shape will be based on x. Creates a Tensor with values drawn from a normal distribution with specified mean and standard deviation. Read more
Source§

fn pareto<S: Into<Shape>>( pareto_shape: Self::Meta, a: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from a Pareto distribution. The Pareto distribution is a power-law probability distribution often used to describe the distribution of wealth, population sizes, and many other natural and social phenomena. Read more
Source§

fn pareto_like( &self, pareto_shape: Self::Meta, a: Self::Meta, ) -> Result<Self, TensorError>

Same as pareto but the shape will be based on x. Creates a Tensor with values drawn from a Pareto distribution with specified scale and shape parameters. Read more
Source§

fn poisson<S: Into<Shape>>( lambda: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from a Poisson distribution. The Poisson distribution is a discrete probability distribution that expresses the probability of a given number of events occurring in a fixed interval of time or space, assuming these events occur with a known constant mean rate (λ) and independently of the time since the last event. Read more
Source§

fn poisson_like(&self, lambda: Self::Meta) -> Result<Self, TensorError>

Same as poisson but the shape will be based on x. Creates a Tensor with values drawn from a Poisson distribution with specified rate parameter. Read more
Source§

fn weibull<S: Into<Shape>>( a: Self::Meta, b: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from a Weibull distribution. The Weibull distribution is a continuous probability distribution commonly used in reliability engineering, survival analysis, and extreme value theory. Read more
Source§

fn weibull_like( &self, a: Self::Meta, b: Self::Meta, ) -> Result<Self, TensorError>

Same as weibull but the shape will be based on x. Creates a Tensor with values drawn from a Weibull distribution with specified shape and scale parameters. Read more
Source§

fn zipf<S: Into<Shape>>( n: Self::Meta, a: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from a Zipf distribution. The Zipf distribution is a discrete probability distribution commonly used to model frequency distributions of ranked data in various physical and social phenomena, where the frequency of any element is inversely proportional to its rank. Read more
Source§

fn zipf_like(&self, n: Self::Meta, a: Self::Meta) -> Result<Self, TensorError>

Same as zipf but the shape will be based on x. Creates a Tensor with values drawn from a Zipf distribution with specified number of elements and exponent parameter. Read more
Source§

fn triangular<S: Into<Shape>>( low: Self::Meta, high: Self::Meta, mode: Self::Meta, shape: S, ) -> Result<Self, TensorError>

Create a Tensor with values drawn from a triangular distribution. The triangular distribution is a continuous probability distribution with a lower limit low, upper limit high, and mode mode. It forms a triangular shape in its probability density function. Read more
Source§

fn triangular_like( &self, low: Self::Meta, high: Self::Meta, mode: Self::Meta, ) -> Result<Self, TensorError>

Same as triangular but the shape will be based on x. Creates a Tensor with values drawn from a triangular distribution with specified lower limit, upper limit, and mode. Read more
Source§

fn bernoulli<S: Into<Shape>>( shape: S, p: Self::Meta, ) -> Result<Self, TensorError>
where T: Cast<f64>, bool: Cast<T>,

Create a Tensor with values drawn from a Bernoulli distribution. The Bernoulli distribution is a discrete probability distribution of a random variable which takes the value 1 with probability p and the value 0 with probability q = 1 - p. Read more
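A minimal sketch drawing from a few of the distributions above (the import path and array-to-`Shape` conversion are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::*; // trait imports; the exact module path is an assumption

fn main() {
    // exponential(lambda = 1.5), 1000 samples
    let e = Tensor::<f32>::exponential(1.5, [1000]).expect("exponential failed");
    // gamma with shape k = 2.0 and scale theta = 2.0
    let g = Tensor::<f32>::gamma(2.0, 2.0, [1000]).expect("gamma failed");
    // bernoulli: 1 with probability p = 0.3, otherwise 0
    let b = Tensor::<f32>::bernoulli([1000], 0.3).expect("bernoulli failed");
    let _ = (e, g, b);
}
```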
Source§

impl<T, const DEVICE: usize, Al> RandomInt for Tensor<T, Cpu, DEVICE, Al>

Source§

type Meta = T

Associated type for meta-information or parameters relevant to distributions.
Source§

fn randint<S: Into<Shape>>( low: Self::Meta, high: Self::Meta, shape: S, ) -> Result<Self, TensorError>
where <T as SampleUniform>::Sampler: Sync,

Create a Tensor with random integers drawn uniformly from the half-open interval [low, high). The distribution is uniform, meaning each integer in the range has an equal probability of being drawn. Read more
Source§

fn randint_like( &self, low: Self::Meta, high: Self::Meta, ) -> Result<Self, TensorError>
where <T as SampleUniform>::Sampler: Sync,

Same as randint but the shape will be based on x. Creates a Tensor with random integers drawn uniformly from the half-open interval [low, high). Read more
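A minimal sketch of the integer samplers (the import path and array-to-`Shape` conversion are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::*; // trait imports; the exact module path is an assumption

fn main() {
    // integers drawn uniformly from the half-open interval [1, 7)
    let rolls = Tensor::<i64>::randint(1, 7, [10]).expect("randint failed");
    // same distribution, shape taken from `rolls`
    let more = rolls.randint_like(1, 7).expect("randint_like failed");
    let _ = more;
}
```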
Source§

impl<T, const DEVICE: usize, A> RegularizationOps for Tensor<T, Cpu, DEVICE, A>
where T: CommonBounds + Cmp<Output = bool>, T::Vec: SimdCmp, <T::Vec as SimdCmp>::Output: SimdSelect<T::Vec>, A: Allocator + Send + Sync, A::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<T, Cpu, DEVICE, A>

The type of the output tensor
Source§

type OutputMeta = T

The type of the output meta
Source§

fn dropout(&self, rate: f64) -> Result<Self::Output, TensorError>
where f64: Cast<Self::OutputMeta>, bool: Cast<Self::OutputMeta>, Self::OutputMeta: NormalOut<bool, Output = Self::OutputMeta>,

Randomly zeroes some of the elements of the input tensor with probability rate using samples from a Bernoulli distribution. Each element is zeroed independently. Read more
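A minimal dropout sketch (import paths and array-to-`Shape` conversion are assumptions; whether surviving elements are rescaled is not stated on this page):

```rust
use hpt::Tensor;
use hpt::ops::*; // trait imports; the exact module path is an assumption

fn main() {
    let x = Tensor::<f32>::randn([2, 4]).expect("randn failed");
    // each element is zeroed independently with probability 0.5
    let y = x.dropout(0.5).expect("dropout failed");
    let _ = y;
}
```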
Source§

fn shrinkage( &self, bias: Self::OutputMeta, lambda: Self::OutputMeta, ) -> Result<Self::Output, TensorError>

Applies the shrinkage function to the input tensor. The shrinkage function is a soft thresholding operator commonly used in signal processing and optimization algorithms, defined as: sign(x - bias) * max(abs(x - bias) - lambda, 0) Read more
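A minimal shrinkage sketch (assumes `Tensor::new` accepts a plain array; the expected values follow directly from the formula above):

```rust
use hpt::Tensor;
use hpt::ops::*; // trait imports; the exact module path is an assumption

fn main() {
    let x = Tensor::<f32>::new([-2.0f32, -0.5, 0.0, 0.5, 2.0]);
    // soft-threshold with bias = 0.0 and lambda = 1.0:
    // sign(x - 0.0) * max(|x - 0.0| - 1.0, 0)
    // expected result: [-1.0, 0.0, 0.0, 0.0, 1.0]
    let y = x.shrinkage(0.0, 1.0).expect("shrinkage failed");
    let _ = y;
}
```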
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for Complex32

Source§

type Output = Tensor<<Complex<f32> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for Complex64

Source§

type Output = Tensor<<Complex<f64> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for bf16

Source§

type Output = Tensor<<bf16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for f16

Source§

type Output = Tensor<<f16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for f32

Source§

type Output = Tensor<<f32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for f64

Source§

type Output = Tensor<<f64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<&Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Rem<&Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
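The `%` implementations cover tensor-tensor and tensor-scalar combinations in both orders. A minimal sketch (import path and array-to-`Shape` conversion are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::*; // trait imports; the exact module path is an assumption

fn main() {
    let a = Tensor::<f32>::randn([2, 3]).expect("randn failed");
    let b = Tensor::<f32>::randn([2, 3]).expect("randn failed");
    let tt = &a % &b;     // tensor % tensor, elementwise remainder
    let ts = &a % 2.0f32; // tensor % scalar
    let st = 2.0f32 % &a; // scalar % tensor
    let _ = (tt, ts, st);
}
```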
Source§

impl<T, U, const DEVICE: usize, A> Rem<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Complex<f32>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f32>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Complex32) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Complex<f32>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f32>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Complex32) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Complex<f64>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f64>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Complex64) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Complex<f64>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f64>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Complex64) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for Complex32

Source§

type Output = Tensor<<Complex<f32> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for Complex64

Source§

type Output = Tensor<<Complex<f64> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for bf16

Source§

type Output = Tensor<<bf16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for f16

Source§

type Output = Tensor<<f16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for f32

Source§

type Output = Tensor<<f32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for f64

Source§

type Output = Tensor<<f64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Rem<Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Rem<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<bf16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bf16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: bf16) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<bf16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bf16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: bf16) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<bool> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: bool) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<bool> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: bool) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<f16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: f16) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<f16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: f16) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<f32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: f32) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<f32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: f32) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<f64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: f64) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<f64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: f64) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<i16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: i16) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<i16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: i16) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<i32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: i32) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<i32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: i32) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<i64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: i64) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<i64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: i64) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<i8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: i8) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<i8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: i8) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<u16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: u16) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<u16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: u16) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<u32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: u32) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<u32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: u32) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<u64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: u64) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<u64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: u64) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<u8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: u8) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, const DEVICE: usize, A> Rem<u8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the % operator.
Source§

fn rem(self, rhs: u8) -> Self::Output

Performs the % operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> RemAssign<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

fn rem_assign(&mut self, rhs: &Tensor<U, Cpu, DEVICE, A>)

Performs the %= operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> RemAssign<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

fn rem_assign(&mut self, rhs: Tensor<U, Cpu, DEVICE, A>)

Performs the %= operation. Read more
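A minimal `%=` sketch (the import path is an assumption; the in-place form presumably requires the result element type to match the left-hand tensor's):

```rust
use hpt::Tensor;
use hpt::ops::*; // trait imports; the exact module path is an assumption

fn main() {
    let mut a = Tensor::<f32>::randn([2, 3]).expect("randn failed");
    let b = Tensor::<f32>::randn([2, 3]).expect("randn failed");
    // in-place elementwise remainder
    a %= &b;
    let _ = a;
}
```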
Source§

impl<T, const DEVICE: usize, A> Save for Tensor<T, Cpu, DEVICE, A>
where T: CommonBounds + NoUninit + Pod, A: Allocator + 'static, Tensor<T, Cpu, DEVICE, A>: TensorCreator<Output = Tensor<T, Cpu, DEVICE, A>>,

Source§

type Meta = TensorMeta<T, Tensor<T, Cpu, DEVICE, A>>

Source§

fn __save( data: &Self, file: &mut File, len_so_far: &mut usize, global_cnt: &mut usize, compression_algo: CompressionAlgo, level: u32, ) -> Result<Self::Meta>

Source§

fn save<P>(&self, path: P) -> Result<(), Error>
where P: Into<PathBuf>, Self::Meta: Serialize,
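A minimal save sketch (the trait import path is an assumption, and the file name and extension are arbitrary placeholders):

```rust
use hpt::Tensor;
use hpt::ops::*; // `Random` and `Save` must be in scope; this path is an assumption

fn main() {
    let t = Tensor::<f32>::randn([2, 2]).expect("randn failed");
    // persist the tensor to disk in hpt's own serialized format
    t.save("tensor_checkpoint.bin").expect("save failed");
}
```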

Source§

impl<T: CommonBounds, const DEVICE: usize, Al> ShapeManipulate for Tensor<T, Cpu, DEVICE, Al>

Source§

type Meta = T

tensor data type
Source§

type Output = Tensor<T, Cpu, DEVICE, Al>

the output type
Source§

fn squeeze<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>

Remove single-dimensional entries (axes with size 1) from the shape of the tensor at specified positions. Read more
Source§

fn unsqueeze<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>

Adds a new dimension of size 1 to the tensor at the specified position. Read more
Source§

fn reshape<S: Into<Shape>>(&self, shape: S) -> Result<Self::Output, TensorError>

Gives a new shape to the tensor without changing its data when it is possible. Read more
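A minimal sketch of squeeze/unsqueeze/reshape (the import path, array-to-`Shape` conversion, and `Axis` converting from `i64` are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::*; // trait imports; the exact module path is an assumption

fn main() {
    let x = Tensor::<f32>::randn([2, 1, 3]).expect("randn failed");
    // axis arguments written as i64 on the assumption that `Axis` converts from i64
    let y = x.squeeze(1i64).expect("squeeze failed");     // shape [2, 3]
    let z = y.unsqueeze(0i64).expect("unsqueeze failed"); // shape [1, 2, 3]
    let w = z.reshape([3, 2]).expect("reshape failed");   // shape [3, 2]
    let _ = w;
}
```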
Source§

fn transpose(&self, axis1: i64, axis2: i64) -> Result<Self::Output, TensorError>

Swaps two axes of the tensor, returning a view of the tensor with the specified axes transposed. Read more
Source§

fn permute<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>

Permutes the dimensions of the tensor according to the given axes order. Read more
Source§

fn permute_inv<A: Into<Axis>>( &self, axes: A, ) -> Result<Self::Output, TensorError>

Performs the inverse permutation of dimensions according to the given axes order. This is equivalent to undoing a previous permutation. Read more
Source§

fn expand<S: Into<Shape>>(&self, shape: S) -> Result<Self::Output, TensorError>

Expands the tensor to a larger size, replicating the data along specified dimensions. Read more
Source§

fn t(&self) -> Result<Self::Output, TensorError>

Transposes the tensor by swapping the last two dimensions. For 1D or 2D tensors, this is equivalent to a regular transpose. For higher dimensional tensors, only the last two dimensions are swapped. Read more
Source§

fn mt(&self) -> Result<Self::Output, TensorError>

Performs a complete transpose by reversing all dimensions of the tensor. This is different from t() which only swaps the last two dimensions. Read more
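A minimal sketch contrasting transpose, permute, t, and mt (the import path and the array/integer conversions into `Shape`/`Axis` are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::*; // trait imports; the exact module path is an assumption

fn main() {
    let x = Tensor::<f32>::randn([2, 3, 4]).expect("randn failed");
    let a = x.transpose(0, 2).expect("transpose failed");   // shape [4, 3, 2]
    let b = x.permute([2i64, 0, 1]).expect("permute failed"); // shape [4, 2, 3]
    let c = x.t().expect("t failed");                         // shape [2, 4, 3]
    let d = x.mt().expect("mt failed");                       // shape [4, 3, 2]
    let _ = (a, b, c, d);
}
```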
Source§

fn flip<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>

Reverses the order of elements in the tensor along the specified axes. Read more
Source§

fn fliplr(&self) -> Result<Self::Output, TensorError>

Reverses the order of elements along axis 1 (columns) of the tensor. The tensor must be at least 2-dimensional. Read more
Source§

fn flipud(&self) -> Result<Self::Output, TensorError>

Reverses the order of elements along axis 0 (rows) of the tensor. The tensor must be at least 1-dimensional. Read more
Source§

fn tile<S: Into<Axis>>(&self, repeats: S) -> Result<Self::Output, TensorError>

Constructs a new tensor by repeating the input tensor along specified dimensions. Read more
Source§

fn trim_zeros(&self, trim: &str) -> Result<Self::Output, TensorError>
where Self::Meta: PartialEq,

Removes zeros from the beginning and/or end of a 1-D tensor. Read more
Source§

fn repeat(&self, repeats: usize, axes: i16) -> Result<Self::Output, TensorError>

Repeats elements of a tensor along a specified axis. Read more
Source§

fn split( &self, indices_or_sections: &[i64], axis: i64, ) -> Result<Vec<Self>, TensorError>

Splits a tensor into multiple sub-tensors along a specified axis at given indices. Read more
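A minimal split sketch (the import path and array-to-`Shape` conversion are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::*; // trait imports; the exact module path is an assumption

fn main() {
    let x = Tensor::<f32>::randn([6, 4]).expect("randn failed");
    // split along axis 0 at row indices 2 and 4 -> pieces [0..2), [2..4), [4..6)
    let parts = x.split(&[2, 4], 0).expect("split failed");
    // hsplit is the axis-1 shorthand
    let halves = x.hsplit(&[2]).expect("hsplit failed");
    let _ = (parts, halves);
}
```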
Source§

fn dsplit(&self, indices: &[i64]) -> Result<Vec<Self>, TensorError>

Splits a tensor into multiple sub-tensors along axis 2 (depth). The tensor must be at least 3-dimensional. Read more
Source§

fn hsplit(&self, indices: &[i64]) -> Result<Vec<Self>, TensorError>

Splits a tensor into multiple sub-tensors horizontally (along axis 1). The tensor must be at least 2-dimensional. Read more
Source§

fn vsplit(&self, indices: &[i64]) -> Result<Vec<Self>, TensorError>

Splits a tensor into multiple sub-tensors vertically (along axis 0). The tensor must be at least 1-dimensional. Read more
Source§

fn swap_axes(&self, axis1: i64, axis2: i64) -> Result<Self::Output, TensorError>

Interchanges two axes of a tensor. This operation creates a view of the tensor with the specified axes swapped. Read more
Source§

fn flatten<A>(&self, start: A, end: A) -> Result<Self::Output, TensorError>
where A: Into<Option<usize>>,

Flattens a contiguous range of dimensions in a tensor into a single dimension. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<&Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<&Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<&Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<&Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<&Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<&Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<&Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<&Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<&Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Shl<&Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
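A minimal shift sketch covering scalar and tensor right-hand sides (the import path and array-to-`Shape` conversion are assumptions):

```rust
use hpt::Tensor;
use hpt::ops::*; // trait imports; the exact module path is an assumption

fn main() {
    // bit shifts are provided for integer element types via BitWiseOut
    let a = Tensor::<i32>::randint(0, 8, [4]).expect("randint failed");
    let left = &a << 2i32;  // tensor << scalar
    let right = &a >> 1i32; // tensor >> scalar
    let pair = &a << &a;    // tensor << tensor, elementwise
    let _ = (left, right, pair);
}
```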
Source§

impl<T, U, const DEVICE: usize, A> Shl<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Shl<Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Shl<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<bool> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: bool) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<bool> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: bool) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<i16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: i16) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<i16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: i16) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<i32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: i32) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<i32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: i32) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<i64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: i64) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<i64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: i64) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<i8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: i8) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<i8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: i8) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<u16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: u16) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<u16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: u16) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<u32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: u32) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<u32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: u32) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<u64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: u64) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<u64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: u64) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<u8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: u8) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shl<u8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the << operator.
Source§

fn shl(self, rhs: u8) -> Self::Output

Performs the << operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<&Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<&Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<&Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<&Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<&Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<&Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<&Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<&Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<&Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Shr<&Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Shr<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as BitWiseOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Shr<Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Shr<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<bool> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: bool) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<bool> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: bool) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<i16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: i16) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<i16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: i16) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<i32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: i32) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<i32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: i32) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<i64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: i64) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<i64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: i64) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<i8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: i8) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<i8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: i8) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<u16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: u16) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<u16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: u16) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<u32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: u32) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<u32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: u32) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<u64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: u64) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<u64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: u64) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<u8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: u8) -> Self::Output

Performs the >> operation. Read more
Source§

impl<T, const DEVICE: usize, A> Shr<u8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as BitWiseOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the >> operator.
Source§

fn shr(self, rhs: u8) -> Self::Output

Performs the >> operation. Read more
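A short sketch of the scalar right-shift impls above: the scalar operand is broadcast across every element, and the operator is implemented for both Tensor and &Tensor. The import paths, the shape-argument form, and error handling via expect are assumptions made for illustration, not taken from this page.

use hpt::Tensor;               // assumed re-export of the Tensor type
use hpt::ops::TensorCreator;   // assumed path of the creation trait

fn shr_example() {
    // [2, 3] is assumed to satisfy Into<Shape>.
    let a = Tensor::<i32>::full(8, [2, 3]).expect("full");
    // Shr<i32> is implemented for &Tensor, so `a` stays usable;
    // every element becomes 8 >> 1 == 4.
    let b = &a >> 1i32;
    // Shr is also implemented for the owned Tensor, consuming it.
    let _c = b >> 2i32;        // 4 >> 2 == 1
}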
Source§

impl<T: CommonBounds, B: BackendTy + Buffer + Clone, const DEVICE: usize, A> Slice for Tensor<T, B, DEVICE, A>
where A: Allocator,

Source§

fn slice(&self, index: &[(i64, i64, i64)]) -> Result<Tensor<T, B, DEVICE, A>, TensorError>

Create a new Tensor by slicing an existing Tensor. Slicing allows you to extract a portion of a tensor using index ranges for each dimension. Read more
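A minimal sketch of Slice::slice. The (start, end, step) reading of each index triple, the import paths, and the shape-argument form are assumptions for illustration.

use hpt::Tensor;                       // assumed re-export
use hpt::ops::{Slice, TensorCreator};  // assumed trait paths

fn slice_example() {
    let t = Tensor::<f32>::zeros([4, 6]).expect("zeros");
    // One (start, end, step) triple per dimension (assumed semantics):
    // rows 1..3 and every second column, giving a 2 x 3 view.
    let view = t.slice(&[(1, 3, 1), (0, 6, 2)]).expect("slice");
    let _ = view;
}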
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for Complex32

Source§

type Output = Tensor<<Complex<f32> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for Complex64

Source§

type Output = Tensor<<Complex<f64> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for bf16

Source§

type Output = Tensor<<bf16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for f16

Source§

type Output = Tensor<<f16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for f32

Source§

type Output = Tensor<<f32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for f64

Source§

type Output = Tensor<<f64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<&Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Sub<&Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Sub<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Complex<f32>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f32>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Complex32) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Complex<f32>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f32>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Complex32) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Complex<f64>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f64>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Complex64) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Complex<f64>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<Complex<f64>>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Complex64) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for Complex32

Source§

type Output = Tensor<<Complex<f32> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for Complex64

Source§

type Output = Tensor<<Complex<f64> as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for bf16

Source§

type Output = Tensor<<bf16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for bool

Source§

type Output = Tensor<<bool as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for f16

Source§

type Output = Tensor<<f16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for f32

Source§

type Output = Tensor<<f32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for f64

Source§

type Output = Tensor<<f64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for i16

Source§

type Output = Tensor<<i16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for i32

Source§

type Output = Tensor<<i32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for i64

Source§

type Output = Tensor<<i64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for i8

Source§

type Output = Tensor<<i8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for u16

Source§

type Output = Tensor<<u16 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for u32

Source§

type Output = Tensor<<u32 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for u64

Source§

type Output = Tensor<<u64 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<Tensor<T, Cpu, DEVICE, A>> for u8

Source§

type Output = Tensor<<u8 as NormalOut<T>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<T, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Sub<Tensor<U, Cpu, DEVICE, A>> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> Sub<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<U>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<U, Cpu, DEVICE, A>) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<bf16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bf16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: bf16) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<bf16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bf16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: bf16) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<bool> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: bool) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<bool> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<bool>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: bool) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<f16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: f16) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<f16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: f16) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<f32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: f32) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<f32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: f32) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<f64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: f64) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<f64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<f64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: f64) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<i16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: i16) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<i16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: i16) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<i32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: i32) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<i32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: i32) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<i64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: i64) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<i64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: i64) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<i8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: i8) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<i8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<i8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: i8) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<u16> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: u16) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<u16> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u16>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: u16) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<u32> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: u32) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<u32> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u32>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: u32) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<u64> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: u64) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<u64> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u64>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: u64) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<u8> for &Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: u8) -> Self::Output

Performs the - operation. Read more
Source§

impl<T, const DEVICE: usize, A> Sub<u8> for Tensor<T, Cpu, DEVICE, A>

Source§

type Output = Tensor<<T as NormalOut<u8>>::Output, Cpu, DEVICE, A>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: u8) -> Self::Output

Performs the - operation. Read more
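The Sub impls above cover tensor-tensor, tensor-scalar, and scalar-tensor operands, each for both owned and borrowed tensors. A sketch; the import paths and the shape-argument form are assumptions.

use hpt::Tensor;
use hpt::ops::TensorCreator;   // assumed trait path

fn sub_example() {
    let a = Tensor::<f32>::full(3.0, [2, 2]).expect("full");
    let b = Tensor::<f32>::ones([2, 2]).expect("ones");
    // Tensor - Tensor; the &Tensor forms keep the operands alive.
    let c = &a - &b;         // every element: 3.0 - 1.0 = 2.0
    // Tensor - scalar and scalar - Tensor are both implemented.
    let d = &c - 0.5f32;     // 1.5
    let _e = 10.0f32 - d;    // 8.5 (this form consumes d)
}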
Source§

impl<T, U, const DEVICE: usize, A> SubAssign<&Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

fn sub_assign(&mut self, rhs: &Tensor<U, Cpu, DEVICE, A>)

Performs the -= operation. Read more
Source§

impl<T, U, const DEVICE: usize, A> SubAssign<Tensor<U, Cpu, DEVICE, A>> for Tensor<T, Cpu, DEVICE, A>

Source§

fn sub_assign(&mut self, rhs: Tensor<U, Cpu, DEVICE, A>)

Performs the -= operation. Read more
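SubAssign updates the left-hand tensor in place; the &Tensor form leaves the right-hand operand usable afterwards. Sketch with the same assumed import paths and shape-argument form.

use hpt::Tensor;
use hpt::ops::TensorCreator;   // assumed trait path

fn sub_assign_example() {
    let mut acc = Tensor::<f32>::full(5.0, [3]).expect("full");
    let step = Tensor::<f32>::ones([3]).expect("ones");
    acc -= &step;   // in place: every element is now 4.0
    acc -= step;    // 3.0; this form consumes `step`
}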
Source§

impl<T, C, const DEVICE: usize, Al> TensorCmp<T, C> for Tensor<T, Cpu, DEVICE, Al>
where T: CommonBounds + Cmp<C, Output = bool>, C: CommonBounds, T::Vec: SimdCmp<C::Vec>, <T::Vec as SimdCmp<C::Vec>>::Output: IntoVec<boolx16>, Al: Allocator, Al::Output: AllocatorOutputRetrive,

Source§

type RHS = Tensor<C, Cpu, DEVICE, Al>

The right-hand-side tensor type.
Source§

type Output = Tensor<bool, Cpu, DEVICE, Al>

The output tensor type, normally a boolean tensor.
Source§

fn tensor_neq<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
where D: Borrow<Self::RHS>,

Checks element-wise whether each element of self is not equal to the corresponding element of rhs. Read more
Source§

fn tensor_eq<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
where D: Borrow<Self::RHS>,

Checks element-wise whether each element of self is equal to the corresponding element of rhs. Read more
Source§

fn tensor_lt<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
where D: Borrow<Self::RHS>,

Checks element-wise whether each element of self is less than the corresponding element of rhs. Read more
Source§

fn tensor_gt<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
where D: Borrow<Self::RHS>,

Checks element-wise whether each element of self is greater than the corresponding element of rhs. Read more
Source§

fn tensor_le<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
where D: Borrow<Self::RHS>,

Checks element-wise whether each element of self is less than or equal to the corresponding element of rhs. Read more
Source§

fn tensor_ge<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
where D: Borrow<Self::RHS>,

Checks element-wise whether each element of self is greater than or equal to the corresponding element of rhs. Read more
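Each TensorCmp method returns Result<Tensor<bool, ...>, TensorError>, and the right-hand side may be anything that borrows the RHS tensor type. A sketch; the import paths and the shape-argument form are assumptions.

use hpt::Tensor;
use hpt::ops::{TensorCmp, TensorCreator};   // assumed trait paths

fn cmp_example() {
    let x = Tensor::<i64>::arange(0i64, 6).expect("arange");   // [0, 1, 2, 3, 4, 5]
    let y = Tensor::<i64>::full(3, [6]).expect("full");
    let lt = x.tensor_lt(&y).expect("lt");   // [true, true, true, false, false, false]
    let ge = x.tensor_ge(&y).expect("ge");   // the element-wise complement of `lt` for integers
    let _ = (lt, ge);
}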
Source§

impl<T: CommonBounds, const DEVICE: usize, Al> TensorCreator for Tensor<T, Cpu, DEVICE, Al>

Source§

type Output = Tensor<T, Cpu, DEVICE, Al>

The output type of the tensor.
Source§

type Meta = T

The meta type of the tensor.
Source§

fn empty<S: Into<Shape>>(shape: S) -> Result<Self::Output, TensorError>

Creates a new uninitialized tensor with the specified shape. The tensor’s values will be whatever was in memory at the time of allocation. Read more
Source§

fn zeros<S: Into<Shape>>(shape: S) -> Result<Self::Output, TensorError>

Creates a new tensor of the specified shape, filled with zeros. Read more
Source§

fn ones<S: Into<Shape>>(shape: S) -> Result<Self::Output, TensorError>
where u8: Cast<T>,

Creates a new tensor of the specified shape, filled with ones. Read more
Source§

fn empty_like(&self) -> Result<Self::Output, TensorError>

Creates a new uninitialized tensor with the same shape as the input tensor. Read more
Source§

fn zeros_like(&self) -> Result<Self::Output, TensorError>

Creates a new zeroed tensor with the same shape as the input tensor. Read more
Source§

fn ones_like(&self) -> Result<Self::Output, TensorError>
where u8: Cast<T>,

Creates a new tensor with all ones with the same shape as the input tensor. Read more
Source§

fn full<S: Into<Shape>>(val: T, shape: S) -> Result<Self::Output, TensorError>

Creates a new tensor of the specified shape, filled with a specified value. Read more
Source§

fn full_like(&self, val: T) -> Result<Self::Output, TensorError>

Creates a new tensor filled with a specified value with the same shape as the input tensor. Read more
Source§

fn arange<U>(start: U, end: U) -> Result<Self::Output, TensorError>
where usize: Cast<T>, U: Cast<i64> + Cast<T> + Copy,

Creates a 1-D tensor with evenly spaced values within a given interval [start, end). Read more
Source§

fn arange_step(start: T, end: T, step: T) -> Result<Self::Output, TensorError>
where T: Cast<f64>, f64: Cast<T>, usize: Cast<T>,

Creates a 1-D tensor with evenly spaced values within a given interval [start, end) with a specified step size. Read more
Source§

fn eye(n: usize, m: usize, k: usize) -> Result<Self::Output, TensorError>

Creates a 2-D tensor with ones on the k-th diagonal and zeros elsewhere. Read more
Source§

fn linspace<U>(start: U, end: U, num: usize, include_end: bool) -> Result<Self::Output, TensorError>
where U: Cast<f64> + Cast<T> + Copy, usize: Cast<T>, f64: Cast<T>,

Creates a 1-D tensor of num evenly spaced values between start and end. Read more
Source§

fn logspace<V: Cast<T>>(start: V, end: V, num: usize, include_end: bool, base: V) -> Result<Self::Output, TensorError>
where T: Cast<f64> + Float + FloatOutBinary<T, Output = T>, usize: Cast<T>, f64: Cast<T>,

Creates a 1-D tensor with num numbers logarithmically spaced between base^start and base^end. Read more
Source§

fn geomspace<V: Cast<T>>(start: V, end: V, n: usize, include_end: bool) -> Result<Self::Output, TensorError>
where f64: Cast<T>, usize: Cast<T>, T: Cast<f64> + FloatOutBinary<T, Output = T>,

Creates a 1-D tensor with n numbers geometrically spaced between start and end. Read more
Source§

fn tri(n: usize, m: usize, k: i64, low_triangle: bool) -> Result<Self::Output, TensorError>
where u8: Cast<T>,

Creates a tensor with ones at and below (or above) the k-th diagonal. Read more
Source§

fn tril(&self, k: i64) -> Result<Self::Output, TensorError>
where T: NormalOut<bool, Output = T> + Cast<T> + TypeCommon, T::Vec: NormalOut<boolx16, Output = T::Vec>,

Returns a copy of the tensor with elements above the k-th diagonal zeroed. Read more
Source§

fn triu(&self, k: i64) -> Result<Self::Output, TensorError>
where T: NormalOut<bool, Output = T> + Cast<T> + TypeCommon, T::Vec: NormalOut<boolx16, Output = T::Vec>,

Returns a copy of the tensor with elements below the k-th diagonal zeroed. Read more
Source§

fn identity(n: usize) -> Result<Self::Output, TensorError>
where u8: Cast<T>,

Creates a 2-D identity tensor (1’s on the main diagonal and 0’s elsewhere). Read more
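A few of the TensorCreator constructors side by side; the import paths and the shape-argument form are assumptions for illustration.

use hpt::Tensor;
use hpt::ops::TensorCreator;   // assumed trait path

fn creator_example() {
    let z = Tensor::<f32>::zeros([2, 3]).expect("zeros");            // all 0.0
    let o = z.ones_like().expect("ones_like");                       // same shape, all 1.0
    let r = Tensor::<f32>::arange(0.0, 5.0).expect("arange");        // [0, 1, 2, 3, 4]
    let l = Tensor::<f32>::linspace(0.0, 1.0, 5, true).expect("linspace"); // 5 values from 0.0 through 1.0
    let eye = Tensor::<f32>::eye(3, 3, 0).expect("eye");             // 3 x 3, ones on the main diagonal
    let _ = (z, o, r, l, eye);
}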
Source§

impl<A, B> TensorDot<Tensor<B>> for Tensor<A>
where _Tensor<A>: TensorDot<_Tensor<B>, Output = _Tensor<<A as NormalOut<B>>::Output>>, A: CommonBounds + NormalOut<B>, B: CommonBounds, <A as NormalOut<B>>::Output: CommonBounds,

Source§

type Output = Tensor<<A as NormalOut<B>>::Output>

The output tensor type.
Source§

fn tensordot<const N: usize>(&self, rhs: &Tensor<B>, axes: ([i64; N], [i64; N])) -> Result<Self::Output, TensorError>

Compute tensor dot product along specified axes. This is a generalization of matrix multiplication to higher dimensions. Read more
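tensordot contracts N axes of self against N axes of rhs; with one contracted axis on each side it reduces to an ordinary matrix product. Sketch; the import paths and the shape-argument form are assumptions.

use hpt::Tensor;
use hpt::ops::{TensorCreator, TensorDot};   // assumed trait paths

fn tensordot_example() {
    let a = Tensor::<f32>::ones([2, 3]).expect("ones");
    let b = Tensor::<f32>::ones([3, 4]).expect("ones");
    // Contract axis 1 of `a` with axis 0 of `b` (N = 1).
    let c = a.tensordot::<1>(&b, ([1], [0])).expect("tensordot");
    let _ = c;   // shape [2, 4], every element 3.0
}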
Source§

impl<T, B, const DEVICE: usize, A> TensorInfo<T> for &Tensor<T, B, DEVICE, A>

Source§

fn ptr(&self) -> Pointer<T>

Returns a pointer to the tensor’s first element.
Source§

fn size(&self) -> usize

Returns the number of elements in the tensor, computed from its shape.
Source§

fn shape(&self) -> &Shape

Returns the shape of the tensor.
Source§

fn strides(&self) -> &Strides

Returns the strides of the tensor.
Source§

fn layout(&self) -> &Layout

Returns the layout of the tensor. Layout contains shape and strides.
Source§

fn parent(&self) -> Option<Pointer<T>>

Returns the root tensor, if any. Read more
Source§

fn ndim(&self) -> usize

Returns the number of dimensions of the tensor.
Source§

fn is_contiguous(&self) -> bool

Returns whether the tensor is contiguous in memory. View or transpose tensors are not contiguous.
Source§

fn elsize() -> usize

Returns the data type memory size in bytes.
Source§

impl<T, B, const DEVICE: usize, A> TensorInfo<T> for &mut Tensor<T, B, DEVICE, A>

Source§

fn ptr(&self) -> Pointer<T>

Returns a pointer to the tensor’s first element.
Source§

fn size(&self) -> usize

Returns the number of elements in the tensor, computed from its shape.
Source§

fn shape(&self) -> &Shape

Returns the shape of the tensor.
Source§

fn strides(&self) -> &Strides

Returns the strides of the tensor.
Source§

fn layout(&self) -> &Layout

Returns the layout of the tensor. Layout contains shape and strides.
Source§

fn parent(&self) -> Option<Pointer<T>>

Returns the root tensor, if any. Read more
Source§

fn ndim(&self) -> usize

Returns the number of dimensions of the tensor.
Source§

fn is_contiguous(&self) -> bool

Returns whether the tensor is contiguous in memory. View or transpose tensors are not contiguous.
Source§

fn elsize() -> usize

Returns the data type memory size in bytes.
Source§

impl<T, B, const DEVICE: usize, A> TensorInfo<T> for Tensor<T, B, DEVICE, A>

Source§

fn ptr(&self) -> Pointer<T>

Returns a pointer to the tensor’s first element.
Source§

fn size(&self) -> usize

Returns the number of elements in the tensor, computed from its shape.
Source§

fn shape(&self) -> &Shape

Returns the shape of the tensor.
Source§

fn strides(&self) -> &Strides

Returns the strides of the tensor.
Source§

fn layout(&self) -> &Layout

Returns the layout of the tensor. Layout contains shape and strides.
Source§

fn parent(&self) -> Option<Pointer<T>>

Returns the root tensor, if any. Read more
Source§

fn ndim(&self) -> usize

Returns the number of dimensions of the tensor.
Source§

fn is_contiguous(&self) -> bool

Returns whether the tensor is contiguous in memory. View or transpose tensors are not contiguous.
Source§

fn elsize() -> usize

Returns the data type memory size in bytes.
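TensorInfo is the read-only metadata view, implemented for Tensor, &Tensor, and &mut Tensor alike. Sketch; the import paths, the shape-argument form, and Debug formatting of Shape/Strides are assumptions.

use hpt::Tensor;
use hpt::ops::{TensorCreator, TensorInfo};   // assumed trait paths

fn info_example() {
    let t = Tensor::<f32>::zeros([2, 3, 4]).expect("zeros");
    println!("shape   = {:?}", t.shape());     // assumes Shape: Debug
    println!("strides = {:?}", t.strides());   // assumes Strides: Debug
    println!("ndim    = {}", t.ndim());        // 3
    println!("size    = {}", t.size());        // 24 elements
    println!("contig  = {}", t.is_contiguous());
}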
Source§

impl<'a, T: CommonBounds, const DEVICE: usize, Al> TensorIterator<'a, T> for Tensor<T, Cpu, DEVICE, Al>

Source§

fn iter(&'a self) -> Strided<T>

Convert the tensor into a strided iterator. Read more
Source§

fn iter_mut(&'a mut self) -> StridedMut<'a, T>

Convert the tensor into a mutable strided iterator. Read more
Source§

fn iter_simd(&'a self) -> StridedSimd<T>

Convert the tensor into a strided simd iterator. Read more
Source§

fn iter_mut_simd(&'a self) -> StridedMutSimd<'a, T>

Convert the tensor into a mutable strided simd iterator. Read more
Source§

fn par_iter_simd(&'a self) -> ParStridedSimd<T>

Convert the tensor into a parallel strided simd iterator. Read more
Source§

fn par_iter_mut_simd(&'a mut self) -> ParStridedMutSimd<'a, T>

Convert the tensor into a mutable parallel strided simd iterator. Read more
Source§

fn par_iter(&'a self) -> ParStrided<T>

Convert the tensor into a parallel strided iterator. Read more
Source§

fn par_iter_mut(&'a mut self) -> ParStridedMut<'a, T>

Convert the tensor into a mutable parallel strided iterator. Read more
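A minimal sketch of the sequential strided iterator. It assumes, which this page does not confirm, that Strided<T> implements the standard Iterator trait; the import paths are likewise assumptions. par_iter() and the *_simd variants follow the same pattern.

use hpt::Tensor;
use hpt::iter::TensorIterator;   // assumed trait path
use hpt::ops::TensorCreator;     // assumed trait path

fn iter_example() {
    let t = Tensor::<f32>::ones([2, 2]).expect("ones");
    // Visit elements in logical (strided) order; works on views as well.
    for v in t.iter() {          // assumes Strided<T>: Iterator
        let _ = v;
    }
}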
Source§

impl<T, const DEVICE: usize, A> TensorLike<T> for Tensor<T, Cpu, DEVICE, A>
where T: CommonBounds, A: Allocator,

Source§

fn as_raw(&self) -> &[T]

Directly converts the tensor to a raw slice. Read more
Source§

fn as_raw_mut(&mut self) -> &mut [T]

Directly converts the tensor to a mutable raw slice. Read more
Source§

fn elsize() -> usize

Returns the data type memory size in bytes.
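as_raw/as_raw_mut expose the underlying buffer as an ordinary slice, which is handy for bulk initialization. Sketch; the import paths and the shape-argument form are assumptions.

use hpt::Tensor;
use hpt::ops::{TensorCreator, TensorLike};   // assumed trait paths

fn raw_example() {
    let mut t = Tensor::<f32>::zeros([2, 2]).expect("zeros");
    // Write through the mutable slice view of the buffer.
    for v in t.as_raw_mut() {
        *v = 1.0;
    }
    assert_eq!(t.as_raw(), &[1.0f32; 4]);
}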
Source§

impl<T, const DEVICE: usize, Al> TensorWhere for Tensor<T, Cpu, DEVICE, Al>

Source§

type Output = Tensor<T, Cpu, DEVICE, Al>

The type of the output tensor
Source§

type Condition = Tensor<bool, Cpu, DEVICE, Al>

The type of the condition tensor
Source§

fn tensor_where(condition: &Self::Condition, x: &Self::Output, y: &Self::Output) -> Result<Self::Output, TensorError>

Element-wise selection based on a condition tensor. Returns a tensor of elements selected from x where condition is true, and from y where condition is false. Read more
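tensor_where is an associated function: it takes a boolean condition tensor plus the two candidate tensors and selects element-wise. Sketch; the import paths and the shape-argument form are assumptions.

use hpt::Tensor;
use hpt::ops::{TensorCmp, TensorCreator, TensorWhere};   // assumed trait paths

fn where_example() {
    let t = Tensor::<f32>::arange(0.0, 4.0).expect("arange");   // [0, 1, 2, 3]
    let ones = Tensor::<f32>::ones([4]).expect("ones");
    let neg = Tensor::<f32>::full(-1.0, [4]).expect("full");
    let cond = t.tensor_lt(&ones).expect("lt");                 // [true, false, false, false]
    // Take from `ones` where `cond` is true, from `neg` otherwise.
    let picked = Tensor::<f32>::tensor_where(&cond, &ones, &neg).expect("where");
    let _ = picked;   // [1.0, -1.0, -1.0, -1.0]
}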
Source§

impl<T, const DEVICE_ID: usize, A> ToDataLoader for Tensor<T, Cpu, DEVICE_ID, A>
where T: CommonBounds, A: Allocator,

Source§

type Output = DataLoader<T, Tensor<T, Cpu, DEVICE_ID, A>>

The output type of the conversion.
Source§

fn to_dataloader(self) -> Self::Output

Converts the tensor into a DataLoader.
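The conversion itself is a single consuming call; nothing more is shown here because DataLoader's own API is not documented on this page, and the import paths below are assumptions.

use hpt::Tensor;
use hpt::ops::TensorCreator;   // assumed trait path
use hpt::ToDataLoader;         // assumed re-export of the trait

fn dataloader_example() {
    let t = Tensor::<f32>::zeros([8, 16]).expect("zeros");
    // Consumes the tensor and returns DataLoader<f32, Tensor<f32, ...>>.
    let _loader = t.to_dataloader();
}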
Source§

impl<T, const DEVICE: usize, Al> WindowOps for Tensor<T, Cpu, DEVICE, Al>
where T: CommonBounds, _Tensor<T, Cpu, DEVICE, Al>: WindowOps<Output = _Tensor<T, Cpu, DEVICE, Al>> + Into<Tensor<T, Cpu, DEVICE, Al>>, Al: Allocator, Al::Output: AllocatorOutputRetrive,

Source§

type Output = Tensor<T, Cpu, DEVICE, Al>

The type of the output tensor
Source§

type Meta = T

The type of the meta data
Source§

fn hamming_window(window_length: i64, periodic: bool) -> Result<Self::Output, TensorError>

Generates a Hamming window of a specified length. Read more
Source§

fn hann_window(window_length: i64, periodic: bool) -> Result<Self::Output, TensorError>

Generates a Hann window of a specified length. Read more
Source§

fn blackman_window(window_length: i64, periodic: bool) -> Result<Self::Output, TensorError>

Generates a Blackman window tensor. Read more
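The window constructors are associated functions parameterized by length and periodicity. Sketch; the import paths, and f32 satisfying the underlying WindowOps bounds, are assumptions.

use hpt::Tensor;
use hpt::ops::WindowOps;   // assumed trait path

fn window_example() {
    // Periodic windows of length 512, e.g. for STFT framing.
    let hann = Tensor::<f32>::hann_window(512, true).expect("hann");
    let hamming = Tensor::<f32>::hamming_window(512, true).expect("hamming");
    let blackman = Tensor::<f32>::blackman_window(512, true).expect("blackman");
    let _ = (hann, hamming, blackman);
}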

Auto Trait Implementations§

§

impl<T, B, const DEVICE_ID: usize, A> Freeze for Tensor<T, B, DEVICE_ID, A>

§

impl<T, B, const DEVICE_ID: usize, A> RefUnwindSafe for Tensor<T, B, DEVICE_ID, A>

§

impl<T, B, const DEVICE_ID: usize, A> Send for Tensor<T, B, DEVICE_ID, A>
where B: Sync + Send, A: Sync + Send,

§

impl<T, B, const DEVICE_ID: usize, A> Sync for Tensor<T, B, DEVICE_ID, A>
where B: Sync + Send, A: Sync + Send,

§

impl<T, B, const DEVICE_ID: usize, A> Unpin for Tensor<T, B, DEVICE_ID, A>

§

impl<T, B, const DEVICE_ID: usize, A> UnwindSafe for Tensor<T, B, DEVICE_ID, A>

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of the pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T> ToString for T
where T: Display + ?Sized,

Source§

fn to_string(&self) -> String

Converts the given value to a String. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T, Rhs, Output> NumOps<Rhs, Output> for T
where T: Sub<Rhs, Output = Output> + Mul<Rhs, Output = Output> + Div<Rhs, Output = Output> + Add<Rhs, Output = Output> + Rem<Rhs, Output = Output>,

Source§

impl<T, Base> RefNum<Base> for T
where T: NumOps<Base, Base> + for<'r> NumOps<&'r Base, Base>,