pub struct DiffTensor<T, B = Cpu, const DEVICE_ID: usize = 0, A = HptAllocator<B>>{ /* private fields */ }Expand description
DiffTensor is a tensor that has a gradient.
Implementations§
Source§impl<T, const DEVICE: usize, A> DiffTensor<T, Cpu, DEVICE, A>
impl<T, const DEVICE: usize, A> DiffTensor<T, Cpu, DEVICE, A>
Sourcepub fn log_softmax(
&self,
axis: i64,
) -> Result<DiffTensor<<T as FloatOutUnary>::Output, Cpu, DEVICE, A>, TensorError>where
T: CommonBounds + Cast<<T as FloatOutUnary>::Output> + FloatOutUnary,
<T as FloatOutUnary>::Output: CommonBounds + NormalOut<T, Output = <T as FloatOutUnary>::Output> + FloatOutUnary<Output = <T as FloatOutUnary>::Output> + Cast<T>,
T::Vec: FloatOutUnary<Output = <<T as FloatOutUnary>::Output as TypeCommon>::Vec> + IntoVec<<<T as FloatOutUnary>::Output as TypeCommon>::Vec>,
<<T as FloatOutUnary>::Output as TypeCommon>::Vec: FloatOutBinary<Output = <<T as FloatOutUnary>::Output as TypeCommon>::Vec> + FloatOutUnary<Output = <<T as FloatOutUnary>::Output as TypeCommon>::Vec>,
pub fn log_softmax(
&self,
axis: i64,
) -> Result<DiffTensor<<T as FloatOutUnary>::Output, Cpu, DEVICE, A>, TensorError>where
T: CommonBounds + Cast<<T as FloatOutUnary>::Output> + FloatOutUnary,
<T as FloatOutUnary>::Output: CommonBounds + NormalOut<T, Output = <T as FloatOutUnary>::Output> + FloatOutUnary<Output = <T as FloatOutUnary>::Output> + Cast<T>,
T::Vec: FloatOutUnary<Output = <<T as FloatOutUnary>::Output as TypeCommon>::Vec> + IntoVec<<<T as FloatOutUnary>::Output as TypeCommon>::Vec>,
<<T as FloatOutUnary>::Output as TypeCommon>::Vec: FloatOutBinary<Output = <<T as FloatOutUnary>::Output as TypeCommon>::Vec> + FloatOutUnary<Output = <<T as FloatOutUnary>::Output as TypeCommon>::Vec>,
Applies the log-softmax function along a specified axis.
The log-softmax function computes the logarithm of the softmax: the exponentials of the resulting elements sum to 1 along the specified axis, so the outputs are log-probabilities.
It is commonly used in machine learning models, particularly for multi-class classification tasks, and is more numerically stable than computing the softmax and its logarithm in two separate steps.
The log-softmax function transforms each element x_i in the input tensor by computing:
logsoftmax(x_i) = log(exp(x_i) / sum(exp(x_j))) for all j along the specified axis
§Arguments
axis - The axis along which to apply the log-softmax function. The exponentials of the elements along this axis will sum to 1.
§Returns
This function returns a Result containing a tensor with the log-softmax values computed along
the specified axis.
Source§impl<T, const DEVICE: usize, A> DiffTensor<T, Cpu, DEVICE, A>
impl<T, const DEVICE: usize, A> DiffTensor<T, Cpu, DEVICE, A>
Sourcepub fn softmax(
&self,
axis: i64,
) -> Result<DiffTensor<<T as FloatOutUnary>::Output, Cpu, DEVICE, A>, TensorError>where
T: CommonBounds + Cast<<T as FloatOutUnary>::Output> + FloatOutUnary,
<T as FloatOutUnary>::Output: CommonBounds + NormalOut<T, Output = <T as FloatOutUnary>::Output> + FloatOutUnary<Output = <T as FloatOutUnary>::Output> + Cast<T>,
T::Vec: FloatOutUnary<Output = <<T as FloatOutUnary>::Output as TypeCommon>::Vec>,
<<T as FloatOutUnary>::Output as TypeCommon>::Vec: FloatOutBinary<Output = <<T as FloatOutUnary>::Output as TypeCommon>::Vec>,
pub fn softmax(
&self,
axis: i64,
) -> Result<DiffTensor<<T as FloatOutUnary>::Output, Cpu, DEVICE, A>, TensorError>where
T: CommonBounds + Cast<<T as FloatOutUnary>::Output> + FloatOutUnary,
<T as FloatOutUnary>::Output: CommonBounds + NormalOut<T, Output = <T as FloatOutUnary>::Output> + FloatOutUnary<Output = <T as FloatOutUnary>::Output> + Cast<T>,
T::Vec: FloatOutUnary<Output = <<T as FloatOutUnary>::Output as TypeCommon>::Vec>,
<<T as FloatOutUnary>::Output as TypeCommon>::Vec: FloatOutBinary<Output = <<T as FloatOutUnary>::Output as TypeCommon>::Vec>,
Applies the softmax function along a specified axis.
The softmax function normalizes the elements along the specified axis such that they sum to 1.
It is commonly used in machine learning models, particularly for multi-class classification tasks.
The softmax function transforms each element x_i in the input tensor into a probability by computing:
softmax(x_i) = exp(x_i) / sum(exp(x_j)) for all j along the specified axis
§Arguments
axis - The axis along which to apply the softmax function. The elements along this axis will be transformed into probabilities that sum to 1.
§Returns
This function returns a Result containing a tensor with the softmax values computed along
the specified axis.
Trait Implementations§
Source§impl<T, U, const DEVICE: usize, A> Add<&DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Add<&DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Add<&DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Add<&DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Add<DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Add<DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Add<DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Add<DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, const DEVICE: usize, Al> AdvancedOps for DiffTensor<T, Cpu, DEVICE, Al>where
T: NormalOut<bool, Output = T> + Cast<i64> + CommonBounds + PartialOrd,
f64: Cast<T>,
Al: Allocator + Send + Sync + 'static,
Al::Output: AllocatorOutputRetrive,
impl<T, const DEVICE: usize, Al> AdvancedOps for DiffTensor<T, Cpu, DEVICE, Al>where
T: NormalOut<bool, Output = T> + Cast<i64> + CommonBounds + PartialOrd,
f64: Cast<T>,
Al: Allocator + Send + Sync + 'static,
Al::Output: AllocatorOutputRetrive,
Source§type Output = DiffTensor<T, Cpu, DEVICE, Al>
type Output = DiffTensor<T, Cpu, DEVICE, Al>
Source§fn pad(
&self,
pads: &[(i64, i64)],
val: Self::Meta,
) -> Result<Self::Output, TensorError>
fn pad( &self, pads: &[(i64, i64)], val: Self::Meta, ) -> Result<Self::Output, TensorError>
Source§fn topk(
&self,
k: i64,
dim: i64,
largest: bool,
sorted: bool,
) -> Result<(Self::IndexOutput, Self::Output), TensorError>
fn topk( &self, k: i64, dim: i64, largest: bool, sorted: bool, ) -> Result<(Self::IndexOutput, Self::Output), TensorError>
Source§fn onehot(
&self,
_: usize,
_: i64,
_: Self::Meta,
_: Self::Meta,
) -> Result<Self::Output, TensorError>
fn onehot( &self, _: usize, _: i64, _: Self::Meta, _: Self::Meta, ) -> Result<Self::Output, TensorError>
Source§fn dropout(&self, _: f64) -> Result<Self::Output, TensorError>
fn dropout(&self, _: f64) -> Result<Self::Output, TensorError>
Source§fn scatter(
&self,
_: &Self::IndexOutput,
_: i64,
_: &Self::Output,
) -> Result<Self::Output, TensorError>
fn scatter( &self, _: &Self::IndexOutput, _: i64, _: &Self::Output, ) -> Result<Self::Output, TensorError>
Source§impl<T: Clone, B, const DEVICE_ID: usize, A> Clone for DiffTensor<T, B, DEVICE_ID, A>
impl<T: Clone, B, const DEVICE_ID: usize, A> Clone for DiffTensor<T, B, DEVICE_ID, A>
Source§fn clone(&self) -> DiffTensor<T, B, DEVICE_ID, A>
fn clone(&self) -> DiffTensor<T, B, DEVICE_ID, A>
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Source§impl<T: CommonBounds, const DEVICE: usize, Al> Concat for DiffTensor<T, Cpu, DEVICE, Al>
impl<T: CommonBounds, const DEVICE: usize, Al> Concat for DiffTensor<T, Cpu, DEVICE, Al>
Source§type Output = DiffTensor<T, Cpu, DEVICE, Al>
type Output = DiffTensor<T, Cpu, DEVICE, Al>
Source§fn concat(
tensors: Vec<Self>,
axis: usize,
keepdims: bool,
) -> Result<Self::Output, TensorError>
fn concat( tensors: Vec<Self>, axis: usize, keepdims: bool, ) -> Result<Self::Output, TensorError>
Source§fn vstack(tensors: Vec<Self>) -> Result<Self::Output, TensorError>
fn vstack(tensors: Vec<Self>) -> Result<Self::Output, TensorError>
Source§impl<T, U, const DEVICE: usize, A> Div<&DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + FloatOutBinary<U>,
U: CommonBounds,
<T as FloatOutBinary<U>>::Output: CommonBounds + Cast<U> + Cast<<T as FloatOutBinary<U>>::Output> + FloatOutBinary<U>,
T::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + Cast<T>,
<<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Div<&DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + FloatOutBinary<U>,
U: CommonBounds,
<T as FloatOutBinary<U>>::Output: CommonBounds + Cast<U> + Cast<<T as FloatOutBinary<U>>::Output> + FloatOutBinary<U>,
T::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + Cast<T>,
<<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§type Output = DiffTensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>
type Output = DiffTensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>
The resulting type after applying the / operator.
Source§impl<T, U, const DEVICE: usize, A> Div<&DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + FloatOutBinary<U>,
U: CommonBounds,
<T as FloatOutBinary<U>>::Output: CommonBounds + Cast<U> + Cast<<T as FloatOutBinary<U>>::Output> + FloatOutBinary<U>,
T::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + Cast<T>,
<<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Div<&DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + FloatOutBinary<U>,
U: CommonBounds,
<T as FloatOutBinary<U>>::Output: CommonBounds + Cast<U> + Cast<<T as FloatOutBinary<U>>::Output> + FloatOutBinary<U>,
T::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + Cast<T>,
<<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§type Output = DiffTensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>
type Output = DiffTensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>
The resulting type after applying the / operator.
Source§impl<T, U, const DEVICE: usize, A> Div<DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + FloatOutBinary<U>,
U: CommonBounds,
<T as FloatOutBinary<U>>::Output: CommonBounds + Cast<U> + Cast<<T as FloatOutBinary<U>>::Output> + FloatOutBinary<U>,
T::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + Cast<T>,
<<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Div<DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + FloatOutBinary<U>,
U: CommonBounds,
<T as FloatOutBinary<U>>::Output: CommonBounds + Cast<U> + Cast<<T as FloatOutBinary<U>>::Output> + FloatOutBinary<U>,
T::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + Cast<T>,
<<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§type Output = DiffTensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>
type Output = DiffTensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>
The resulting type after applying the / operator.
Source§impl<T, U, const DEVICE: usize, A> Div<DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + FloatOutBinary<U>,
U: CommonBounds,
<T as FloatOutBinary<U>>::Output: CommonBounds + Cast<U> + Cast<<T as FloatOutBinary<U>>::Output> + FloatOutBinary<U>,
T::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + Cast<T>,
<<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Div<DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + FloatOutBinary<U>,
U: CommonBounds,
<T as FloatOutBinary<U>>::Output: CommonBounds + Cast<U> + Cast<<T as FloatOutBinary<U>>::Output> + FloatOutBinary<U>,
T::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + Cast<T>,
<<T as FloatOutBinary<U>>::Output as TypeCommon>::Vec: FloatOutBinary<<U as TypeCommon>::Vec, Output = <<<T as FloatOutBinary<U>>::Output as FloatOutBinary<U>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§type Output = DiffTensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>
type Output = DiffTensor<<T as FloatOutBinary<U>>::Output, Cpu, DEVICE, A>
The resulting type after applying the / operator.
Source§impl<T: CommonBounds + NormalOut<Output = T> + Cmp<T, Output = bool>, const DEVICE: usize> IndexReduce for DiffTensor<T, Cpu, DEVICE>
impl<T: CommonBounds + NormalOut<Output = T> + Cmp<T, Output = bool>, const DEVICE: usize> IndexReduce for DiffTensor<T, Cpu, DEVICE>
Source§impl<A, B, const DEVICE: usize, Al> Matmul<DiffTensor<B, Cpu, DEVICE, Al>> for DiffTensor<A, Cpu, DEVICE, Al>where
A: CommonBounds + NormalOut<B> + Cast<<A as NormalOut<B>>::Output> + NormalOut<<A as NormalOut<B>>::Output> + Cast<<A as NormalOut<<A as NormalOut<B>>::Output>>::Output>,
B: CommonBounds + Cast<<A as NormalOut<B>>::Output> + Cast<<<A as NormalOut<B>>::Output as NormalOut<B>>::Output>,
<A as NormalOut<B>>::Output: CommonBounds + NormalOut<A> + NormalOut<B> + Cast<<<A as NormalOut<B>>::Output as NormalOut<B>>::Output> + Cast<<A as NormalOut<<A as NormalOut<B>>::Output>>::Output>,
<<A as NormalOut<B>>::Output as NormalOut<B>>::Output: CommonBounds + Cast<<A as NormalOut<<A as NormalOut<B>>::Output>>::Output> + Cast<A>,
<A as NormalOut<<A as NormalOut<B>>::Output>>::Output: CommonBounds + Cast<B>,
Al: Allocator + 'static + Send + Sync,
Al::Output: AllocatorOutputRetrive,
impl<A, B, const DEVICE: usize, Al> Matmul<DiffTensor<B, Cpu, DEVICE, Al>> for DiffTensor<A, Cpu, DEVICE, Al>where
A: CommonBounds + NormalOut<B> + Cast<<A as NormalOut<B>>::Output> + NormalOut<<A as NormalOut<B>>::Output> + Cast<<A as NormalOut<<A as NormalOut<B>>::Output>>::Output>,
B: CommonBounds + Cast<<A as NormalOut<B>>::Output> + Cast<<<A as NormalOut<B>>::Output as NormalOut<B>>::Output>,
<A as NormalOut<B>>::Output: CommonBounds + NormalOut<A> + NormalOut<B> + Cast<<<A as NormalOut<B>>::Output as NormalOut<B>>::Output> + Cast<<A as NormalOut<<A as NormalOut<B>>::Output>>::Output>,
<<A as NormalOut<B>>::Output as NormalOut<B>>::Output: CommonBounds + Cast<<A as NormalOut<<A as NormalOut<B>>::Output>>::Output> + Cast<A>,
<A as NormalOut<<A as NormalOut<B>>::Output>>::Output: CommonBounds + Cast<B>,
Al: Allocator + 'static + Send + Sync,
Al::Output: AllocatorOutputRetrive,
Source§type Output = DiffTensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>
type Output = DiffTensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>
Source§type OutputMeta = <A as NormalOut<B>>::Output
type OutputMeta = <A as NormalOut<B>>::Output
Source§type InplaceOutput = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>
type InplaceOutput = Tensor<<A as NormalOut<B>>::Output, Cpu, DEVICE, Al>
Source§fn matmul(
&self,
rhs: DiffTensor<B, Cpu, DEVICE, Al>,
) -> Result<Self::Output, TensorError>
fn matmul( &self, rhs: DiffTensor<B, Cpu, DEVICE, Al>, ) -> Result<Self::Output, TensorError>
Source§fn matmul_<U>(
&self,
rhs: DiffTensor<B, Cpu, DEVICE, Al>,
out: U,
) -> Result<Self::InplaceOutput, TensorError>
fn matmul_<U>( &self, rhs: DiffTensor<B, Cpu, DEVICE, Al>, out: U, ) -> Result<Self::InplaceOutput, TensorError>
Source§impl<T, U, const DEVICE: usize, A> Mul<&DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U> + NormalOut<U> + NormalOut<T>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as NormalOut<U>>::Output: CommonBounds + Cast<T>,
<<T as NormalOut<U>>::Output as NormalOut<T>>::Output: CommonBounds + Cast<U>,
<<T as NormalOut<U>>::Output as TypeCommon>::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<U>>::Output as TypeCommon>::Vec> + NormalOut<<T as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<T>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Mul<&DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U> + NormalOut<U> + NormalOut<T>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as NormalOut<U>>::Output: CommonBounds + Cast<T>,
<<T as NormalOut<U>>::Output as NormalOut<T>>::Output: CommonBounds + Cast<U>,
<<T as NormalOut<U>>::Output as TypeCommon>::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<U>>::Output as TypeCommon>::Vec> + NormalOut<<T as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<T>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Mul<&DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U> + NormalOut<U> + NormalOut<T>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as NormalOut<U>>::Output: CommonBounds + Cast<T>,
<<T as NormalOut<U>>::Output as NormalOut<T>>::Output: CommonBounds + Cast<U>,
<<T as NormalOut<U>>::Output as TypeCommon>::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<U>>::Output as TypeCommon>::Vec> + NormalOut<<T as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<T>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Mul<&DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U> + NormalOut<U> + NormalOut<T>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as NormalOut<U>>::Output: CommonBounds + Cast<T>,
<<T as NormalOut<U>>::Output as NormalOut<T>>::Output: CommonBounds + Cast<U>,
<<T as NormalOut<U>>::Output as TypeCommon>::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<U>>::Output as TypeCommon>::Vec> + NormalOut<<T as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<T>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Mul<DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U> + NormalOut<U> + NormalOut<T>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as NormalOut<U>>::Output: CommonBounds + Cast<T>,
<<T as NormalOut<U>>::Output as NormalOut<T>>::Output: CommonBounds + Cast<U>,
<<T as NormalOut<U>>::Output as TypeCommon>::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<U>>::Output as TypeCommon>::Vec> + NormalOut<<T as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<T>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Mul<DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U> + NormalOut<U> + NormalOut<T>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as NormalOut<U>>::Output: CommonBounds + Cast<T>,
<<T as NormalOut<U>>::Output as NormalOut<T>>::Output: CommonBounds + Cast<U>,
<<T as NormalOut<U>>::Output as TypeCommon>::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<U>>::Output as TypeCommon>::Vec> + NormalOut<<T as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<T>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Mul<DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U> + NormalOut<U> + NormalOut<T>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as NormalOut<U>>::Output: CommonBounds + Cast<T>,
<<T as NormalOut<U>>::Output as NormalOut<T>>::Output: CommonBounds + Cast<U>,
<<T as NormalOut<U>>::Output as TypeCommon>::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<U>>::Output as TypeCommon>::Vec> + NormalOut<<T as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<T>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Mul<DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U> + NormalOut<U> + NormalOut<T>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as NormalOut<U>>::Output: CommonBounds + Cast<T>,
<<T as NormalOut<U>>::Output as NormalOut<T>>::Output: CommonBounds + Cast<U>,
<<T as NormalOut<U>>::Output as TypeCommon>::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<U>>::Output as TypeCommon>::Vec> + NormalOut<<T as TypeCommon>::Vec, Output = <<<T as NormalOut<U>>::Output as NormalOut<T>>::Output as TypeCommon>::Vec>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, const DEVICE: usize, Al> Random for DiffTensor<T, Cpu, DEVICE, Al>where
T: CommonBounds + SampleUniform + Float + FloatConst,
<T as SampleUniform>::Sampler: Sync,
StandardNormal: Distribution<T>,
Open01: Distribution<T>,
Exp1: Distribution<T>,
OpenClosed01: Distribution<T>,
Standard: Distribution<T>,
Al: Allocator,
Al::Output: AllocatorOutputRetrive,
impl<T, const DEVICE: usize, Al> Random for DiffTensor<T, Cpu, DEVICE, Al>where
T: CommonBounds + SampleUniform + Float + FloatConst,
<T as SampleUniform>::Sampler: Sync,
StandardNormal: Distribution<T>,
Open01: Distribution<T>,
Exp1: Distribution<T>,
OpenClosed01: Distribution<T>,
Standard: Distribution<T>,
Al: Allocator,
Al::Output: AllocatorOutputRetrive,
Source§fn randn<S: Into<Shape>>(shape: S) -> Result<Self, TensorError>
fn randn<S: Into<Shape>>(shape: S) -> Result<Self, TensorError>
Creates a tensor with values drawn from a standard normal distribution (mean = 0, standard deviation = 1).
Source§fn randn_like(&self) -> Result<Self, TensorError>
fn randn_like(&self) -> Result<Self, TensorError>
Creates a tensor with values drawn from a standard normal distribution (mean = 0, standard deviation = 1),
with the same shape as the calling instance.
Source§fn rand<S: Into<Shape>>(
shape: S,
low: Self::Meta,
high: Self::Meta,
) -> Result<Self, TensorError>
fn rand<S: Into<Shape>>( shape: S, low: Self::Meta, high: Self::Meta, ) -> Result<Self, TensorError>
Source§fn rand_like(
&self,
low: Self::Meta,
high: Self::Meta,
) -> Result<Self, TensorError>
fn rand_like( &self, low: Self::Meta, high: Self::Meta, ) -> Result<Self, TensorError>
Source§fn beta<S: Into<Shape>>(
a: Self::Meta,
b: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn beta<S: Into<Shape>>( a: Self::Meta, b: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn beta_like(&self, a: Self::Meta, b: Self::Meta) -> Result<Self, TensorError>
fn beta_like(&self, a: Self::Meta, b: Self::Meta) -> Result<Self, TensorError>
Source§fn chisquare<S: Into<Shape>>(
df: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn chisquare<S: Into<Shape>>( df: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn chisquare_like(&self, df: Self::Meta) -> Result<Self, TensorError>
fn chisquare_like(&self, df: Self::Meta) -> Result<Self, TensorError>
Source§fn exponential<S: Into<Shape>>(
lambda: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn exponential<S: Into<Shape>>( lambda: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn exponential_like(&self, lambda: Self::Meta) -> Result<Self, TensorError>
fn exponential_like(&self, lambda: Self::Meta) -> Result<Self, TensorError>
Source§fn gamma<S: Into<Shape>>(
gamm_shape: Self::Meta,
scale: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn gamma<S: Into<Shape>>( gamm_shape: Self::Meta, scale: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn gamma_like(
&self,
shape: Self::Meta,
scale: Self::Meta,
) -> Result<Self, TensorError>
fn gamma_like( &self, shape: Self::Meta, scale: Self::Meta, ) -> Result<Self, TensorError>
Source§fn gumbel<S: Into<Shape>>(
mu: Self::Meta,
beta: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn gumbel<S: Into<Shape>>( mu: Self::Meta, beta: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn gumbel_like(
&self,
mu: Self::Meta,
beta: Self::Meta,
) -> Result<Self, TensorError>
fn gumbel_like( &self, mu: Self::Meta, beta: Self::Meta, ) -> Result<Self, TensorError>
Source§fn lognormal<S: Into<Shape>>(
mean: Self::Meta,
std: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn lognormal<S: Into<Shape>>( mean: Self::Meta, std: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn lognormal_like(
&self,
mean: Self::Meta,
std: Self::Meta,
) -> Result<Self, TensorError>
fn lognormal_like( &self, mean: Self::Meta, std: Self::Meta, ) -> Result<Self, TensorError>
Source§fn normal_gaussian<S: Into<Shape>>(
mean: Self::Meta,
std: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn normal_gaussian<S: Into<Shape>>( mean: Self::Meta, std: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn normal_gaussian_like(
&self,
mean: Self::Meta,
std: Self::Meta,
) -> Result<Self, TensorError>
fn normal_gaussian_like( &self, mean: Self::Meta, std: Self::Meta, ) -> Result<Self, TensorError>
Source§fn pareto<S: Into<Shape>>(
pareto_shape: Self::Meta,
a: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn pareto<S: Into<Shape>>( pareto_shape: Self::Meta, a: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn pareto_like(
&self,
pareto_shape: Self::Meta,
a: Self::Meta,
) -> Result<Self, TensorError>
fn pareto_like( &self, pareto_shape: Self::Meta, a: Self::Meta, ) -> Result<Self, TensorError>
Source§fn poisson<S: Into<Shape>>(
lambda: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn poisson<S: Into<Shape>>( lambda: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn poisson_like(&self, lambda: Self::Meta) -> Result<Self, TensorError>
fn poisson_like(&self, lambda: Self::Meta) -> Result<Self, TensorError>
Source§fn weibull<S: Into<Shape>>(
a: Self::Meta,
b: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn weibull<S: Into<Shape>>( a: Self::Meta, b: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn weibull_like(
&self,
a: Self::Meta,
b: Self::Meta,
) -> Result<Self, TensorError>
fn weibull_like( &self, a: Self::Meta, b: Self::Meta, ) -> Result<Self, TensorError>
Source§fn zipf<S: Into<Shape>>(
n: u64,
a: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn zipf<S: Into<Shape>>( n: u64, a: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn zipf_like(&self, n: u64, a: Self::Meta) -> Result<Self, TensorError>
fn zipf_like(&self, n: u64, a: Self::Meta) -> Result<Self, TensorError>
Source§fn triangular<S: Into<Shape>>(
low: Self::Meta,
high: Self::Meta,
mode: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn triangular<S: Into<Shape>>( low: Self::Meta, high: Self::Meta, mode: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn triangular_like(
&self,
low: Self::Meta,
high: Self::Meta,
mode: Self::Meta,
) -> Result<Self, TensorError>
fn triangular_like( &self, low: Self::Meta, high: Self::Meta, mode: Self::Meta, ) -> Result<Self, TensorError>
Source§impl<T, const DEVICE: usize, Al> RandomInt for DiffTensor<T, Cpu, DEVICE, Al>
impl<T, const DEVICE: usize, Al> RandomInt for DiffTensor<T, Cpu, DEVICE, Al>
Source§fn randint<S: Into<Shape>>(
low: Self::Meta,
high: Self::Meta,
shape: S,
) -> Result<Self, TensorError>
fn randint<S: Into<Shape>>( low: Self::Meta, high: Self::Meta, shape: S, ) -> Result<Self, TensorError>
Source§fn randint_like(
&self,
low: Self::Meta,
high: Self::Meta,
) -> Result<Self, TensorError>
fn randint_like( &self, low: Self::Meta, high: Self::Meta, ) -> Result<Self, TensorError>
Source§impl<T, U, const DEVICE: usize, A> Rem<&DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<T> + FloatOutBinary<U> + Cast<<T as NormalOut<U>>::Output> + NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + NormalOutUnary,
<<T as NormalOut<U>>::Output as NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>>::Output: FloatOutUnary + CommonBounds + Cast<U>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Rem<&DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<T> + FloatOutBinary<U> + Cast<<T as NormalOut<U>>::Output> + NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + NormalOutUnary,
<<T as NormalOut<U>>::Output as NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>>::Output: FloatOutUnary + CommonBounds + Cast<U>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Rem<&DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<T> + FloatOutBinary<U> + Cast<<T as NormalOut<U>>::Output> + NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + NormalOutUnary,
<<T as NormalOut<U>>::Output as NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>>::Output: FloatOutUnary + CommonBounds + Cast<U>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Rem<&DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<T> + FloatOutBinary<U> + Cast<<T as NormalOut<U>>::Output> + NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + NormalOutUnary,
<<T as NormalOut<U>>::Output as NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>>::Output: FloatOutUnary + CommonBounds + Cast<U>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Rem<DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<T> + FloatOutBinary<U> + Cast<<T as NormalOut<U>>::Output> + NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + NormalOutUnary,
<<T as NormalOut<U>>::Output as NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>>::Output: FloatOutUnary + CommonBounds + Cast<U>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Rem<DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<T> + FloatOutBinary<U> + Cast<<T as NormalOut<U>>::Output> + NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + NormalOutUnary,
<<T as NormalOut<U>>::Output as NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>>::Output: FloatOutUnary + CommonBounds + Cast<U>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Rem<DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<T> + FloatOutBinary<U> + Cast<<T as NormalOut<U>>::Output> + NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + NormalOutUnary,
<<T as NormalOut<U>>::Output as NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>>::Output: FloatOutUnary + CommonBounds + Cast<U>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Rem<DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<T> + FloatOutBinary<U> + Cast<<T as NormalOut<U>>::Output> + NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output: CommonBounds + NormalOutUnary,
<<T as NormalOut<U>>::Output as NormalOut<<<T as NormalOut<U>>::Output as FloatOutBinary<U>>::Output>>::Output: FloatOutUnary + CommonBounds + Cast<U>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T: CommonBounds, const DEVICE: usize, Al> ShapeManipulate for DiffTensor<T, Cpu, DEVICE, Al>
impl<T: CommonBounds, const DEVICE: usize, Al> ShapeManipulate for DiffTensor<T, Cpu, DEVICE, Al>
Source§type Output = DiffTensor<T, Cpu, DEVICE, Al>
type Output = DiffTensor<T, Cpu, DEVICE, Al>
Source§fn squeeze<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>
fn squeeze<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>
Source§fn unsqueeze<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>
fn unsqueeze<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>
Source§fn reshape<S: Into<Shape>>(&self, shape: S) -> Result<Self::Output, TensorError>
fn reshape<S: Into<Shape>>(&self, shape: S) -> Result<Self::Output, TensorError>
Source§fn transpose(&self, axis1: i64, axis2: i64) -> Result<Self::Output, TensorError>
fn transpose(&self, axis1: i64, axis2: i64) -> Result<Self::Output, TensorError>
Source§fn permute<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>
fn permute<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>
Source§fn permute_inv<A: Into<Axis>>(
&self,
axes: A,
) -> Result<Self::Output, TensorError>
fn permute_inv<A: Into<Axis>>( &self, axes: A, ) -> Result<Self::Output, TensorError>
Source§fn expand<S: Into<Shape>>(&self, shape: S) -> Result<Self::Output, TensorError>
fn expand<S: Into<Shape>>(&self, shape: S) -> Result<Self::Output, TensorError>
Source§fn t(&self) -> Result<Self::Output, TensorError>
fn t(&self) -> Result<Self::Output, TensorError>
Source§fn mt(&self) -> Result<Self::Output, TensorError>
fn mt(&self) -> Result<Self::Output, TensorError>
Source§fn flip<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>
fn flip<A: Into<Axis>>(&self, axes: A) -> Result<Self::Output, TensorError>
Source§fn fliplr(&self) -> Result<Self::Output, TensorError>
fn fliplr(&self) -> Result<Self::Output, TensorError>
Source§fn flipud(&self) -> Result<Self::Output, TensorError>
fn flipud(&self) -> Result<Self::Output, TensorError>
Source§fn tile<S: Into<Axis>>(&self, repeats: S) -> Result<Self::Output, TensorError>
fn tile<S: Into<Axis>>(&self, repeats: S) -> Result<Self::Output, TensorError>
Source§fn trim_zeros(&self, trim: &str) -> Result<Self::Output, TensorError>
fn trim_zeros(&self, trim: &str) -> Result<Self::Output, TensorError>
Source§fn repeat(&self, repeats: usize, axes: i16) -> Result<Self::Output, TensorError>
fn repeat(&self, repeats: usize, axes: i16) -> Result<Self::Output, TensorError>
Source§fn split(
&self,
indices_or_sections: &[i64],
axis: i64,
) -> Result<Vec<Self>, TensorError>
fn split( &self, indices_or_sections: &[i64], axis: i64, ) -> Result<Vec<Self>, TensorError>
Source§fn dsplit(&self, indices: &[i64]) -> Result<Vec<Self>, TensorError>
fn dsplit(&self, indices: &[i64]) -> Result<Vec<Self>, TensorError>
Source§fn hsplit(&self, indices: &[i64]) -> Result<Vec<Self>, TensorError>
fn hsplit(&self, indices: &[i64]) -> Result<Vec<Self>, TensorError>
Source§fn vsplit(&self, indices: &[i64]) -> Result<Vec<Self>, TensorError>
fn vsplit(&self, indices: &[i64]) -> Result<Vec<Self>, TensorError>
Source§impl<T, U, const DEVICE: usize, A> Sub<&DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
Tensor<T, Cpu, DEVICE, A>: Neg<Output = Tensor<T, Cpu, DEVICE, A>>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Sub<&DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
Tensor<T, Cpu, DEVICE, A>: Neg<Output = Tensor<T, Cpu, DEVICE, A>>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Sub<&DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
Tensor<T, Cpu, DEVICE, A>: Neg<Output = Tensor<T, Cpu, DEVICE, A>>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Sub<&DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
Tensor<T, Cpu, DEVICE, A>: Neg<Output = Tensor<T, Cpu, DEVICE, A>>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Sub<DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
Tensor<T, Cpu, DEVICE, A>: Neg<Output = Tensor<T, Cpu, DEVICE, A>>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Sub<DiffTensor<U, Cpu, DEVICE, A>> for &DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
Tensor<T, Cpu, DEVICE, A>: Neg<Output = Tensor<T, Cpu, DEVICE, A>>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, U, const DEVICE: usize, A> Sub<DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
Tensor<T, Cpu, DEVICE, A>: Neg<Output = Tensor<T, Cpu, DEVICE, A>>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
impl<T, U, const DEVICE: usize, A> Sub<DiffTensor<U, Cpu, DEVICE, A>> for DiffTensor<T, Cpu, DEVICE, A>where
T: CommonBounds + NormalOut<U>,
U: CommonBounds,
<T as NormalOut<U>>::Output: CommonBounds + Cast<<T as NormalOut<U>>::Output> + Cast<T> + Cast<U>,
T::Vec: NormalOut<<U as TypeCommon>::Vec, Output = <<T as NormalOut<U>>::Output as TypeCommon>::Vec>,
Tensor<T, Cpu, DEVICE, A>: Neg<Output = Tensor<T, Cpu, DEVICE, A>>,
A: Allocator + 'static + Send + Sync,
A::Output: AllocatorOutputRetrive,
Source§impl<T, C, const DEVICE: usize, Al> TensorCmp<T, C> for DiffTensor<T, Cpu, DEVICE, Al>
impl<T, C, const DEVICE: usize, Al> TensorCmp<T, C> for DiffTensor<T, Cpu, DEVICE, Al>
Source§type RHS = DiffTensor<C, Cpu, DEVICE, Al>
type RHS = DiffTensor<C, Cpu, DEVICE, Al>
Source§type BoolVector = boolx16
type BoolVector = boolx16
Source§fn tensor_neq<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
fn tensor_neq<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
Source§fn tensor_eq<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
fn tensor_eq<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
Source§fn tensor_lt<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
fn tensor_lt<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
Source§fn tensor_gt<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
fn tensor_gt<D>(&self, rhs: D) -> Result<Self::Output, TensorError>
Source§impl<T: CommonBounds, const DEVICE: usize, Al> TensorCreator<T> for DiffTensor<T, Cpu, DEVICE, Al>
impl<T: CommonBounds, const DEVICE: usize, Al> TensorCreator<T> for DiffTensor<T, Cpu, DEVICE, Al>
Source§type Output = DiffTensor<T, Cpu, DEVICE, Al>
type Output = DiffTensor<T, Cpu, DEVICE, Al>
Source§fn empty<S: Into<Shape>>(shape: S) -> Result<Self::Output, TensorError>
fn empty<S: Into<Shape>>(shape: S) -> Result<Self::Output, TensorError>
Source§fn zeros<S: Into<Shape>>(shape: S) -> Result<Self::Output, TensorError>
fn zeros<S: Into<Shape>>(shape: S) -> Result<Self::Output, TensorError>
Source§fn ones<S: Into<Shape>>(shape: S) -> Result<Self::Output, TensorError>
fn ones<S: Into<Shape>>(shape: S) -> Result<Self::Output, TensorError>
Source§fn empty_like(&self) -> Result<Self::Output, TensorError>
fn empty_like(&self) -> Result<Self::Output, TensorError>
Source§fn zeros_like(&self) -> Result<Self::Output, TensorError>
fn zeros_like(&self) -> Result<Self::Output, TensorError>
Source§fn ones_like(&self) -> Result<Self::Output, TensorError>
fn ones_like(&self) -> Result<Self::Output, TensorError>
Source§fn full<S: Into<Shape>>(val: T, shape: S) -> Result<Self::Output, TensorError>
fn full<S: Into<Shape>>(val: T, shape: S) -> Result<Self::Output, TensorError>
Source§fn full_like(&self, val: T) -> Result<Self::Output, TensorError>
fn full_like(&self, val: T) -> Result<Self::Output, TensorError>
Source§fn arange<U>(start: U, end: U) -> Result<Self::Output, TensorError>
fn arange<U>(start: U, end: U) -> Result<Self::Output, TensorError>
Source§fn arange_step(start: T, end: T, step: T) -> Result<Self::Output, TensorError>
fn arange_step(start: T, end: T, step: T) -> Result<Self::Output, TensorError>
Source§fn eye(n: usize, m: usize, k: usize) -> Result<Self::Output, TensorError>
fn eye(n: usize, m: usize, k: usize) -> Result<Self::Output, TensorError>
Source§fn linspace<U>(
start: U,
end: U,
num: usize,
include_end: bool,
) -> Result<Self::Output, TensorError>
fn linspace<U>( start: U, end: U, num: usize, include_end: bool, ) -> Result<Self::Output, TensorError>
Source§fn logspace(
start: T,
end: T,
num: usize,
include_end: bool,
base: T,
) -> Result<Self::Output, TensorError>
fn logspace( start: T, end: T, num: usize, include_end: bool, base: T, ) -> Result<Self::Output, TensorError>
Source§fn geomspace(
start: T,
end: T,
n: usize,
include_end: bool,
) -> Result<Self::Output, TensorError>
fn geomspace( start: T, end: T, n: usize, include_end: bool, ) -> Result<Self::Output, TensorError>
Source§fn tri(
n: usize,
m: usize,
k: i64,
low_triangle: bool,
) -> Result<Self::Output, TensorError>
fn tri( n: usize, m: usize, k: i64, low_triangle: bool, ) -> Result<Self::Output, TensorError>
n by m, with ones below or on the kth diagonal and zeros elsewhere. Read more
Source§fn tril(&self, k: i64) -> Result<Self::Output, TensorError>
fn tril(&self, k: i64) -> Result<Self::Output, TensorError>
kth diagonal set to zero. Read more
Auto Trait Implementations§
impl<T, B, const DEVICE_ID: usize, A> Freeze for DiffTensor<T, B, DEVICE_ID, A>
impl<T, B = Cpu, const DEVICE_ID: usize = 0, A = HptAllocator<B>> !RefUnwindSafe for DiffTensor<T, B, DEVICE_ID, A>
impl<T, B = Cpu, const DEVICE_ID: usize = 0, A = HptAllocator<B>> !Send for DiffTensor<T, B, DEVICE_ID, A>
impl<T, B = Cpu, const DEVICE_ID: usize = 0, A = HptAllocator<B>> !Sync for DiffTensor<T, B, DEVICE_ID, A>
impl<T, B, const DEVICE_ID: usize, A> Unpin for DiffTensor<T, B, DEVICE_ID, A>
impl<T, B = Cpu, const DEVICE_ID: usize = 0, A = HptAllocator<B>> !UnwindSafe for DiffTensor<T, B, DEVICE_ID, A>
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more