pub struct Tensor<S: Shape, E, D: Storage<E>, T = NoneTape> { /* private fields */ }
The single tensor struct that stores nd arrays and tapes.
See module level documentation on how to create and use tensors.
Generics:
- Shape - the shape of the underlying nd array
- Dtype - the type of data stored in the array
- Storage - the device the array is stored on
- Tape - the tape the tensor has
Examples:
// A 1d tensor with 1000 f32 elements, stored on the Cpu
type A = Tensor<Rank1<1000>, f32, Cpu>;
// A 2d tensor with bool elements, stored on the Cpu
type B = Tensor<Rank2<2, 3>, bool, Cpu>;
// A 3d tensor with usize elements, stored on the Cpu, without any tape
type C = Tensor<Rank3<4, 2, 3>, usize, Cpu, NoneTape>;
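For concreteness, a minimal sketch of allocating tensors like these on a Cpu device, assuming the crate prelude (use dfdx::prelude::*) and the usual device constructors such as dev.zeros() and dev.sample_normal(); the bindings are illustrative:

use dfdx::prelude::*;

let dev: Cpu = Default::default();
// A 1d tensor of 1000 zeros on the Cpu.
let a: Tensor<Rank1<1000>, f32, _> = dev.zeros();
// A 2x3 tensor filled with samples from a standard normal distribution.
let b: Tensor<Rank2<2, 3>, f32, _> = dev.sample_normal();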
Implementations
impl<S: Shape, E: Dtype + NumpyDtype, D: CopySlice<E>, T> Tensor<S, E, D, T>
pub fn write_to_npz<W: Write + Seek>(&self, w: &mut ZipWriter<W>, filename: String) -> ZipResult<()>
Writes data to a new file in a zip archive named filename.
pub fn read_from_npz<R: Read + Seek>(&mut self, r: &mut ZipArchive<R>, filename: String) -> Result<(), NpzError>
Reads data from a file already in a zip archive named filename.
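A hedged sketch of round-tripping a tensor through an .npz archive (an .npz file is a zip of .npy entries). This assumes the zip crate's ZipWriter/ZipArchive, a fallible context for the ? operator (e.g. a function returning Box<dyn std::error::Error>), and illustrative file and entry names:

use std::fs::File;
use zip::{ZipArchive, ZipWriter};

let dev: Cpu = Default::default();
let t: Tensor<Rank2<2, 3>, f32, _> = dev.sample_normal();

// Write the tensor into weights.npz under the entry name "t.npy".
let mut w = ZipWriter::new(File::create("weights.npz")?);
t.write_to_npz(&mut w, "t.npy".to_string())?;
w.finish()?;

// Read it back into a freshly allocated tensor of the same shape and dtype.
let mut t2: Tensor<Rank2<2, 3>, f32, _> = dev.zeros();
let mut r = ZipArchive::new(File::open("weights.npz")?)?;
t2.read_from_npz(&mut r, "t.npy".to_string())?;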
impl<S: Shape, E: Dtype + SafeDtype, D: CopySlice<E>, T> Tensor<S, E, D, T>
pub fn load_safetensor(&mut self, tensors: &SafeTensors<'_>, key: &str) -> Result<(), Error>
Loads data from the SafeTensors storage under the given key.
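As a sketch, loading a single entry out of a .safetensors file with the safetensors crate; the file name and the key "weight" are illustrative, and the ? operator assumes a fallible context:

let dev: Cpu = Default::default();
let bytes = std::fs::read("model.safetensors")?;
let tensors = SafeTensors::deserialize(&bytes)?;
// The destination tensor must already have the matching shape and dtype.
let mut w: Tensor<Rank2<2, 3>, f32, _> = dev.zeros();
w.load_safetensor(&tensors, "weight")?;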
impl<S: Shape, E, D: CopySlice<E>, T> Tensor<S, E, D, T>
impl<S: Shape, E, D: Storage<E>, T> Tensor<S, E, D, T>
pub fn to_device<Dst: TensorFromVec<E>>(&self, device: &Dst) -> Tensor<S, E, Dst>
Clones the tensor onto a different device.
pub fn try_to_device<Dst: TensorFromVec<E>>(&self, device: &Dst) -> Result<Tensor<S, E, Dst>, Dst::Err>
Fallibly clones the tensor onto a different device.
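A minimal sketch of moving data between devices, shown Cpu-to-Cpu so it compiles without the cuda feature; with that feature enabled a Cuda device can be the destination in the same way:

let cpu: Cpu = Default::default();
let a: Tensor<Rank1<3>, f32, _> = cpu.zeros();
// Clone the data onto another device; shape and dtype are preserved.
let other_dev: Cpu = Default::default();
let b: Tensor<Rank1<3>, f32, Cpu> = a.to_device(&other_dev);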
impl<S: Shape, E: Dtype, D: ZeroFillStorage<E>, T> Tensor<S, E, D, T>
pub fn fill_with_zeros(&mut self)
Fills the tensor with zeros.
pub fn try_fill_with_zeros(&mut self) -> Result<(), D::Err>
Fallible version of Tensor::fill_with_zeros.
impl<S: Shape, E: Dtype, D: OneFillStorage<E>, T> Tensor<S, E, D, T>
pub fn fill_with_ones(&mut self)
Fills the tensor with ones.
pub fn try_fill_with_ones(&mut self) -> Result<(), D::Err>
Fallible version of Tensor::fill_with_ones.
impl<S: Shape, E: Unit, D: SampleTensor<E>, T> Tensor<S, E, D, T>
pub fn fill_with_distr<Distr: Distribution<E>>(&mut self, distr: Distr)
Fills the tensor with random data from the distribution.
pub fn try_fill_with_distr<Distr: Distribution<E>>(&mut self, distr: Distr) -> Result<(), D::Err>
Fallible version of Tensor::fill_with_distr.
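A sketch of the in-place fill methods above, assuming the rand_distr crate for the distribution type:

let dev: Cpu = Default::default();
let mut t: Tensor<Rank2<2, 3>, f32, _> = dev.zeros();
t.fill_with_ones();                             // every element becomes 1.0
t.fill_with_distr(rand_distr::StandardNormal);  // overwrite with N(0, 1) samples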
impl<S: Shape, E: Dtype, D: UnaryKernel<AccurateGeLUKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
pub fn accurate_gelu(self) -> Self
See accurate_gelu.
pub fn try_accurate_gelu(self) -> Result<Self, D::Err>
See accurate_gelu.
impl<S: Shape, E: Dtype, D: AxpyKernel<E>> Tensor<S, E, D>
impl<S: Shape, E: Dtype, D: BinaryKernel<BCEKernelOp, E>, LTape: Tape<E, D>> Tensor<S, E, D, LTape>
pub fn bce_with_logits<RTape: Tape<E, D>>(self, prob: Tensor<S, E, D, RTape>) -> Self where LTape: Merge<RTape>
See bce_with_logits.
pub fn try_bce_with_logits<RTape>(self, prob: Tensor<S, E, D, RTape>) -> Result<Self, D::Err> where RTape: Tape<E, D>, LTape: Merge<RTape>
See bce_with_logits.
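Roughly, bce_with_logits treats self as unnormalized logits and prob as target probabilities, producing an element-wise loss of the same shape; a hedged sketch, assuming the SampleTensor helpers sample_normal and sample_uniform:

let dev: Cpu = Default::default();
let logits: Tensor<Rank1<3>, f32, _> = dev.sample_normal();
let targets: Tensor<Rank1<3>, f32, _> = dev.sample_uniform();
// Element-wise binary cross entropy; the tapes of both operands are merged.
let loss = logits.leaky_trace().bce_with_logits(targets);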
impl<S: Shape, E: Dtype, D: UnaryKernel<ClampKernelOp<E>, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<EqKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<NeKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<GtKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<GeKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<LtKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<LeKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<FastGeLUKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
pub fn try_fast_gelu(self) -> Result<Self, D::Err>
See fast_gelu.
pub fn gelu(self) -> Self
👎 Deprecated since 0.12.0: Use Tensor::fast_gelu instead.
pub fn try_gelu(self) -> Result<Self, D::Err>
👎 Deprecated since 0.12.0: Use Tensor::try_fast_gelu instead.
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Tensor<S, E, D, T>
pub fn huber_error<R: Tape<E, D>>(self, rhs: Tensor<S, E, D, R>, delta: impl Into<f64>) -> Self where T: Merge<R>
See huber_error.
pub fn try_huber_error<R: Tape<E, D>>(self, rhs: Tensor<S, E, D, R>, delta: impl Into<f64>) -> Result<Self, D::Err> where T: Merge<R>
See huber_error.
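A brief sketch of huber_error, which behaves like squared error for small residuals and like absolute error beyond delta; the shapes and delta value are illustrative:

let dev: Cpu = Default::default();
let pred: Tensor<Rank1<4>, f32, _> = dev.sample_normal();
let target: Tensor<Rank1<4>, f32, _> = dev.sample_normal();
// Element-wise Huber loss with delta = 1.0; reduce with .mean() if a scalar loss is needed.
let err = pred.huber_error(target, 1.0);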
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Tensor<S, E, D, T>
pub fn log_softmax<Ax: Axes>(self) -> Self where S: ReduceShape<Ax>
See log_softmax().
pub fn try_log_softmax<Ax: Axes>(self) -> Result<Self, D::Err> where S: ReduceShape<Ax>
See log_softmax().
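The reduction axis is chosen with a type parameter; a sketch normalizing over the last axis of a rank-2 tensor:

let dev: Cpu = Default::default();
let logits: Tensor<Rank2<2, 5>, f32, _> = dev.sample_normal();
// Log-probabilities along axis 1; exponentiating each row sums to 1.0.
let logp = logits.log_softmax::<Axis<1>>();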
impl<S: Shape, E: Dtype, D: UnaryKernel<NansToKernelOp<E>, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<NegateKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<PowfKernelOp<E>, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<SigmoidKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E: Unit, D: SliceKernel<E>, T: Tape<E, D>> Tensor<S, E, D, T>
pub fn try_slice<Slice>(self, slice: Slice) -> Result<Tensor<S::Sliced, E, D, T>, D::Err> where S: SliceShape<Slice>, Slice: 'static
Fallible version of Tensor::slice.
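A sketch of slicing with one range per axis, shown with the infallible Tensor::slice counterpart; the resulting dimensions are runtime-sized:

let dev: Cpu = Default::default();
let t: Tensor<Rank2<4, 6>, f32, _> = dev.sample_normal();
// Keep rows 0..2 and columns 1..4, yielding a 2 x 3 tensor with usize dims.
let s = t.slice((0..2, 1..4));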
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Tensor<S, E, D, T>
pub fn softmax<Ax: Axes>(self) -> Self where S: ReduceShape<Ax>
See softmax().
pub fn try_softmax<Ax: Axes>(self) -> Result<Self, D::Err> where S: ReduceShape<Ax>
See softmax().
impl<S: Shape, E: Dtype, D: UnaryKernel<SquareKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: TriangleTensor<E>, T: Tape<E, D>> Tensor<S, E, D, T> where Self: TryMul<Tensor<S, E, D>> + HasErr<Err = D::Err>
Trait Implementations
impl<S: Shape, E: Dtype, D: Storage<E>, LhsTape: Tape<E, D>, Rhs> Add<Rhs> for Tensor<S, E, D, LhsTape> where Self: TryAdd<Rhs>
impl<E: 'static + Clone, D: OneFillStorage<E>> Backward<E, D> for Tensor<Rank0, E, D, OwnedTape<E, D>>
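This Backward impl is what lets a scalar (Rank0) tensor carrying an OwnedTape produce gradients; a minimal sketch of the usual flow:

let dev: Cpu = Default::default();
let w: Tensor<Rank2<2, 3>, f32, _> = dev.sample_normal();
// leaky_trace() attaches an OwnedTape, square().mean() reduces to a Rank0 loss,
// and backward() consumes the tape and returns the accumulated Gradients.
let loss = w.leaky_trace().square().mean();
let grads = loss.backward();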
source§impl<S: Shape, D: BooleanKernel> BitAnd<&Tensor<S, bool, D, NoneTape>> for &Tensor<S, bool, D>
impl<S: Shape, D: BooleanKernel> BitAnd<&Tensor<S, bool, D, NoneTape>> for &Tensor<S, bool, D>
source§impl<S: Shape, D: BooleanKernel> BitOr<&Tensor<S, bool, D, NoneTape>> for &Tensor<S, bool, D>
impl<S: Shape, D: BooleanKernel> BitOr<&Tensor<S, bool, D, NoneTape>> for &Tensor<S, bool, D>
source§impl<S: Shape, D: BooleanKernel> BitXor<&Tensor<S, bool, D, NoneTape>> for &Tensor<S, bool, D>
impl<S: Shape, D: BooleanKernel> BitXor<&Tensor<S, bool, D, NoneTape>> for &Tensor<S, bool, D>
source§impl<S: Shape, E, D: Storage<E>, T: Tape<E, D>> BroadcastTo for Tensor<S, E, D, T>
impl<S: Shape, E, D: Storage<E>, T: Tape<E, D>> BroadcastTo for Tensor<S, E, D, T>
fn try_broadcast_like<Dst: HasShape, Ax: Axes>(self, dst: &Dst) -> Result<Self::WithShape<Dst::Shape>, Self::Err> where Self::Shape: BroadcastShapeTo<Dst::Shape, Ax>
fn broadcast<Dst: ConstShape, Ax: Axes>(self) -> Self::WithShape<Dst> where Self::Shape: BroadcastShapeTo<Dst, Ax>
Broadcast into shape Dst along axes Ax.
fn try_broadcast<Dst: ConstShape, Ax: Axes>(self) -> Result<Self::WithShape<Dst>, Self::Err> where Self::Shape: BroadcastShapeTo<Dst, Ax>
fn broadcast_like<Dst: HasShape, Ax: Axes>(self, dst: &Dst) -> Self::WithShape<Dst::Shape> where Self::Shape: BroadcastShapeTo<Dst::Shape, Ax>
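A short sketch of BroadcastTo, where the destination shape (and therefore the broadcast axes) is picked up from the type annotation:

let dev: Cpu = Default::default();
let a: Tensor<Rank1<3>, f32, _> = dev.sample_normal();
// Repeat the 3-element vector across a new leading axis of size 4.
let b: Tensor<Rank2<4, 3>, f32, _> = a.broadcast();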
source§impl<S: Shape, E: Dtype, D: ChooseKernel<E>, LhsTape: Tape<E, D> + Merge<RhsTape>, RhsTape: Tape<E, D>> ChooseFrom<Tensor<S, E, D, LhsTape>, Tensor<S, E, D, RhsTape>> for Tensor<S, bool, D>
impl<S: Shape, E: Dtype, D: ChooseKernel<E>, LhsTape: Tape<E, D> + Merge<RhsTape>, RhsTape: Tape<E, D>> ChooseFrom<Tensor<S, E, D, LhsTape>, Tensor<S, E, D, RhsTape>> for Tensor<S, bool, D>
source§impl<S: Clone + Shape, E: Clone, D: Clone + Storage<E>, T: Clone> Clone for Tensor<S, E, D, T>where
D::Vec: Clone,
S::Concrete: Clone,
impl<S: Clone + Shape, E: Clone, D: Clone + Storage<E>, T: Clone> Clone for Tensor<S, E, D, T>where D::Vec: Clone, S::Concrete: Clone,
source§impl<S: Debug + Shape, E: Debug, D: Debug + Storage<E>, T: Debug> Debug for Tensor<S, E, D, T>where
D::Vec: Debug,
S::Concrete: Debug,
impl<S: Debug + Shape, E: Debug, D: Debug + Storage<E>, T: Debug> Debug for Tensor<S, E, D, T>where D::Vec: Debug, S::Concrete: Debug,
source§impl<S: Shape, E: Dtype, D: Storage<E>, LhsTape: Tape<E, D>, Rhs> Div<Rhs> for Tensor<S, E, D, LhsTape>where
Self: TryDiv<Rhs>,
impl<S: Shape, E: Dtype, D: Storage<E>, LhsTape: Tape<E, D>, Rhs> Div<Rhs> for Tensor<S, E, D, LhsTape>where Self: TryDiv<Rhs>,
source§impl<Src: Shape, E: Dtype, D: ReplaceDimKernel<E>, T: Tape<E, D>> GatherTo<E, D> for Tensor<Src, E, D, T>
impl<Src: Shape, E: Dtype, D: ReplaceDimKernel<E>, T: Tape<E, D>> GatherTo<E, D> for Tensor<Src, E, D, T>
source§impl<B: Dim, C: Dim, H: Dim, W: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> GenericUpscale2D<M> for Tensor<(B, C, H, W), E, D, T>
impl<B: Dim, C: Dim, H: Dim, W: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> GenericUpscale2D<M> for Tensor<(B, C, H, W), E, D, T>
source§impl<C: Dim, H: Dim, W: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> GenericUpscale2D<M> for Tensor<(C, H, W), E, D, T>
impl<C: Dim, H: Dim, W: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> GenericUpscale2D<M> for Tensor<(C, H, W), E, D, T>
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> LogSumExpTo for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> LogSumExpTo for Tensor<S, E, D, T>
source§fn try_logsumexp<Dst: Shape, Ax: Axes>(
self
) -> Result<Self::WithShape<Dst>, Self::Err>where
Self::Shape: ReduceShapeTo<Dst, Ax>,
fn try_logsumexp<Dst: Shape, Ax: Axes>( self ) -> Result<Self::WithShape<Dst>, Self::Err>where Self::Shape: ReduceShapeTo<Dst, Ax>,
source§impl<S: Shape, E: Dtype, D: MaxReduceKernel<E>, T: Tape<E, D>> MaxTo for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: MaxReduceKernel<E>, T: Tape<E, D>> MaxTo for Tensor<S, E, D, T>
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> MeanTo for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> MeanTo for Tensor<S, E, D, T>
source§impl<S: Shape, E: Dtype, D: MinReduceKernel<E>, T: Tape<E, D>> MinTo for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: MinReduceKernel<E>, T: Tape<E, D>> MinTo for Tensor<S, E, D, T>
source§impl<E: Dtype, D: Device<E>, T: Tape<E, D> + Merge<NoneTape>, B: ConstDim, C: ConstDim> Module<Tensor<(B, C), E, D, T>> for PReLU1D<C, E, D>where
(B, C): ReduceShapeTo<(C,), Axis<0>>,
Tensor<(B, C), E, D, T>: TryPReLU<Tensor<(B, C), E, D, NoneTape>>,
impl<E: Dtype, D: Device<E>, T: Tape<E, D> + Merge<NoneTape>, B: ConstDim, C: ConstDim> Module<Tensor<(B, C), E, D, T>> for PReLU1D<C, E, D>where (B, C): ReduceShapeTo<(C,), Axis<0>>, Tensor<(B, C), E, D, T>: TryPReLU<Tensor<(B, C), E, D, NoneTape>>,
source§impl<const H: usize, const W: usize, const IH: usize, const IW: usize, B: Dim, C: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> Module<Tensor<(B, C, Const<IH>, Const<IW>), E, D, T>> for Upscale2DBy<H, W, M>where
Tensor<(B, C, Const<{ _ }>, Const<{ _ }>), E, D, T>: Sized,
impl<const H: usize, const W: usize, const IH: usize, const IW: usize, B: Dim, C: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> Module<Tensor<(B, C, Const<IH>, Const<IW>), E, D, T>> for Upscale2DBy<H, W, M>where Tensor<(B, C, Const<{ _ }>, Const<{ _ }>), E, D, T>: Sized,
type Output = Tensor<(B, C, Const<{ IH * H }>, Const<{ IW * W }>), E, D, T>
The type that this unit produces given Input.
type Error = <<Upscale2DBy<H, W, M> as Module<Tensor<(B, C, Const<IH>, Const<IW>), E, D, T>>>::Output as HasErr>::Err
fn try_forward(&self, x: Tensor<(B, C, Const<IH>, Const<IW>), E, D, T>) -> Result<Self::Output, Self::Error>
source§impl<B: Dim, C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, C, H, W), E, D, T>> for AvgPoolGlobal
impl<B: Dim, C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, C, H, W), E, D, T>> for AvgPoolGlobal
source§impl<B: Dim, C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, C, H, W), E, D, T>> for MaxPoolGlobal
impl<B: Dim, C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, C, H, W), E, D, T>> for MaxPoolGlobal
source§impl<B: Dim, C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, C, H, W), E, D, T>> for MinPoolGlobal
impl<B: Dim, C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, C, H, W), E, D, T>> for MinPoolGlobal
source§impl<E: Dtype, D: Device<E>, T: Tape<E, D> + Merge<NoneTape>, B: ConstDim, C: ConstDim, M: ConstDim> Module<Tensor<(B, C, M), E, D, T>> for PReLU1D<C, E, D>where
(B, C, M): ReduceShapeTo<(C,), Axes2<0, 2>>,
Tensor<(B, C, M), E, D, T>: TryPReLU<Tensor<(B, C, M), E, D, NoneTape>>,
impl<E: Dtype, D: Device<E>, T: Tape<E, D> + Merge<NoneTape>, B: ConstDim, C: ConstDim, M: ConstDim> Module<Tensor<(B, C, M), E, D, T>> for PReLU1D<C, E, D>where (B, C, M): ReduceShapeTo<(C,), Axes2<0, 2>>, Tensor<(B, C, M), E, D, T>: TryPReLU<Tensor<(B, C, M), E, D, NoneTape>>,
source§impl<E: Dtype, D: Device<E>, T: Tape<E, D> + Merge<NoneTape>, B: ConstDim, C: ConstDim, M: ConstDim, N: ConstDim> Module<Tensor<(B, C, M, N), E, D, T>> for PReLU1D<C, E, D>where
(B, C, M, N): ReduceShapeTo<(C,), Axes3<0, 2, 3>>,
Tensor<(B, C, M, N), E, D, T>: TryPReLU<Tensor<(B, C, M, N), E, D, NoneTape>>,
impl<E: Dtype, D: Device<E>, T: Tape<E, D> + Merge<NoneTape>, B: ConstDim, C: ConstDim, M: ConstDim, N: ConstDim> Module<Tensor<(B, C, M, N), E, D, T>> for PReLU1D<C, E, D>where (B, C, M, N): ReduceShapeTo<(C,), Axes3<0, 2, 3>>, Tensor<(B, C, M, N), E, D, T>: TryPReLU<Tensor<(B, C, M, N), E, D, NoneTape>>,
type Output = Tensor<(B, C, M, N), E, D, T>
The type that this unit produces given Input.
type Error = <Tensor<(B, C, M, N), E, D, T> as HasErr>::Err
fn try_forward(&self, input: Tensor<(B, C, M, N), E, D, T>) -> Result<Self::Output, Self::Error>
source§impl<const H: usize, const W: usize, B: Dim, C: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> Module<Tensor<(B, C, usize, usize), E, D, T>> for Upscale2DBy<H, W, M>where
Tensor<(B, C, usize, usize), E, D, T>: Sized,
impl<const H: usize, const W: usize, B: Dim, C: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> Module<Tensor<(B, C, usize, usize), E, D, T>> for Upscale2DBy<H, W, M>where Tensor<(B, C, usize, usize), E, D, T>: Sized,
source§impl<B: Dim, const C: usize, E: Dtype, D: Device<E>> Module<Tensor<(B, Const<C>), E, D, NoneTape>> for BatchNorm1D<C, E, D>
impl<B: Dim, const C: usize, E: Dtype, D: Device<E>> Module<Tensor<(B, Const<C>), E, D, NoneTape>> for BatchNorm1D<C, E, D>
fn try_forward(&self, x: Tensor<(B, Const<C>), E, D, NoneTape>) -> Result<Self::Output, D::Err>
Inference 1d forward - does not update Self::running_mean and Self::running_var.
type Error = <D as HasErr>::Err
source§impl<B: Dim, const C: usize, const H: usize, const W: usize, D, E: Dtype, T> Module<Tensor<(B, Const<C>, Const<H>, Const<W>), E, D, T>> for Flatten2Dwhere
D: Device<E>,
T: Tape<E, D>,
(B, Const<{ _ }>): Sized,
impl<B: Dim, const C: usize, const H: usize, const W: usize, D, E: Dtype, T> Module<Tensor<(B, Const<C>, Const<H>, Const<W>), E, D, T>> for Flatten2Dwhere D: Device<E>, T: Tape<E, D>, (B, Const<{ _ }>): Sized,
source§impl<B: Dim, const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> Module<Tensor<(B, Const<C>, H, W), E, D, NoneTape>> for BatchNorm2D<C, E, D>
impl<B: Dim, const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> Module<Tensor<(B, Const<C>, H, W), E, D, NoneTape>> for BatchNorm2D<C, E, D>
fn try_forward(&self, x: Tensor<(B, Const<C>, H, W), E, D, NoneTape>) -> Result<Self::Output, D::Err>
Inference 4d forward - does not update Self::running_mean and Self::running_var.
type Output = Tensor<(B, Const<C>, H, W), E, D, NoneTape>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
source§impl<B: Dim, const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, Const<C>, H, W), E, D, T>> for Bias2D<C, E, D>
impl<B: Dim, const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, Const<C>, H, W), E, D, T>> for Bias2D<C, E, D>
source§impl<B: Dim, const C: usize, L: Dim, E: Dtype, D: Device<E>> Module<Tensor<(B, Const<C>, L), E, D, NoneTape>> for BatchNorm1D<C, E, D>
impl<B: Dim, const C: usize, L: Dim, E: Dtype, D: Device<E>> Module<Tensor<(B, Const<C>, L), E, D, NoneTape>> for BatchNorm1D<C, E, D>
fn try_forward(&self, x: Tensor<(B, Const<C>, L), E, D, NoneTape>) -> Result<Self::Output, D::Err>
Inference 2d forward - does not update Self::running_mean and Self::running_var.
type Output = Tensor<(B, Const<C>, L), E, D, NoneTape>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
source§impl<B: Dim, const M: usize, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, Const<M>), E, D, T>> for LayerNorm1D<M, E, D>
impl<B: Dim, const M: usize, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, Const<M>), E, D, T>> for LayerNorm1D<M, E, D>
source§impl<B: Dim, S: Dim, const M: usize, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, S, Const<M>), E, D, T>> for LayerNorm1D<M, E, D>
impl<B: Dim, S: Dim, const M: usize, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, S, Const<M>), E, D, T>> for LayerNorm1D<M, E, D>
source§impl<const VOCAB: usize, const DIM: usize, BATCH: Dim, SEQ: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(BATCH, SEQ), usize, D, T>> for Embedding<VOCAB, DIM, E, D>
impl<const VOCAB: usize, const DIM: usize, BATCH: Dim, SEQ: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(BATCH, SEQ), usize, D, T>> for Embedding<VOCAB, DIM, E, D>
source§impl<C: ConstDim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(C,), E, D, T>> for PReLU1D<C, E, D>where
Tensor<(C,), E, D, T>: TryPReLU<Tensor<(C,), E, D, NoneTape>>,
impl<C: ConstDim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(C,), E, D, T>> for PReLU1D<C, E, D>where Tensor<(C,), E, D, T>: TryPReLU<Tensor<(C,), E, D, NoneTape>>,
source§impl<const H: usize, const W: usize, const IH: usize, const IW: usize, C: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> Module<Tensor<(C, Const<IH>, Const<IW>), E, D, T>> for Upscale2DBy<H, W, M>where
Tensor<(C, Const<{ _ }>, Const<{ _ }>), E, D, T>: Sized,
impl<const H: usize, const W: usize, const IH: usize, const IW: usize, C: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> Module<Tensor<(C, Const<IH>, Const<IW>), E, D, T>> for Upscale2DBy<H, W, M>where Tensor<(C, Const<{ _ }>, Const<{ _ }>), E, D, T>: Sized,
type Output = Tensor<(C, Const<{ IH * H }>, Const<{ IW * W }>), E, D, T>
The type that this unit produces given Input.
type Error = <<Upscale2DBy<H, W, M> as Module<Tensor<(C, Const<IH>, Const<IW>), E, D, T>>>::Output as HasErr>::Err
fn try_forward(&self, x: Tensor<(C, Const<IH>, Const<IW>), E, D, T>) -> Result<Self::Output, Self::Error>
source§impl<C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(C, H, W), E, D, T>> for AvgPoolGlobal
impl<C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(C, H, W), E, D, T>> for AvgPoolGlobal
source§impl<C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(C, H, W), E, D, T>> for MaxPoolGlobal
impl<C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(C, H, W), E, D, T>> for MaxPoolGlobal
source§impl<C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(C, H, W), E, D, T>> for MinPoolGlobal
impl<C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(C, H, W), E, D, T>> for MinPoolGlobal
source§impl<const H: usize, const W: usize, C: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> Module<Tensor<(C, usize, usize), E, D, T>> for Upscale2DBy<H, W, M>
impl<const H: usize, const W: usize, C: Dim, E: Dtype, M: UpscaleMethod, D: Upscale2DKernel<E, M> + ZerosTensor<E>, T: 'static + Tape<E, D>> Module<Tensor<(C, usize, usize), E, D, T>> for Upscale2DBy<H, W, M>
source§impl<const C: usize, const H: usize, const W: usize, D: Device<E>, E: Dtype, T: Tape<E, D>> Module<Tensor<(Const<C>, Const<H>, Const<W>), E, D, T>> for Flatten2Dwhere
Rank1<{ _ }>: Sized,
impl<const C: usize, const H: usize, const W: usize, D: Device<E>, E: Dtype, T: Tape<E, D>> Module<Tensor<(Const<C>, Const<H>, Const<W>), E, D, T>> for Flatten2Dwhere Rank1<{ _ }>: Sized,
source§impl<const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> Module<Tensor<(Const<C>, H, W), E, D, NoneTape>> for BatchNorm2D<C, E, D>
impl<const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> Module<Tensor<(Const<C>, H, W), E, D, NoneTape>> for BatchNorm2D<C, E, D>
fn try_forward(&self, x: Tensor<(Const<C>, H, W), E, D, NoneTape>) -> Result<Self::Output, D::Err>
Inference 3d forward - does not update Self::running_mean and Self::running_var.
type Output = Tensor<(Const<C>, H, W), E, D, NoneTape>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
source§impl<const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(Const<C>, H, W), E, D, T>> for Bias2D<C, E, D>
impl<const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(Const<C>, H, W), E, D, T>> for Bias2D<C, E, D>
source§impl<const M: usize, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(Const<M>,), E, D, T>> for LayerNorm1D<M, E, D>
impl<const M: usize, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(Const<M>,), E, D, T>> for LayerNorm1D<M, E, D>
source§impl<const V: usize, const M: usize, SEQ: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(SEQ,), usize, D, T>> for Embedding<V, M, E, D>
impl<const V: usize, const M: usize, SEQ: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(SEQ,), usize, D, T>> for Embedding<V, M, E, D>
source§impl<S: Shape, E: Dtype, D: Device<E>> Module<Tensor<S, E, D, NoneTape>> for Dropout
impl<S: Shape, E: Dtype, D: Device<E>> Module<Tensor<S, E, D, NoneTape>> for Dropout
source§impl<const N: usize, S: Shape, E: Dtype, D: Device<E>> Module<Tensor<S, E, D, NoneTape>> for DropoutOneIn<N>
impl<const N: usize, S: Shape, E: Dtype, D: Device<E>> Module<Tensor<S, E, D, NoneTape>> for DropoutOneIn<N>
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Abs
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Abs
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for AccurateGeLU
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for AccurateGeLU
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Cos
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Cos
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Exp
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Exp
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for FastGeLU
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for FastGeLU
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for GeLU
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for GeLU
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for LeakyReLU<E>
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for LeakyReLU<E>
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Ln
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Ln
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for LogSoftmax
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for LogSoftmax
source§impl<S: ConstShape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for PReLU<E, D>where
Tensor<S, E, D, T>: TryPReLU<Tensor<S, E, D, NoneTape>>,
impl<S: ConstShape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for PReLU<E, D>where Tensor<S, E, D, T>: TryPReLU<Tensor<S, E, D, NoneTape>>,
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for ReLU
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for ReLU
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Sigmoid
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Sigmoid
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Sin
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Sin
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Softmax
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Softmax
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Sqrt
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Sqrt
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Square
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Square
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Tanh
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Tanh
source§impl<Src: Shape, Dst: ConstShape, D: Device<E>, E: Dtype, T: Tape<E, D>> Module<Tensor<Src, E, D, T>> for Reshape<Dst>
impl<Src: Shape, Dst: ConstShape, D: Device<E>, E: Dtype, T: Tape<E, D>> Module<Tensor<Src, E, D, T>> for Reshape<Dst>
source§impl<B: Dim, const C: usize, E: Dtype, D: Device<E>> ModuleMut<Tensor<(B, Const<C>), E, D, OwnedTape<E, D>>> for BatchNorm1D<C, E, D>
impl<B: Dim, const C: usize, E: Dtype, D: Device<E>> ModuleMut<Tensor<(B, Const<C>), E, D, OwnedTape<E, D>>> for BatchNorm1D<C, E, D>
fn try_forward_mut(&mut self, x: Tensor<(B, Const<C>), E, D, OwnedTape<E, D>>) -> Result<Self::Output, D::Err>
Training 2d forward - updates Self::running_mean and Self::running_var.
type Output = Tensor<(B, Const<C>), E, D, OwnedTape<E, D>>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
fn forward_mut(&mut self, input: Input) -> Self::Output
source§impl<B: Dim, const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> ModuleMut<Tensor<(B, Const<C>, H, W), E, D, OwnedTape<E, D>>> for BatchNorm2D<C, E, D>
impl<B: Dim, const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> ModuleMut<Tensor<(B, Const<C>, H, W), E, D, OwnedTape<E, D>>> for BatchNorm2D<C, E, D>
fn try_forward_mut(&mut self, x: Tensor<(B, Const<C>, H, W), E, D, OwnedTape<E, D>>) -> Result<Self::Output, D::Err>
Training 4d forward - updates Self::running_mean and Self::running_var.
type Output = Tensor<(B, Const<C>, H, W), E, D, OwnedTape<E, D>>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
fn forward_mut(&mut self, input: Input) -> Self::Output
source§impl<B: Dim, const C: usize, L: Dim, E: Dtype, D: Device<E>> ModuleMut<Tensor<(B, Const<C>, L), E, D, OwnedTape<E, D>>> for BatchNorm1D<C, E, D>
impl<B: Dim, const C: usize, L: Dim, E: Dtype, D: Device<E>> ModuleMut<Tensor<(B, Const<C>, L), E, D, OwnedTape<E, D>>> for BatchNorm1D<C, E, D>
fn try_forward_mut(&mut self, x: Tensor<(B, Const<C>, L), E, D, OwnedTape<E, D>>) -> Result<Self::Output, D::Err>
Training 1d forward - updates Self::running_mean and Self::running_var.
type Output = Tensor<(B, Const<C>, L), E, D, OwnedTape<E, D>>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
fn forward_mut(&mut self, input: Input) -> Self::Output
source§impl<const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> ModuleMut<Tensor<(Const<C>, H, W), E, D, OwnedTape<E, D>>> for BatchNorm2D<C, E, D>
impl<const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> ModuleMut<Tensor<(Const<C>, H, W), E, D, OwnedTape<E, D>>> for BatchNorm2D<C, E, D>
fn try_forward_mut(&mut self, x: Tensor<(Const<C>, H, W), E, D, OwnedTape<E, D>>) -> Result<Self::Output, D::Err>
Training 3d forward - updates Self::running_mean and Self::running_var.
type Output = Tensor<(Const<C>, H, W), E, D, OwnedTape<E, D>>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
fn forward_mut(&mut self, input: Input) -> Self::Output
source§impl<S: Shape, E: Dtype, D: Device<E>> ModuleMut<Tensor<S, E, D, OwnedTape<E, D>>> for Dropout
impl<S: Shape, E: Dtype, D: Device<E>> ModuleMut<Tensor<S, E, D, OwnedTape<E, D>>> for Dropout
source§impl<const N: usize, S: Shape, E: Dtype, D: Device<E>> ModuleMut<Tensor<S, E, D, OwnedTape<E, D>>> for DropoutOneIn<N>
impl<const N: usize, S: Shape, E: Dtype, D: Device<E>> ModuleMut<Tensor<S, E, D, OwnedTape<E, D>>> for DropoutOneIn<N>
source§impl<S: Shape, E: Dtype, D: Storage<E>, LhsTape: Tape<E, D>, Rhs> Mul<Rhs> for Tensor<S, E, D, LhsTape>where
Self: TryMul<Rhs>,
impl<S: Shape, E: Dtype, D: Storage<E>, LhsTape: Tape<E, D>, Rhs> Mul<Rhs> for Tensor<S, E, D, LhsTape>where Self: TryMul<Rhs>,
source§impl<S: Shape, E: Dtype, D: UnaryKernel<NegateKernelOp, E>, T: Tape<E, D>> Neg for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<NegateKernelOp, E>, T: Tape<E, D>> Neg for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: Storage<E>, T: Tape<E, D>> PermuteTo for Tensor<S, E, D, T>
impl<S: Shape, E, D: Storage<E>, T: Tape<E, D>> PermuteTo for Tensor<S, E, D, T>
fn try_permute<Dst: Shape, Ax: Axes>(self) -> Result<Self::WithShape<Dst>, Self::Err> where Self::Shape: PermuteShapeTo<Dst, Ax>
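A sketch of PermuteTo, transposing the axes of a rank-2 tensor; the destination shape is given explicitly and the axis mapping is inferred:

let dev: Cpu = Default::default();
let t: Tensor<Rank2<2, 3>, f32, _> = dev.sample_normal();
let transposed = t.permute::<Rank2<3, 2>, _>();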
source§impl<S: Shape, E, D: Storage<E>, T: Tape<E, D>> RealizeTo for Tensor<S, E, D, T>
impl<S: Shape, E, D: Storage<E>, T: Tape<E, D>> RealizeTo for Tensor<S, E, D, T>
source§fn try_realize<Dst: Shape<Concrete = S::Concrete>>(
self
) -> Result<Self::WithShape<Dst>, Self>where
Self::Shape: RealizeShapeTo<Dst>,
fn try_realize<Dst: Shape<Concrete = S::Concrete>>( self ) -> Result<Self::WithShape<Dst>, Self>where Self::Shape: RealizeShapeTo<Dst>,
source§impl<S: Shape, E: Dtype, D: ReshapeKernel<E>, T: Tape<E, D>> ReshapeTo for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: ReshapeKernel<E>, T: Tape<E, D>> ReshapeTo for Tensor<S, E, D, T>
fn try_reshape_like<Dst: Shape>(self, dst: &Dst) -> Result<Self::WithShape<Dst>, Self::Err>
fn reshape_like<Dst: Shape>(self, dst: &Dst) -> Self::WithShape<Dst>
fn contiguous(self) -> Self::WithShape<Self::Shape>
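A sketch of ReshapeTo with a compile-time destination shape; reshape_like accepts a runtime shape instead:

let dev: Cpu = Default::default();
let t: Tensor<Rank2<2, 3>, f32, _> = dev.sample_normal();
// Same 6 elements, now viewed as a flat vector.
let flat = t.reshape::<Rank1<6>>();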
source§impl<S: Shape, E: Dtype, D: RollKernel<E>, T: Tape<E, D>> Roll for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: RollKernel<E>, T: Tape<E, D>> Roll for Tensor<S, E, D, T>
source§impl<Src: Shape, E: Dtype, D: RemoveDimKernel<E>, T: Tape<E, D>> SelectTo<E, D> for Tensor<Src, E, D, T>
impl<Src: Shape, E: Dtype, D: RemoveDimKernel<E>, T: Tape<E, D>> SelectTo<E, D> for Tensor<Src, E, D, T>
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> StddevTo<E> for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> StddevTo<E> for Tensor<S, E, D, T>
source§fn try_stddev<Dst: Shape, Ax: Axes>(
self,
epsilon: impl Into<f64>
) -> Result<Self::WithShape<Dst>, Self::Err>where
Self::Shape: HasAxes<Ax> + ReduceShapeTo<Dst, Ax>,
fn try_stddev<Dst: Shape, Ax: Axes>( self, epsilon: impl Into<f64> ) -> Result<Self::WithShape<Dst>, Self::Err>where Self::Shape: HasAxes<Ax> + ReduceShapeTo<Dst, Ax>,
source§impl<S: Shape, E: Dtype, D: Storage<E>, LTape: Tape<E, D>, Rhs> Sub<Rhs> for Tensor<S, E, D, LTape>where
Self: TrySub<Rhs>,
impl<S: Shape, E: Dtype, D: Storage<E>, LTape: Tape<E, D>, Rhs> Sub<Rhs> for Tensor<S, E, D, LTape>where Self: TrySub<Rhs>,
source§impl<S: Shape, E: Dtype, D: SumKernel<E>, T: Tape<E, D>> SumTo for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: SumKernel<E>, T: Tape<E, D>> SumTo for Tensor<S, E, D, T>
source§impl<S: ConstShape, E: Dtype, D: Device<E>> TensorCollection<E, D> for Tensor<S, E, D>
impl<S: ConstShape, E: Dtype, D: Device<E>> TensorCollection<E, D> for Tensor<S, E, D>
§type To<E2: Dtype, D2: Device<E2>> = Tensor<S, E2, D2, NoneTape>
type To<E2: Dtype, D2: Device<E2>> = Tensor<S, E2, D2, NoneTape>
fn iter_tensors<V: ModuleVisitor<Self, E, D>>(visitor: &mut V) -> Result<Option<Self::To<V::E2, V::D2>>, V::Err>
Returns Err(_) to indicate an error, Ok(None) to indicate that there is no error and a module has not been built, and Ok(Some(_)) contains Self::To<E2, D2>.
source§fn module<F1, F2, Field>(
name: &str,
get_ref: F1,
get_mut: F2
) -> ModuleField<'_, F1, F2, Self, Field>where
F1: FnMut(&Self) -> &Field,
F2: FnMut(&mut Self) -> &mut Field,
Field: TensorCollection<E, D>,
fn module<F1, F2, Field>( name: &str, get_ref: F1, get_mut: F2 ) -> ModuleField<'_, F1, F2, Self, Field>where F1: FnMut(&Self) -> &Field, F2: FnMut(&mut Self) -> &mut Field, Field: TensorCollection<E, D>,
source§fn tensor<F1, F2, S>(
name: &str,
get_ref: F1,
get_mut: F2,
options: TensorOptions<S, E, D>
) -> TensorField<'_, F1, F2, Self, S, E, D>where
F1: FnMut(&Self) -> &Tensor<S, E, D>,
F2: FnMut(&mut Self) -> &mut Tensor<S, E, D>,
S: Shape,
fn tensor<F1, F2, S>( name: &str, get_ref: F1, get_mut: F2, options: TensorOptions<S, E, D> ) -> TensorField<'_, F1, F2, Self, S, E, D>where F1: FnMut(&Self) -> &Tensor<S, E, D>, F2: FnMut(&mut Self) -> &mut Tensor<S, E, D>, S: Shape,
source§fn scalar<F1, F2, N>(
name: &str,
get_ref: F1,
get_mut: F2,
options: ScalarOptions<N>
) -> ScalarField<'_, F1, F2, Self, N>where
F1: FnMut(&Self) -> &N,
F2: FnMut(&mut Self) -> &mut N,
N: NumCast,
fn scalar<F1, F2, N>( name: &str, get_ref: F1, get_mut: F2, options: ScalarOptions<N> ) -> ScalarField<'_, F1, F2, Self, N>where F1: FnMut(&Self) -> &N, F2: FnMut(&mut Self) -> &mut N, N: NumCast,
source§impl<S: Shape, E: Unit, F: Unit, D: Storage<F> + Storage<E>> Trace<E, D> for Tensor<S, F, D, NoneTape>
impl<S: Shape, E: Unit, F: Unit, D: Storage<F> + Storage<E>> Trace<E, D> for Tensor<S, F, D, NoneTape>
type Traced = Tensor<S, F, D, OwnedTape<E, D>>
source§fn leaky_traced(self) -> Self::Traced
fn leaky_traced(self) -> Self::Traced
fn traced(self, gradients: Gradients<E, D>) -> Self::Traced
Starts tracking gradients using the given gradients. Use crate::nn::ZeroGrads::alloc_grads() to create gradients.
fn leaky_trace(&self) -> Self::Traced
source§impl<S: Shape, E: Dtype, D: UnaryKernel<ScalarAddKernelOp<E>, E>, T: Tape<E, D>> TryAdd<E> for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<ScalarAddKernelOp<E>, E>, T: Tape<E, D>> TryAdd<E> for Tensor<S, E, D, T>
source§impl<S: Shape, E: Dtype, D, LhsTape, R> TryAdd<Tensor<S, E, D, R>> for Tensor<S, E, D, LhsTape>where
D: BinaryKernel<BinaryAddKernelOp, E>,
LhsTape: Merge<R> + Tape<E, D>,
impl<S: Shape, E: Dtype, D, LhsTape, R> TryAdd<Tensor<S, E, D, R>> for Tensor<S, E, D, LhsTape>where D: BinaryKernel<BinaryAddKernelOp, E>, LhsTape: Merge<R> + Tape<E, D>,
source§impl<A, B: Shape, T, R, E: Dtype, D: ConcatKernel<E>> TryConcat<Tensor<B, E, D, R>> for Tensor<A, E, D, T>where
A: ConcatShape<B> + Shape,
T: Tape<E, D> + Merge<R>,
R: Tape<E, D>,
impl<A, B: Shape, T, R, E: Dtype, D: ConcatKernel<E>> TryConcat<Tensor<B, E, D, R>> for Tensor<A, E, D, T>where A: ConcatShape<B> + Shape, T: Tape<E, D> + Merge<R>, R: Tape<E, D>,
source§impl<S: Shape, E: Dtype, D: UnaryKernel<ScalarDivKernelOp<E>, E>, T: Tape<E, D>> TryDiv<E> for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<ScalarDivKernelOp<E>, E>, T: Tape<E, D>> TryDiv<E> for Tensor<S, E, D, T>
source§impl<S: Shape, E: Dtype, D, LhsTape, R> TryDiv<Tensor<S, E, D, R>> for Tensor<S, E, D, LhsTape>where
D: BinaryKernel<BinaryDivKernelOp, E>,
LhsTape: Merge<R> + Tape<E, D>,
impl<S: Shape, E: Dtype, D, LhsTape, R> TryDiv<Tensor<S, E, D, R>> for Tensor<S, E, D, LhsTape>where D: BinaryKernel<BinaryDivKernelOp, E>, LhsTape: Merge<R> + Tape<E, D>,
source§impl<S: Shape, E, D: CmpKernel<EqKernelOp, E>, T: Tape<E, D>> TryEq<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
impl<S: Shape, E, D: CmpKernel<EqKernelOp, E>, T: Tape<E, D>> TryEq<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: ScalarCmpKernel<EqKernelOp, E>, T: Tape<E, D>> TryEq<E> for Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<EqKernelOp, E>, T: Tape<E, D>> TryEq<E> for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: CmpKernel<GeKernelOp, E>, T: Tape<E, D>> TryGe<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
impl<S: Shape, E, D: CmpKernel<GeKernelOp, E>, T: Tape<E, D>> TryGe<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: ScalarCmpKernel<GeKernelOp, E>, T: Tape<E, D>> TryGe<E> for Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<GeKernelOp, E>, T: Tape<E, D>> TryGe<E> for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: CmpKernel<GtKernelOp, E>, T: Tape<E, D>> TryGt<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
impl<S: Shape, E, D: CmpKernel<GtKernelOp, E>, T: Tape<E, D>> TryGt<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: ScalarCmpKernel<GtKernelOp, E>, T: Tape<E, D>> TryGt<E> for Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<GtKernelOp, E>, T: Tape<E, D>> TryGt<E> for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: CmpKernel<LeKernelOp, E>, T: Tape<E, D>> TryLe<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
impl<S: Shape, E, D: CmpKernel<LeKernelOp, E>, T: Tape<E, D>> TryLe<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: ScalarCmpKernel<LeKernelOp, E>, T: Tape<E, D>> TryLe<E> for Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<LeKernelOp, E>, T: Tape<E, D>> TryLe<E> for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: CmpKernel<LtKernelOp, E>, T: Tape<E, D>> TryLt<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
impl<S: Shape, E, D: CmpKernel<LtKernelOp, E>, T: Tape<E, D>> TryLt<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: ScalarCmpKernel<LtKernelOp, E>, T: Tape<E, D>> TryLt<E> for Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<LtKernelOp, E>, T: Tape<E, D>> TryLt<E> for Tensor<S, E, D, T>
source§impl<B: Dim, M: Dim, K: Dim, N: Dim, E: Dtype, D, T, R> TryMatMul<Tensor<(B, K, N), E, D, R>> for Tensor<(B, M, K), E, D, T>where
D: MatMatBatch3Kernel<E>,
T: Tape<E, D> + Merge<R>,
R: Tape<E, D>,
impl<B: Dim, M: Dim, K: Dim, N: Dim, E: Dtype, D, T, R> TryMatMul<Tensor<(B, K, N), E, D, R>> for Tensor<(B, M, K), E, D, T>where D: MatMatBatch3Kernel<E>, T: Tape<E, D> + Merge<R>, R: Tape<E, D>,
fn try_matmul(self, rhs: Tensor<(B, K, N), E, D, R>) -> Result<Self::Output, Self::Err>
let x: Tensor<Rank3<1, 3, 2>, f32, _> = dev.zeros();
let y: Tensor<Rank3<1, 2, 4>, f32, _> = dev.zeros();
let _: Tensor<Rank3<1, 3, 4>, f32, _> = x.try_matmul(y)?;
type Output = Tensor<(B, M, N), E, D, T>
fn matmul(self, rhs: Rhs) -> Self::Output
source§impl<B: Dim, S: Dim, M: Dim, K: Dim, N: Dim, E: Dtype, D, T, R> TryMatMul<Tensor<(B, S, K, N), E, D, R>> for Tensor<(B, S, M, K), E, D, T>where
D: MatMatBatch4Kernel<E>,
T: Tape<E, D> + Merge<R>,
R: Tape<E, D>,
impl<B: Dim, S: Dim, M: Dim, K: Dim, N: Dim, E: Dtype, D, T, R> TryMatMul<Tensor<(B, S, K, N), E, D, R>> for Tensor<(B, S, M, K), E, D, T>where D: MatMatBatch4Kernel<E>, T: Tape<E, D> + Merge<R>, R: Tape<E, D>,
fn try_matmul(self, rhs: Tensor<(B, S, K, N), E, D, R>) -> Result<Self::Output, Self::Err>
let x: Tensor<Rank4<1, 5, 3, 2>, f32, _> = dev.zeros();
let y: Tensor<Rank4<1, 5, 2, 4>, f32, _> = dev.zeros();
let _: Tensor<Rank4<1, 5, 3, 4>, f32, _> = x.try_matmul(y)?;
type Output = Tensor<(B, S, M, N), E, D, T>
fn matmul(self, rhs: Rhs) -> Self::Output
source§impl<M: Dim, K: Dim, E: Dtype, D, T: Tape<E, D> + Merge<R>, R: Tape<E, D>> TryMatMul<Tensor<(K,), E, D, R>> for Tensor<(M, K), E, D, T>where
D: MatMatKernel<E> + ReshapeKernel<E>,
impl<M: Dim, K: Dim, E: Dtype, D, T: Tape<E, D> + Merge<R>, R: Tape<E, D>> TryMatMul<Tensor<(K,), E, D, R>> for Tensor<(M, K), E, D, T>where D: MatMatKernel<E> + ReshapeKernel<E>,
source§impl<B: Dim, M: Dim, K: Dim, N: Dim, E: Dtype, D: MatMatBrKernel<E>, T, R> TryMatMul<Tensor<(K, N), E, D, R>> for Tensor<(B, M, K), E, D, T>where
T: Tape<E, D> + Merge<R>,
R: Tape<E, D>,
impl<B: Dim, M: Dim, K: Dim, N: Dim, E: Dtype, D: MatMatBrKernel<E>, T, R> TryMatMul<Tensor<(K, N), E, D, R>> for Tensor<(B, M, K), E, D, T>where T: Tape<E, D> + Merge<R>, R: Tape<E, D>,
fn try_matmul(self, rhs: Tensor<(K, N), E, D, R>) -> Result<Self::Output, Self::Err>
let x: Tensor<Rank3<1, 3, 2>, f32, _> = dev.zeros();
let y: Tensor<Rank2<2, 4>, f32, _> = dev.zeros();
let _: Tensor<Rank3<1, 3, 4>, f32, _> = x.try_matmul(y)?;
type Output = Tensor<(B, M, N), E, D, T>
fn matmul(self, rhs: Rhs) -> Self::Output
source§impl<K: Dim, N: Dim, E: Dtype, D, T: Tape<E, D> + Merge<R>, R: Tape<E, D>> TryMatMul<Tensor<(K, N), E, D, R>> for Tensor<(K,), E, D, T>where
D: MatMatKernel<E> + ReshapeKernel<E>,
impl<K: Dim, N: Dim, E: Dtype, D, T: Tape<E, D> + Merge<R>, R: Tape<E, D>> TryMatMul<Tensor<(K, N), E, D, R>> for Tensor<(K,), E, D, T>where D: MatMatKernel<E> + ReshapeKernel<E>,
source§impl<M: Dim, K: Dim, N: Dim, E: Dtype, D: MatMatKernel<E>, T, R> TryMatMul<Tensor<(K, N), E, D, R>> for Tensor<(M, K), E, D, T>where
T: Tape<E, D> + Merge<R>,
R: Tape<E, D>,
impl<M: Dim, K: Dim, N: Dim, E: Dtype, D: MatMatKernel<E>, T, R> TryMatMul<Tensor<(K, N), E, D, R>> for Tensor<(M, K), E, D, T>where T: Tape<E, D> + Merge<R>, R: Tape<E, D>,
fn try_matmul(self, rhs: Tensor<(K, N), E, D, R>) -> Result<Self::Output, Self::Err>
let x: Tensor<Rank2<3, 2>, f32, _> = dev.zeros();
let y: Tensor<Rank2<2, 4>, f32, _> = dev.zeros();
let _: Tensor<Rank2<3, 4>, f32, _> = x.try_matmul(y)?;
type Output = Tensor<(M, N), E, D, T>
fn matmul(self, rhs: Rhs) -> Self::Output
source§impl<M: Dim, N: Dim, E: Dtype, D, T: Tape<E, D> + Merge<R>, R: Tape<E, D>> TryMatMul<Tensor<(N,), E, D, R>> for Tensor<(M,), E, D, T>where
D: MatMatKernel<E> + ReshapeKernel<E>,
impl<M: Dim, N: Dim, E: Dtype, D, T: Tape<E, D> + Merge<R>, R: Tape<E, D>> TryMatMul<Tensor<(N,), E, D, R>> for Tensor<(M,), E, D, T>where D: MatMatKernel<E> + ReshapeKernel<E>,
source§impl<S: Shape, E: Dtype, D: UnaryKernel<ScalarMulKernelOp<E>, E>, T: Tape<E, D>> TryMul<E> for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<ScalarMulKernelOp<E>, E>, T: Tape<E, D>> TryMul<E> for Tensor<S, E, D, T>
source§impl<S: Shape, E: Dtype, D: BinaryKernel<BinaryMulKernelOp, E>, LhsTape, R> TryMul<Tensor<S, E, D, R>> for Tensor<S, E, D, LhsTape>where
LhsTape: Merge<R> + Tape<E, D>,
impl<S: Shape, E: Dtype, D: BinaryKernel<BinaryMulKernelOp, E>, LhsTape, R> TryMul<Tensor<S, E, D, R>> for Tensor<S, E, D, LhsTape>where LhsTape: Merge<R> + Tape<E, D>,
source§impl<S: Shape, E, D: CmpKernel<NeKernelOp, E>, T: Tape<E, D>> TryNe<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
impl<S: Shape, E, D: CmpKernel<NeKernelOp, E>, T: Tape<E, D>> TryNe<&Tensor<S, E, D, T>> for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: ScalarCmpKernel<NeKernelOp, E>, T: Tape<E, D>> TryNe<E> for Tensor<S, E, D, T>
impl<S: Shape, E, D: ScalarCmpKernel<NeKernelOp, E>, T: Tape<E, D>> TryNe<E> for Tensor<S, E, D, T>
source§impl<S: Shape, E: Dtype, D, LhsTape, R> TryPReLU<Tensor<S, E, D, R>> for Tensor<S, E, D, LhsTape>where
D: Device<E>,
LhsTape: Merge<R> + Tape<E, D>,
impl<S: Shape, E: Dtype, D, LhsTape, R> TryPReLU<Tensor<S, E, D, R>> for Tensor<S, E, D, LhsTape>where D: Device<E>, LhsTape: Merge<R> + Tape<E, D>,
source§impl<Chan, Kernel, Stride, Padding, Dilation, Batch, H, W, E, D, T> TryPool2D<Kernel, Stride, Padding, Dilation> for Tensor<(Batch, Chan, H, W), E, D, T>where
Chan: Dim,
Kernel: Dim,
Stride: Dim,
Padding: Dim,
Dilation: Dim,
Batch: Dim,
H: Dim + TryPool2D<Kernel, Stride, Padding, Dilation>,
H::Pooled: Dim,
W: Dim + TryPool2D<Kernel, Stride, Padding, Dilation>,
W::Pooled: Dim,
E: Dtype,
D: Pool2DKernel<E>,
T: Tape<E, D>,
impl<Chan, Kernel, Stride, Padding, Dilation, Batch, H, W, E, D, T> TryPool2D<Kernel, Stride, Padding, Dilation> for Tensor<(Batch, Chan, H, W), E, D, T>where Chan: Dim, Kernel: Dim, Stride: Dim, Padding: Dim, Dilation: Dim, Batch: Dim, H: Dim + TryPool2D<Kernel, Stride, Padding, Dilation>, H::Pooled: Dim, W: Dim + TryPool2D<Kernel, Stride, Padding, Dilation>, W::Pooled: Dim, E: Dtype, D: Pool2DKernel<E>, T: Tape<E, D>,
type Pooled = Tensor<(Batch, Chan, <H as TryPool2D<Kernel, Stride, Padding, Dilation>>::Pooled, <W as TryPool2D<Kernel, Stride, Padding, Dilation>>::Pooled), E, D, T>
type Error = <D as HasErr>::Err
fn try_pool2d( self, kind: Pool2DKind, kernel: Kernel, stride: Stride, padding: Padding, dilation: Dilation ) -> Result<Self::Pooled, Self::Error>
fn pool2d( self, kind: Pool2DKind, kernel: Kernel, stride: Stride, padding: Padding, dilation: Dilation ) -> Self::Pooled
source§impl<Chan, Kernel, Stride, Padding, Dilation, H, W, E, D, T> TryPool2D<Kernel, Stride, Padding, Dilation> for Tensor<(Chan, H, W), E, D, T>where
Chan: Dim,
Kernel: Dim,
Stride: Dim,
Padding: Dim,
Dilation: Dim,
H: Dim + TryPool2D<Kernel, Stride, Padding, Dilation>,
H::Pooled: Dim,
W: Dim + TryPool2D<Kernel, Stride, Padding, Dilation>,
W::Pooled: Dim,
E: Dtype,
D: Pool2DKernel<E> + ReshapeKernel<E>,
T: Tape<E, D>,
impl<Chan, Kernel, Stride, Padding, Dilation, H, W, E, D, T> TryPool2D<Kernel, Stride, Padding, Dilation> for Tensor<(Chan, H, W), E, D, T>where Chan: Dim, Kernel: Dim, Stride: Dim, Padding: Dim, Dilation: Dim, H: Dim + TryPool2D<Kernel, Stride, Padding, Dilation>, H::Pooled: Dim, W: Dim + TryPool2D<Kernel, Stride, Padding, Dilation>, W::Pooled: Dim, E: Dtype, D: Pool2DKernel<E> + ReshapeKernel<E>, T: Tape<E, D>,
type Pooled = Tensor<(Chan, <H as TryPool2D<Kernel, Stride, Padding, Dilation>>::Pooled, <W as TryPool2D<Kernel, Stride, Padding, Dilation>>::Pooled), E, D, T>
type Error = <D as HasErr>::Err
fn try_pool2d( self, kind: Pool2DKind, kernel: Kernel, stride: Stride, padding: Padding, dilation: Dilation ) -> Result<Self::Pooled, Self::Error>
fn pool2d( self, kind: Pool2DKind, kernel: Kernel, stride: Stride, padding: Padding, dilation: Dilation ) -> Self::Pooled
source§impl<S: Shape, E: Dtype, D: UnaryKernel<ScalarSubKernelOp<E>, E>, T: Tape<E, D>> TrySub<E> for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<ScalarSubKernelOp<E>, E>, T: Tape<E, D>> TrySub<E> for Tensor<S, E, D, T>
source§impl<S: Shape, E: Dtype, D: BinaryKernel<BinarySubKernelOp, E>, LTape, R> TrySub<Tensor<S, E, D, R>> for Tensor<S, E, D, LTape>where
LTape: Merge<R> + Tape<E, D>,
impl<S: Shape, E: Dtype, D: BinaryKernel<BinarySubKernelOp, E>, LTape, R> TrySub<Tensor<S, E, D, R>> for Tensor<S, E, D, LTape>where LTape: Merge<R> + Tape<E, D>,
source§impl<S: Shape, E: Dtype, D: Storage<E>, T> TryUpscale2D for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: Storage<E>, T> TryUpscale2D for Tensor<S, E, D, T>
source§fn upscale2d<const OH: usize, const OW: usize, M: UpscaleMethod>(
self,
method: M
) -> <Self as GenericUpscale2D<M>>::Output<Const<OH>, Const<OW>>where
Self: GenericUpscale2D<M>,
fn upscale2d<const OH: usize, const OW: usize, M: UpscaleMethod>( self, method: M ) -> <Self as GenericUpscale2D<M>>::Output<Const<OH>, Const<OW>>where Self: GenericUpscale2D<M>,
source§fn try_upscale2d<const OH: usize, const OW: usize, M: UpscaleMethod>(
self,
method: M
) -> Result<<Self as GenericUpscale2D<M>>::Output<Const<OH>, Const<OW>>, Self::Err>where
Self: GenericUpscale2D<M>,
fn try_upscale2d<const OH: usize, const OW: usize, M: UpscaleMethod>( self, method: M ) -> Result<<Self as GenericUpscale2D<M>>::Output<Const<OH>, Const<OW>>, Self::Err>where Self: GenericUpscale2D<M>,
source§fn upscale2d_like<OH: Dim, OW: Dim, M: UpscaleMethod>(
self,
method: M,
height: OH,
width: OW
) -> <Self as GenericUpscale2D<M>>::Output<OH, OW>where
Self: GenericUpscale2D<M>,
fn upscale2d_like<OH: Dim, OW: Dim, M: UpscaleMethod>( self, method: M, height: OH, width: OW ) -> <Self as GenericUpscale2D<M>>::Output<OH, OW>where Self: GenericUpscale2D<M>,
source§fn try_upscale2d_like<OH: Dim, OW: Dim, M: UpscaleMethod>(
self,
method: M,
height: OH,
width: OW
) -> Result<<Self as GenericUpscale2D<M>>::Output<OH, OW>, Self::Err>where
Self: GenericUpscale2D<M>,
fn try_upscale2d_like<OH: Dim, OW: Dim, M: UpscaleMethod>( self, method: M, height: OH, width: OW ) -> Result<<Self as GenericUpscale2D<M>>::Output<OH, OW>, Self::Err>where Self: GenericUpscale2D<M>,
source§impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> VarTo for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> VarTo for Tensor<S, E, D, T>
source§impl<S: Shape, E, D: Storage<E>, T: Default> WithEmptyTape for Tensor<S, E, D, T>
impl<S: Shape, E, D: Storage<E>, T: Default> WithEmptyTape for Tensor<S, E, D, T>
source§fn with_empty_tape(&self) -> Self
fn with_empty_tape(&self) -> Self
Auto Trait Implementations
impl<S, E, D, T> RefUnwindSafe for Tensor<S, E, D, T>where D: RefUnwindSafe, S: RefUnwindSafe, T: RefUnwindSafe, <S as Shape>::Concrete: RefUnwindSafe, <D as Storage<E>>::Vec: RefUnwindSafe,
impl<S, E, D, T> Send for Tensor<S, E, D, T>where D: Send, T: Send,
impl<S, E, D, T> Sync for Tensor<S, E, D, T>where D: Sync, T: Sync,
impl<S, E, D, T> Unpin for Tensor<S, E, D, T>where D: Unpin, S: Unpin, T: Unpin, <S as Shape>::Concrete: Unpin,
impl<S, E, D, T> UnwindSafe for Tensor<S, E, D, T>where D: UnwindSafe, S: UnwindSafe, T: UnwindSafe, <S as Shape>::Concrete: UnwindSafe, <D as Storage<E>>::Vec: RefUnwindSafe,
Blanket Implementations
source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere T: ?Sized,
source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
source§impl<D, E, M> BuildModule<D, E> for Mwhere
D: Device<E>,
E: Dtype,
M: TensorCollection<E, D, To<E, D> = M>,
impl<D, E, M> BuildModule<D, E> for Mwhere D: Device<E>, E: Dtype, M: TensorCollection<E, D, To<E, D> = M>,
source§impl<E, D, T> LoadFromNpz<E, D> for Twhere
E: Dtype + NumpyDtype,
D: Device<E>,
T: TensorCollection<E, D>,
impl<E, D, T> LoadFromNpz<E, D> for Twhere E: Dtype + NumpyDtype, D: Device<E>, T: TensorCollection<E, D>,
source§impl<E, D, T> LoadFromSafetensors<E, D> for Twhere
E: Dtype + SafeDtype,
D: Device<E>,
T: TensorCollection<E, D>,
impl<E, D, T> LoadFromSafetensors<E, D> for Twhere E: Dtype + SafeDtype, D: Device<E>, T: TensorCollection<E, D>,
source§impl<E, D, M> NumParams<E, D> for Mwhere
E: Dtype,
D: Device<E>,
M: TensorCollection<E, D>,
impl<E, D, M> NumParams<E, D> for Mwhere E: Dtype, D: Device<E>, M: TensorCollection<E, D>,
source§fn num_trainable_params(&self) -> usize
fn num_trainable_params(&self) -> usize
§impl<T> Pointable for T
impl<T> Pointable for T
source§impl<E, D, M> ResetParams<E, D> for Mwhere
E: Dtype,
D: Device<E>,
M: TensorCollection<E, D>,
impl<E, D, M> ResetParams<E, D> for Mwhere E: Dtype, D: Device<E>, M: TensorCollection<E, D>,
source§fn reset_params(&mut self)
fn reset_params(&mut self)
source§impl<E, D, T> SaveToNpz<E, D> for Twhere
E: Dtype + NumpyDtype,
D: Device<E>,
T: TensorCollection<E, D>,
impl<E, D, T> SaveToNpz<E, D> for Twhere E: Dtype + NumpyDtype, D: Device<E>, T: TensorCollection<E, D>,
source§impl<E, D, T> SaveToSafetensors<E, D> for Twhere
E: Dtype + SafeDtype,
D: Device<E>,
T: TensorCollection<E, D>,
impl<E, D, T> SaveToSafetensors<E, D> for Twhere E: Dtype + SafeDtype, D: Device<E>, T: TensorCollection<E, D>,
source§fn save_safetensors<P: AsRef<Path>>(
&self,
path: P
) -> Result<(), SafeTensorError>
fn save_safetensors<P: AsRef<Path>>( &self, path: P ) -> Result<(), SafeTensorError>
source§impl<E, D1, D2, T> ToDevice<E, D1, D2> for Twhere
E: Dtype,
D1: Device<E>,
D2: Device<E>,
T: TensorCollection<E, D1>,
impl<E, D1, D2, T> ToDevice<E, D1, D2> for Twhere E: Dtype, D1: Device<E>, D2: Device<E>, T: TensorCollection<E, D1>,
source§impl<E1, D, T> ToDtype<E1, D> for Twhere
E1: Dtype,
D: Device<E1>,
T: TensorCollection<E1, D>,
impl<E1, D, T> ToDtype<E1, D> for Twhere E1: Dtype, D: Device<E1>, T: TensorCollection<E1, D>,
source§impl<E, D, M> ZeroGrads<E, D> for Mwhere
E: Dtype,
D: Device<E>,
M: TensorCollection<E, D>,
impl<E, D, M> ZeroGrads<E, D> for Mwhere E: Dtype, D: Device<E>, M: TensorCollection<E, D>,
fn alloc_grads(&self) -> Gradients<E, D>
fn try_alloc_grads(&self) -> Result<Gradients<E, D>, D::Err>
fn zero_grads(&self, gradients: &mut Gradients<E, D>)
Zeroes any gradients associated with self.
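Putting the gradient helpers together, a rough sketch of allocating gradients once, tracing with them, and zeroing them between steps; the model tensor and loss are illustrative:

let dev: Cpu = Default::default();
let model: Tensor<Rank1<4>, f32, _> = dev.sample_normal();
// Pre-allocate gradient storage for every tensor in the collection.
let mut grads = model.alloc_grads();
// Trace with the pre-allocated gradients, compute a scalar loss, backprop.
let loss = model.clone().traced(grads).square().mean();
grads = loss.backward();
// Reset the accumulated gradients before the next step.
model.zero_grads(&mut grads);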