pub struct Tensor<S: Shape, E: Unit, D: DeviceStorage, T = NoneTape> { /* private fields */ }
The single tensor struct that stores nd arrays and tapes.
See module level documentation on how to create and use tensors.
Generics:
- Shape - the shape of the underlying nd array
- Dtype - the type of the data stored in the array
- DeviceStorage - the device the array is stored on
- Tape - the tape the tensor has
Examples:
// A 1d tensor with 1000 f32 elements, stored on the Cpu
type A = Tensor<Rank1<1000>, f32, Cpu>;
// A 2d tensor with bool elements, stored on the Cpu
type B = Tensor<Rank2<2, 3>, bool, Cpu>;
// A 3d tensor with usize elements, stored on the Cpu, without any tape
type C = Tensor<Rank3<4, 2, 3>, usize, Cpu, NoneTape>;
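These aliases can be instantiated from a device; a minimal sketch, assuming the dfdx prelude is in scope:
// create a Cpu device and allocate a zero-filled tensor of type A
let dev: Cpu = Default::default();
let a: Tensor<Rank1<1000>, f32, _> = dev.zeros();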
Implementations
impl<S: Shape, E: Dtype + NumpyDtype, D: DeviceStorage + CopySlice<E>, T> Tensor<S, E, D, T>
pub fn write_to_npz<W: Write + Seek>(&self, w: &mut ZipWriter<W>, filename: String) -> ZipResult<()>
Writes the tensor's data to a new file named filename inside the zip archive.
pub fn read_from_npz<R: Read + Seek>(&mut self, r: &mut ZipArchive<R>, filename: String) -> Result<(), NpzError>
Reads the tensor's data from the file named filename already present in the zip archive.
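A hedged sketch of a save/load round trip with these two methods, assuming the crate's numpy support and the zip crate; "weights.npz" and the entry name "t.npy" are illustrative:
use std::fs::File;
use zip::{ZipArchive, ZipWriter};

let dev: Cpu = Default::default();
let t: Tensor<Rank2<2, 3>, f32, _> = dev.sample_normal();
// write the tensor into a fresh npz (zip) archive
let mut w = ZipWriter::new(File::create("weights.npz")?);
t.write_to_npz(&mut w, "t.npy".to_string())?;
w.finish()?;
// read it back into a freshly allocated tensor
let mut r = ZipArchive::new(File::open("weights.npz")?)?;
let mut t2: Tensor<Rank2<2, 3>, f32, _> = dev.zeros();
t2.read_from_npz(&mut r, "t.npy".to_string())?;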
impl<S: Shape, E: Dtype + SafeDtype, D: CopySlice<E>, T> Tensor<S, E, D, T>
pub fn load_safetensor(&mut self, tensors: &SafeTensors<'_>, key: &str) -> Result<(), Error>
Loads data from the SafeTensors storage with the given key.
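A minimal sketch of loading from a safetensors file, assuming the safetensors crate's SafeTensors::deserialize; "model.safetensors" and the key "weight" are illustrative:
use safetensors::SafeTensors;

let bytes = std::fs::read("model.safetensors")?;
let tensors = SafeTensors::deserialize(&bytes)?;
let mut t: Tensor<Rank2<2, 3>, f32, _> = dev.zeros();
t.load_safetensor(&tensors, "weight")?;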
impl<S: Shape, E: Unit, D: CopySlice<E>, T> Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: ZeroFillStorage<E>, T> Tensor<S, E, D, T>
pub fn fill_with_zeros(&mut self)
Fills the tensor with zeros
pub fn try_fill_with_zeros(&mut self) -> Result<(), D::Err>
Fallible version of Tensor::fill_with_zeros
impl<S: Shape, E: Dtype, D: OneFillStorage<E>, T> Tensor<S, E, D, T>
pub fn fill_with_ones(&mut self)
Fills the tensor with ones
pub fn try_fill_with_ones(&mut self) -> Result<(), D::Err>
Fallible version of Tensor::fill_with_ones
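Both fill methods mutate an existing tensor in place; for example, with a Cpu device dev as before:
let mut t: Tensor<Rank2<2, 3>, f32, _> = dev.sample_normal();
t.fill_with_zeros(); // every element is now 0.0
t.fill_with_ones();  // every element is now 1.0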
impl<S: Shape, E: Unit, D: SampleTensor<E>, T> Tensor<S, E, D, T>
pub fn fill_with_distr<Distr: Distribution<E>>(&mut self, distr: Distr)
Fills the tensor with random data from the distribution
pub fn try_fill_with_distr<Distr: Distribution<E>>(&mut self, distr: Distr) -> Result<(), D::Err>
Fallible version of Tensor::fill_with_distr
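A short sketch using the rand crate's Standard distribution (any Distribution<E> works):
use rand::distributions::Standard;

let mut t: Tensor<Rank1<5>, f32, _> = dev.zeros();
t.fill_with_distr(Standard); // uniform samples from [0, 1)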
impl<S: Shape, E: Unit, D: AxpyKernel<E>> Tensor<S, E, D>
impl<S: Shape, E: Dtype, D: BinaryKernel<BCEKernelOp, E>, LTape: Tape<E, D>> Tensor<S, E, D, LTape>
pub fn bce_with_logits<RTape: Tape<E, D>>(self, prob: Tensor<S, E, D, RTape>) -> Self where LTape: Merge<RTape>
See bce_with_logits
pub fn try_bce_with_logits<RTape>(self, prob: Tensor<S, E, D, RTape>) -> Result<Self, D::Err> where RTape: Tape<E, D>, LTape: Merge<RTape>
See bce_with_logits
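Here self holds logits and prob holds target probabilities; a sketch with random data:
let logits: Tensor<Rank1<3>, f32, _> = dev.sample_normal();
let target_probs: Tensor<Rank1<3>, f32, _> = dev.sample_uniform();
let loss = logits.leaky_trace().bce_with_logits(target_probs);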
impl<S: Shape, E: Dtype, D: UnaryKernel<ClampKernelOp<E>, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Tensor<S, E, D, T>
pub fn huber_error<R: Tape<E, D>>(self, rhs: Tensor<S, E, D, R>, delta: E) -> Self where T: Merge<R>
See huber_error
pub fn try_huber_error<R: Tape<E, D>>(self, rhs: Tensor<S, E, D, R>, delta: E) -> Result<Self, D::Err> where T: Merge<R>
See huber_error
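For example, with delta = 1.0 as an arbitrary threshold between the squared and linear regimes:
let pred: Tensor<Rank1<3>, f32, _> = dev.sample_normal();
let target: Tensor<Rank1<3>, f32, _> = dev.sample_normal();
let err = pred.huber_error(target, 1.0); // elementwise Huber error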
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Tensor<S, E, D, T>
pub fn log_softmax<Ax: Axes>(self) -> Self where S: ReduceShape<Ax>
See log_softmax
pub fn try_log_softmax<Ax: Axes>(self) -> Result<Self, D::Err> where S: ReduceShape<Ax>
See log_softmax
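For example, normalizing along the last axis of a rank-2 tensor:
let t: Tensor<Rank2<2, 3>, f32, _> = dev.sample_normal();
let log_probs = t.log_softmax::<Axis<1>>(); // each row exponentiates to a probability distribution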
impl<S: Shape, E: Dtype, D: UnaryKernel<NansToKernelOp<E>, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<NegateKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<PowfKernelOp<E>, E>, T: Tape<E, D>> Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: UnaryKernel<SigmoidKernelOp, E>, T: Tape<E, D>> Tensor<S, E, D, T>
Trait Implementations
impl<S: Shape, E: Dtype, D: DeviceStorage, LhsTape: Tape<E, D>, Rhs> Add<Rhs> for Tensor<S, E, D, LhsTape> where Self: TryAdd<Rhs>
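Because this impl delegates to TryAdd, both tensor + tensor and tensor + scalar work with the plain + operator; a sketch:
let a: Tensor<Rank1<3>, f32, _> = dev.ones();
let b: Tensor<Rank1<3>, f32, _> = dev.ones();
let _ = a.clone() + b; // elementwise addition
let _ = a + 1.0;       // scalar addition via TryAdd<E>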
impl<S: Shape, D: BooleanKernel> BitAnd<&Tensor<S, bool, D, NoneTape>> for &Tensor<S, bool, D>
impl<S: Shape, D: BooleanKernel> BitOr<&Tensor<S, bool, D, NoneTape>> for &Tensor<S, bool, D>
impl<S: Shape, D: BooleanKernel> BitXor<&Tensor<S, bool, D, NoneTape>> for &Tensor<S, bool, D>
impl<S: Shape, E: Unit, D: DeviceStorage, T: Tape<E, D>> BroadcastTo for Tensor<S, E, D, T>
fn try_broadcast_like<Dst: Shape, Ax: Axes>(self, dst: &Dst) -> Result<Self::WithShape<Dst>, Self::Err> where Self::Shape: BroadcastShapeTo<Dst, Ax>
fn broadcast<Dst: ConstShape, Ax: Axes>(self) -> Self::WithShape<Dst> where Self::Shape: BroadcastShapeTo<Dst, Ax>
fn try_broadcast<Dst: ConstShape, Ax: Axes>(self) -> Result<Self::WithShape<Dst>, Self::Err> where Self::Shape: BroadcastShapeTo<Dst, Ax>
fn broadcast_like<Dst: Shape, Ax: Axes>(self, dst: &Dst) -> Self::WithShape<Dst> where Self::Shape: BroadcastShapeTo<Dst, Ax>
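For example, annotating the destination shape lets the broadcast axes be inferred:
let t: Tensor<Rank1<3>, f32, _> = dev.zeros();
let _: Tensor<Rank2<2, 3>, f32, _> = t.broadcast(); // repeat along a new first axis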
impl<S: Shape, E: Dtype, D: ChooseKernel<E>, LhsTape: Tape<E, D> + Merge<RhsTape>, RhsTape: Tape<E, D>> ChooseFrom<Tensor<S, E, D, LhsTape>, Tensor<S, E, D, RhsTape>> for Tensor<S, bool, D>
impl<S: Clone + Shape, E: Clone + Unit, D: Clone + DeviceStorage, T: Clone> Clone for Tensor<S, E, D, T> where D::Vec<E>: Clone, S::Concrete: Clone
impl<S: Debug + Shape, E: Debug + Unit, D: Debug + DeviceStorage, T: Debug> Debug for Tensor<S, E, D, T> where D::Vec<E>: Debug, S::Concrete: Debug
impl<S: Shape, E: Dtype, D: Device<E>, LhsTape: Tape<E, D>, Rhs> Div<Rhs> for Tensor<S, E, D, LhsTape> where Self: TryDiv<Rhs>
impl<Src: Shape, E: Dtype, D: ReplaceDimKernel<E>, T: Tape<E, D>> GatherTo<D> for Tensor<Src, E, D, T>
impl<S: Shape, E: Unit, D: DeviceStorage, T> HasUnitType for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> LogSumExpTo for Tensor<S, E, D, T>
fn try_logsumexp<Dst: Shape, Ax: Axes>(self) -> Result<Self::WithShape<Dst>, Self::Err> where Self::Shape: ReduceShapeTo<Dst, Ax>
impl<S: Shape, E: Dtype, D: MaxReduceKernel<E>, T: Tape<E, D>> MaxTo for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> MeanTo for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D: MinReduceKernel<E>, T: Tape<E, D>> MinTo for Tensor<S, E, D, T>
impl<B: Dim, C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, C, H, W), E, D, T>> for AvgPoolGlobal
impl<B: Dim, C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, C, H, W), E, D, T>> for MaxPoolGlobal
impl<B: Dim, C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, C, H, W), E, D, T>> for MinPoolGlobal
impl<B: Dim, const C: usize, E: Dtype, D: Device<E>> Module<Tensor<(B, Const<C>), E, D, NoneTape>> for BatchNorm1D<C, E, D>
fn try_forward(&self, x: Tensor<(B, Const<C>), E, D, NoneTape>) -> Result<Self::Output, D::Err>
Inference 1d forward - does not update Self::running_mean and Self::running_var
type Error = <D as HasErr>::Err
impl<B: Dim, const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> Module<Tensor<(B, Const<C>, H, W), E, D, NoneTape>> for BatchNorm2D<C, E, D>
fn try_forward(&self, x: Tensor<(B, Const<C>, H, W), E, D, NoneTape>) -> Result<Self::Output, D::Err>
Inference 4d forward - does not update Self::running_mean and Self::running_var
type Output = Tensor<(B, Const<C>, H, W), E, D, NoneTape>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
impl<B: Dim, const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, Const<C>, H, W), E, D, T>> for Bias2D<C, E, D>
impl<B: Dim, const C: usize, L: Dim, E: Dtype, D: Device<E>> Module<Tensor<(B, Const<C>, L), E, D, NoneTape>> for BatchNorm1D<C, E, D>
fn try_forward(&self, x: Tensor<(B, Const<C>, L), E, D, NoneTape>) -> Result<Self::Output, D::Err>
Inference 2d forward - does not update Self::running_mean and Self::running_var
type Output = Tensor<(B, Const<C>, L), E, D, NoneTape>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
impl<B: Dim, const M: usize, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, Const<M>), E, D, T>> for LayerNorm1D<M, E, D>
impl<B: Dim, S: Dim, const M: usize, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(B, S, Const<M>), E, D, T>> for LayerNorm1D<M, E, D>
impl<const VOCAB: usize, const DIM: usize, BATCH: Dim, SEQ: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(BATCH, SEQ), usize, D, T>> for Embedding<VOCAB, DIM, E, D>
impl<C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(C, H, W), E, D, T>> for AvgPoolGlobal
impl<C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(C, H, W), E, D, T>> for MaxPoolGlobal
impl<C: Dim, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(C, H, W), E, D, T>> for MinPoolGlobal
impl<const B: usize, const C: usize, const H: usize, const W: usize, D, E: Dtype, T> Module<Tensor<(Const<B>, Const<C>, Const<H>, Const<W>), E, D, T>> for Flatten2D where D: Device<E>, T: Tape<E, D>, Rank2<B, { _ }>: Sized
impl<const C: usize, const H: usize, const W: usize, D: Device<E>, E: Dtype, T: Tape<E, D>> Module<Tensor<(Const<C>, Const<H>, Const<W>), E, D, T>> for Flatten2D where Rank1<{ _ }>: Sized
impl<const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> Module<Tensor<(Const<C>, H, W), E, D, NoneTape>> for BatchNorm2D<C, E, D>
fn try_forward(&self, x: Tensor<(Const<C>, H, W), E, D, NoneTape>) -> Result<Self::Output, D::Err>
Inference 3d forward - does not update Self::running_mean and Self::running_var
type Output = Tensor<(Const<C>, H, W), E, D, NoneTape>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
impl<const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(Const<C>, H, W), E, D, T>> for Bias2D<C, E, D>
impl<const M: usize, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(Const<M>,), E, D, T>> for LayerNorm1D<M, E, D>
impl<const V: usize, const M: usize, SEQ: Dim, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<(SEQ,), usize, D, T>> for Embedding<V, M, E, D>
impl<S: Shape, E: Dtype, D: Device<E>> Module<Tensor<S, E, D, NoneTape>> for Dropout
impl<const N: usize, S: Shape, E: Dtype, D: Device<E>> Module<Tensor<S, E, D, NoneTape>> for DropoutOneIn<N>
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Abs
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Cos
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Exp
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for GeLU
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Ln
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for ReLU
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Sigmoid
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Sin
impl<Ax: Axes, S, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Softmax where S: Shape<LastAxis = Ax> + ReduceShape<Ax>
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Sqrt
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Square
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Module<Tensor<S, E, D, T>> for Tanh
impl<B: Dim, const C: usize, E: Dtype, D: Device<E>> ModuleMut<Tensor<(B, Const<C>), E, D, OwnedTape<E, D>>> for BatchNorm1D<C, E, D>
fn try_forward_mut(&mut self, x: Tensor<(B, Const<C>), E, D, OwnedTape<E, D>>) -> Result<Self::Output, D::Err>
Training 1d forward - updates Self::running_mean and Self::running_var
type Output = Tensor<(B, Const<C>), E, D, OwnedTape<E, D>>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
fn forward_mut(&mut self, input: Input) -> Self::Output
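The split between Module (inference) and ModuleMut (training) can be exercised end to end; a hedged sketch, assuming dfdx's DeviceBuildExt::build_module and ZeroGrads::alloc_grads helpers:
let mut bn = dev.build_module::<BatchNorm1D<4>, f32>();
let x: Tensor<Rank2<8, 4>, f32, _> = dev.sample_normal();
let _y = bn.forward(x.clone());           // inference: running stats unchanged
let grads = bn.alloc_grads();
let _y = bn.forward_mut(x.traced(grads)); // training: running stats updated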
impl<B: Dim, const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> ModuleMut<Tensor<(B, Const<C>, H, W), E, D, OwnedTape<E, D>>> for BatchNorm2D<C, E, D>
fn try_forward_mut(&mut self, x: Tensor<(B, Const<C>, H, W), E, D, OwnedTape<E, D>>) -> Result<Self::Output, D::Err>
Training 4d forward - updates Self::running_mean and Self::running_var
type Output = Tensor<(B, Const<C>, H, W), E, D, OwnedTape<E, D>>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
fn forward_mut(&mut self, input: Input) -> Self::Output
impl<B: Dim, const C: usize, L: Dim, E: Dtype, D: Device<E>> ModuleMut<Tensor<(B, Const<C>, L), E, D, OwnedTape<E, D>>> for BatchNorm1D<C, E, D>
fn try_forward_mut(&mut self, x: Tensor<(B, Const<C>, L), E, D, OwnedTape<E, D>>) -> Result<Self::Output, D::Err>
Training 2d forward - updates Self::running_mean and Self::running_var
type Output = Tensor<(B, Const<C>, L), E, D, OwnedTape<E, D>>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
fn forward_mut(&mut self, input: Input) -> Self::Output
impl<const C: usize, H: Dim, W: Dim, E: Dtype, D: Device<E>> ModuleMut<Tensor<(Const<C>, H, W), E, D, OwnedTape<E, D>>> for BatchNorm2D<C, E, D>
fn try_forward_mut(&mut self, x: Tensor<(Const<C>, H, W), E, D, OwnedTape<E, D>>) -> Result<Self::Output, D::Err>
Training 3d forward - updates Self::running_mean and Self::running_var
type Output = Tensor<(Const<C>, H, W), E, D, OwnedTape<E, D>>
The type that this unit produces given Input.
type Error = <D as HasErr>::Err
fn forward_mut(&mut self, input: Input) -> Self::Output
impl<S: Shape, E: Dtype, D: Device<E>> ModuleMut<Tensor<S, E, D, OwnedTape<E, D>>> for Dropout
impl<const N: usize, S: Shape, E: Dtype, D: Device<E>> ModuleMut<Tensor<S, E, D, OwnedTape<E, D>>> for DropoutOneIn<N>
impl<S: Shape, E: Dtype, D: Device<E>, LhsTape: Tape<E, D>, Rhs> Mul<Rhs> for Tensor<S, E, D, LhsTape> where Self: TryMul<Rhs>
impl<S: Shape, E: Dtype, D: UnaryKernel<NegateKernelOp, E>, T: Tape<E, D>> Neg for Tensor<S, E, D, T>
impl<S: Shape, E: Unit, D: DeviceStorage, T: Tape<E, D>> PermuteTo for Tensor<S, E, D, T>
fn try_permute<Dst: Shape, Ax: Axes>(self) -> Result<Self::WithShape<Dst>, Self::Err> where Self::Shape: PermuteShapeTo<Dst, Ax>
impl<S: Shape, E: Dtype, D: ReshapeKernel<E>, T: Tape<E, D>> ReshapeTo for Tensor<S, E, D, T>
impl<Src: Shape, E: Dtype, D: RemoveDimKernel<E>, T: Tape<E, D>> SelectTo<D> for Tensor<Src, E, D, T>
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> StddevTo<E> for Tensor<S, E, D, T>
fn try_stddev<Dst: Shape, Ax: Axes>(self, epsilon: E) -> Result<Self::WithShape<Dst>, Self::Err> where Self::Shape: HasAxes<Ax> + ReduceShapeTo<Dst, Ax>
impl<S: Shape, E: Dtype, D: Device<E>, LTape: Tape<E, D>, Rhs> Sub<Rhs> for Tensor<S, E, D, LTape> where Self: TrySub<Rhs>
impl<S: Shape, E: Dtype, D: SumKernel<E>, T: Tape<E, D>> SumTo for Tensor<S, E, D, T>
impl<S: ConstShape, E: Dtype, D: Device<E>> TensorCollection<E, D> for Tensor<S, E, D>
type To<E2: Dtype, D2: Device<E2>> = Tensor<S, E2, D2, NoneTape>
fn iter_tensors<V: ModuleVisitor<Self, E, D>>(visitor: &mut V) -> Result<Option<Self::To<V::E2, V::D2>>, V::Err>
Returns Err(_) to indicate an error, Ok(None) to indicate that there is no error and a module has not been built, and Ok(Some(_)) containing Self::To<E2, D2>.
fn module<F1, F2, Field>(name: &str, get_ref: F1, get_mut: F2) -> ModuleField<'_, F1, F2, Self, Field> where F1: FnMut(&Self) -> &Field, F2: FnMut(&mut Self) -> &mut Field, Field: TensorCollection<E, D>
fn tensor<F1, F2, S>(name: &str, get_ref: F1, get_mut: F2, options: TensorOptions<S, E, D>) -> TensorField<'_, F1, F2, Self, S, E, D> where F1: FnMut(&Self) -> &Tensor<S, E, D>, F2: FnMut(&mut Self) -> &mut Tensor<S, E, D>, S: Shape
impl<S: Shape, E: Unit, F: Unit, D: DeviceStorage> Trace<E, D> for Tensor<S, F, D, NoneTape>
type Traced = Tensor<S, F, D, OwnedTape<E, D>>
fn leaky_traced(self) -> Self::Traced
fn traced(self, gradients: Gradients<E, D>) -> Self::Traced
Starts tracking gradients, accumulating into the given gradients. Use crate::nn::ZeroGrads::alloc_grads() to create gradients.
fn leaky_trace(&self) -> Self::Traced
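For example, pairing traced with pre-allocated gradients (alloc_grads is assumed from crate::nn::ZeroGrads, as noted above):
let t: Tensor<Rank1<3>, f32, _> = dev.sample_normal();
let grads = t.alloc_grads();
let t = t.traced(grads); // now Tensor<Rank1<3>, f32, Cpu, OwnedTape<f32, Cpu>>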
impl<S: Shape, E: Dtype, D: UnaryKernel<ScalarAddKernelOp<E>, E>, T: Tape<E, D>> TryAdd<E> for Tensor<S, E, D, T>
impl<S: Shape, E: Dtype, D, LhsTape, R> TryAdd<Tensor<S, E, D, R>> for Tensor<S, E, D, LhsTape> where D: BinaryKernel<BinaryAddKernelOp, E>, LhsTape: Merge<R> + Tape<E, D>
impl<A, B: Shape, T, R, E: Dtype, D: ConcatKernel<E>> TryConcat<Tensor<B, E, D, R>> for Tensor<A, E, D, T> where A: ConcatShape<B> + Shape, T: Tape<E, D> + Merge<R>, R: Tape<E, D>
impl<S: Shape, E: Dtype, D: Device<E>, LhsTape, R> TryDiv<Tensor<S, E, D, R>> for Tensor<S, E, D, LhsTape> where LhsTape: Merge<R> + Tape<E, D>
impl<B: Dim, M: Dim, K: Dim, N: Dim, E: Dtype, D, T, R> TryMatMul<Tensor<(B, K, N), E, D, R>> for Tensor<(B, M, K), E, D, T> where D: MatMatBatch3Kernel<E>, T: Tape<E, D> + Merge<R>, R: Tape<E, D>
fn try_matmul(self, rhs: Tensor<(B, K, N), E, D, R>) -> Result<Self::Output, Self::Err>
let x: Tensor<Rank3<1, 3, 2>, f32, _> = dev.zeros();
let y: Tensor<Rank3<1, 2, 4>, f32, _> = dev.zeros();
let _: Tensor<Rank3<1, 3, 4>, f32, _> = x.try_matmul(y)?;
type Output = Tensor<(B, M, N), E, D, T>
fn matmul(self, rhs: Rhs) -> Self::Output
impl<B: Dim, S: Dim, M: Dim, K: Dim, N: Dim, E: Dtype, D, T, R> TryMatMul<Tensor<(B, S, K, N), E, D, R>> for Tensor<(B, S, M, K), E, D, T> where D: MatMatBatch4Kernel<E>, T: Tape<E, D> + Merge<R>, R: Tape<E, D>
fn try_matmul(self, rhs: Tensor<(B, S, K, N), E, D, R>) -> Result<Self::Output, Self::Err>
let x: Tensor<Rank4<1, 5, 3, 2>, f32, _> = dev.zeros();
let y: Tensor<Rank4<1, 5, 2, 4>, f32, _> = dev.zeros();
let _: Tensor<Rank4<1, 5, 3, 4>, f32, _> = x.try_matmul(y)?;
type Output = Tensor<(B, S, M, N), E, D, T>
fn matmul(self, rhs: Rhs) -> Self::Output
impl<B: Dim, M: Dim, K: Dim, N: Dim, E: Dtype, D: MatMatBrKernel<E>, T, R> TryMatMul<Tensor<(K, N), E, D, R>> for Tensor<(B, M, K), E, D, T> where T: Tape<E, D> + Merge<R>, R: Tape<E, D>
fn try_matmul(self, rhs: Tensor<(K, N), E, D, R>) -> Result<Self::Output, Self::Err>
let x: Tensor<Rank3<1, 3, 2>, f32, _> = dev.zeros();
let y: Tensor<Rank2<2, 4>, f32, _> = dev.zeros();
let _: Tensor<Rank3<1, 3, 4>, f32, _> = x.try_matmul(y)?;
type Output = Tensor<(B, M, N), E, D, T>
fn matmul(self, rhs: Rhs) -> Self::Output
impl<K: Dim, N: Dim, E: Dtype, D: VecMatKernel<E>, T: Tape<E, D> + Merge<R>, R: Tape<E, D>> TryMatMul<Tensor<(K, N), E, D, R>> for Tensor<(K,), E, D, T>
impl<M: Dim, K: Dim, N: Dim, E: Dtype, D: MatMatKernel<E>, T, R> TryMatMul<Tensor<(K, N), E, D, R>> for Tensor<(M, K), E, D, T> where T: Tape<E, D> + Merge<R>, R: Tape<E, D>
fn try_matmul(self, rhs: Tensor<(K, N), E, D, R>) -> Result<Self::Output, Self::Err>
let x: Tensor<Rank2<3, 2>, f32, _> = dev.zeros();
let y: Tensor<Rank2<2, 4>, f32, _> = dev.zeros();
let _: Tensor<Rank2<3, 4>, f32, _> = x.try_matmul(y)?;