pub struct ReLu<U, D> { /* private fields */ }
Implementation of the ReLU (Rectified Linear Unit) activation function, which computes f(x) = max(0, x) element-wise.
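ReLU itself is a simple element-wise function: f(x) = max(0, x), with derivative 1 for positive inputs and 0 otherwise. The sketch below illustrates only that math in plain Rust; it does not use this crate's types, which the trait implementations listed further down operate on.

```rust
// Standalone illustration of the math ReLu implements; not this crate's API.
fn relu(x: f32) -> f32 {
    // f(x) = max(0, x)
    x.max(0.0)
}

fn relu_grad(u: f32) -> f32 {
    // f'(u) = 1 for u > 0, 0 otherwise (the value at u == 0 is taken as 0 here)
    if u > 0.0 { 1.0 } else { 0.0 }
}

fn main() {
    assert_eq!(relu(-2.0), 0.0);
    assert_eq!(relu(3.5), 3.5);
    assert_eq!(relu_grad(3.5), 1.0);
    assert_eq!(relu_grad(-2.0), 0.0);
}
```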
Implementations

Trait Implementations
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>
where
    U: UnitValue<U>,
fn apply(
    &self,
    device: &DeviceCpu<U>,
    input: &Arr<U, N>,
) -> Result<Arr<U, N>, EvaluateError>

Apply the activation function.
fn derive(
    &self,
    device: &DeviceCpu<U>,
    o: &Arr<U, N>,
    loss: &Arr<U, N>,
    u: &Arr<U, N>,
) -> Result<Arr<U, N>, TrainingError>

Apply the derivative of the activation function.
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool

Returns whether the canonical link function can be used with the given loss function.
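A minimal sketch of driving this CPU implementation directly. Only `apply`, `derive`, and the types in the signatures above are taken from this page; the module paths, the constructors (`DeviceCpu::new()`, `ReLu::new(&device)`, `Arr::new()`), and the element-wise indexing are assumptions about the rest of the crate's API and may need adjusting.

```rust
use nncombinator::activation::ReLu;
use nncombinator::arr::Arr;
use nncombinator::device::DeviceCpu;

fn relu_cpu_roundtrip() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed constructors; adjust if the crate's actual API differs.
    let device: DeviceCpu<f32> = DeviceCpu::new()?;
    let relu: ReLu<f32, DeviceCpu<f32>> = ReLu::new(&device);

    // Assumed: Arr::new() gives a zero-initialized fixed-size array that supports indexing.
    let mut input: Arr<f32, 4> = Arr::new();
    for (i, v) in [-1.0f32, 0.0, 0.5, 2.0].into_iter().enumerate() {
        input[i] = v;
    }

    // Forward pass (documented above): negative entries map to 0, the rest pass through.
    let output = relu.apply(&device, &input)?;

    // Backward pass (documented above): `derive` combines the incoming loss with the
    // activation's derivative, given the forward output `o` and pre-activation `u`.
    let loss: Arr<f32, 4> = Arr::new();
    let gradient = relu.derive(&device, &output, &loss, &input)?;
    let _ = gradient;

    Ok(())
}
```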
impl<'a, U, const N: usize> Activation<U, ArrView<'a, U, N>, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>
where
    U: UnitValue<U>,
fn apply(
    &self,
    device: &DeviceCpu<U>,
    input: &ArrView<'a, U, N>,
) -> Result<Arr<U, N>, EvaluateError>

Apply the activation function.
fn derive(
    &self,
    device: &DeviceCpu<U>,
    o: &ArrView<'a, U, N>,
    loss: &ArrView<'a, U, N>,
    u: &ArrView<'a, U, N>,
) -> Result<Arr<U, N>, TrainingError>

Apply the derivative of the activation function.
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool

Returns whether the canonical link function can be used with the given loss function.
impl<'a, U, const N: usize> Activation<U, CudaTensor1dPtrView<'a, U, N>, CudaTensor1dPtr<U, N>, DeviceGpu<U>> for ReLu<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    for<'b> ReLuForward<'b, U, N>: Kernel<Args = ActivationForwardArgs<'b, U, N>>,
    for<'b> ReLuBackward<'b, U, N>: Kernel<Args = ActivationBackwardArgs<'b, U, N>>,
fn apply(
    &self,
    device: &DeviceGpu<U>,
    input: &CudaTensor1dPtrView<'a, U, N>,
) -> Result<CudaTensor1dPtr<U, N>, EvaluateError>

Apply the activation function.
fn derive(
    &self,
    device: &DeviceGpu<U>,
    o: &CudaTensor1dPtrView<'a, U, N>,
    loss: &CudaTensor1dPtrView<'a, U, N>,
    u: &CudaTensor1dPtrView<'a, U, N>,
) -> Result<CudaTensor1dPtr<U, N>, TrainingError>

Apply the derivative of the activation function.
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool

Returns whether the canonical link function can be used with the given loss function.
impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>
fn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>

Apply the activation function.
fn derive(
    &self,
    _: &DeviceCpu<U>,
    _: &I,
    loss: &I,
    u: &I,
) -> Result<Arr<U, N>, TrainingError>

Apply the derivative of the activation function.
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool

Returns whether the canonical link function can be used with the given loss function.
impl<'a, U, const N: usize> BatchActivation<U, CudaVecView<'a, U, CudaTensor1dPtr<U, N>>, CudaVec<U, CudaTensor1dPtr<U, N>>, DeviceGpu<U>> for ReLu<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    for<'b> ReLuBatchForward<'b, U, N>: Kernel<Args = ActivationBatchForwardArgs<'b, U, N>>,
    for<'b> ReLuBatchBackward<'b, U, N>: Kernel<Args = ActivationBatchBackwardArgs<'b, U, N>>,
fn batch_apply(
    &self,
    device: &DeviceGpu<U>,
    input: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>

Apply the activation function to a batch of inputs.
fn batch_derive(
    &self,
    device: &DeviceGpu<U>,
    o: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    loss: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    u: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>

Apply the derivative of the activation function to a batch of inputs.
impl<U, const N: usize> BatchActivation<U, SerializedVec<U, Arr<U, N>>, SerializedVec<U, Arr<U, N>>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>
fn batch_apply(
    &self,
    device: &DeviceCpu<U>,
    input: &SerializedVec<U, Arr<U, N>>,
) -> Result<SerializedVec<U, Arr<U, N>>, TrainingError>

Apply the activation function to a batch of inputs.
fn batch_derive(
    &self,
    device: &DeviceCpu<U>,
    o: &SerializedVec<U, Arr<U, N>>,
    loss: &SerializedVec<U, Arr<U, N>>,
    u: &SerializedVec<U, Arr<U, N>>,
) -> Result<SerializedVec<U, Arr<U, N>>, TrainingError>

Apply the derivative of the activation function to a batch of inputs.
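A sketch of the batched CPU path, under the same assumptions as the single-sample sketch above, plus the additional assumption that a `SerializedVec` can be built from a `Vec` of `Arr`s via `Into`; only `batch_apply` and `batch_derive` are taken from the signatures documented here.

```rust
use nncombinator::activation::ReLu;
use nncombinator::arr::{Arr, SerializedVec};
use nncombinator::device::DeviceCpu;

fn relu_cpu_batch() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed constructors, as in the single-sample sketch.
    let device: DeviceCpu<f32> = DeviceCpu::new()?;
    let relu: ReLu<f32, DeviceCpu<f32>> = ReLu::new(&device);

    // Two samples of four features each. Converting Vec<Arr<_, 4>> into a
    // SerializedVec with `into()` is an assumption about the crate's conversions.
    let samples: Vec<Arr<f32, 4>> = vec![Arr::new(), Arr::new()];
    let batch: SerializedVec<f32, Arr<f32, 4>> = samples.into();

    // Forward pass over the whole batch (documented above).
    let outputs = relu.batch_apply(&device, &batch)?;

    // Backward pass over the whole batch (documented above), combining the
    // per-sample loss with the activation's derivative.
    let losses: SerializedVec<f32, Arr<f32, 4>> = vec![Arr::new(), Arr::new()].into();
    let gradients = relu.batch_derive(&device, &outputs, &losses, &batch)?;
    let _ = gradients;

    Ok(())
}
```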
impl<'a, U, const N: usize> BatchActivation<U, SerializedVecView<'a, U, Arr<U, N>>, SerializedVec<U, Arr<U, N>>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>
fn batch_apply(
    &self,
    device: &DeviceCpu<U>,
    input: &SerializedVecView<'a, U, Arr<U, N>>,
) -> Result<SerializedVec<U, Arr<U, N>>, TrainingError>

Apply the activation function to a batch of inputs.
fn batch_derive(
    &self,
    device: &DeviceCpu<U>,
    o: &SerializedVecView<'a, U, Arr<U, N>>,
    loss: &SerializedVecView<'a, U, Arr<U, N>>,
    u: &SerializedVecView<'a, U, Arr<U, N>>,
) -> Result<SerializedVec<U, Arr<U, N>>, TrainingError>

Apply the derivative of the activation function to a batch of inputs.
Auto Trait Implementations

impl<U, D> Freeze for ReLu<U, D>

impl<U, D> RefUnwindSafe for ReLu<U, D>
where
    U: RefUnwindSafe,
    D: RefUnwindSafe,

impl<U, D> Send for ReLu<U, D>
where
    D: Send,

impl<U, D> Sync for ReLu<U, D>
where
    D: Sync,

impl<U, D> Unpin for ReLu<U, D>

impl<U, D> UnwindSafe for ReLu<U, D>
where
    U: UnwindSafe,
    D: UnwindSafe,
Blanket Implementations

impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.
impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true.
Converts self into a Right variant of Either<Self, Self> otherwise.

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self> otherwise.