Trait Activation

pub trait Activation<U, T, R, D>
where
    U: UnitValue<U>,
    D: Device<U>,
{
    // Required methods
    fn apply(&self, device: &D, input: &T) -> Result<R, EvaluateError>;
    fn derive(
        &self,
        device: &D,
        o: &T,
        loss: &T,
        u: &T,
    ) -> Result<R, TrainingError>;
    fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool;
}

Trait defining activation functions. `U` is the numeric unit type, `T` the input type, `R` the output type produced by the activation, and `D` the device on which the computation runs.

Required Methods

fn apply(&self, device: &D, input: &T) -> Result<R, EvaluateError>

Apply the activation function

Arguments
  • device - device object used to perform the computation
  • input - input to which the activation function is applied

Errors

This function returns an EvaluateError if evaluation of the activation fails.
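As a rough illustration of how the type parameters fit together, the following helper forwards an input through any activation on any device. It relies only on the signatures shown on this page; the helper name is made up for this sketch, and the trait and error types are assumed to be brought into scope from the crate (paths omitted here).

// Hypothetical convenience wrapper around Activation::apply.
fn forward<U, T, R, D, A>(activation: &A, device: &D, input: &T) -> Result<R, EvaluateError>
where
    U: UnitValue<U>,
    D: Device<U>,
    A: Activation<U, T, R, D>,
{
    // Delegate to the trait; the device decides how the computation is
    // carried out (CPU loop, CUDA kernel, ...).
    activation.apply(device, input)
}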

fn derive(&self, device: &D, o: &T, loss: &T, u: &T) -> Result<R, TrainingError>

Apply the derivative of the activation function

Arguments
  • device - device object used to perform the computation
  • o - input from the upper layer
  • loss - loss computed in the lower layer
  • u - value of the upper-layer input before it was passed through the activation function

Errors

This function returns a TrainingError if the computation fails.
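Correspondingly, a backward-pass wrapper can be written against derive alone; again this is only a sketch built from the signature above, with the argument roles restated as comments.

// Hypothetical backward-pass helper built on Activation::derive.
fn backward<U, T, R, D, A>(
    activation: &A,
    device: &D,
    o: &T,     // input from the upper layer
    loss: &T,  // loss computed in the lower layer
    u: &T,     // upper-layer input before the activation was applied
) -> Result<R, TrainingError>
where
    U: UnitValue<U>,
    D: Device<U>,
    A: Activation<U, T, R, D>,
{
    activation.derive(device, o, loss, u)
}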

fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool

Returns whether the canonical link function can be used with the given loss function.

Arguments
  • l - loss function
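A typical use of is_canonical_link is to skip the explicit derivative when the activation is the canonical link of the loss function (for example softmax paired with cross-entropy), in which case the incoming loss can be propagated directly. The sketch below additionally assumes that the input and output types coincide and are cloneable; neither assumption comes from this page.

// Hypothetical: choose between propagating the loss as-is and calling derive,
// based on is_canonical_link. `T: Clone` and R == T are assumptions made for
// this sketch only.
fn backward_with_link<U, T, D, A, L>(
    activation: &A,
    device: &D,
    o: &T,
    loss: &T,
    u: &T,
    loss_fn: &L,
) -> Result<T, TrainingError>
where
    U: UnitValue<U>,
    D: Device<U>,
    A: Activation<U, T, T, D>,
    L: LossFunction<U>,
    T: Clone,
{
    if activation.is_canonical_link(loss_fn) {
        // Canonical pairing: pass the loss through unchanged.
        Ok(loss.clone())
    } else {
        // Otherwise apply the activation's derivative as usual.
        activation.derive(device, o, loss, u)
    }
}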

Dyn Compatibility

This trait is not dyn compatible.

In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.
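Because is_canonical_link is a generic method, the trait cannot be used behind dyn; code that needs to abstract over activations has to keep the activation as a type parameter instead. A hypothetical layer type illustrating that pattern (again assuming the crate's traits are in scope):

use std::marker::PhantomData;

// `Box<dyn Activation<...>>` would be rejected by the compiler; store the
// concrete activation type generically instead.
struct ActivationLayer<U, T, R, D, A>
where
    U: UnitValue<U>,
    D: Device<U>,
    A: Activation<U, T, R, D>,
{
    device: D,
    activation: A,
    _marker: PhantomData<(U, T, R)>,
}

impl<U, T, R, D, A> ActivationLayer<U, T, R, D, A>
where
    U: UnitValue<U>,
    D: Device<U>,
    A: Activation<U, T, R, D>,
{
    fn forward(&self, input: &T) -> Result<R, EvaluateError> {
        self.activation.apply(&self.device, input)
    }
}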

Implementors

impl<'a, U, const N: usize> Activation<U, ArrView<'a, U, N>, Arr<U, N>, DeviceCpu<U>> for Identity<U, DeviceCpu<U>>
where U: UnitValue<U>,

impl<'a, U, const N: usize> Activation<U, ArrView<'a, U, N>, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>
where U: UnitValue<U>,

impl<'a, U, const N: usize> Activation<U, ArrView<'a, U, N>, Arr<U, N>, DeviceCpu<U>> for Sigmoid<U, DeviceCpu<U>>
where U: UnitValue<U>,

impl<'a, U, const N: usize> Activation<U, ArrView<'a, U, N>, Arr<U, N>, DeviceCpu<U>> for SoftMax<U, DeviceCpu<U>>
where U: UnitValue<U>,

impl<'a, U, const N: usize> Activation<U, ArrView<'a, U, N>, Arr<U, N>, DeviceCpu<U>> for Swish<U, DeviceCpu<U>>
where U: UnitValue<U>,

impl<'a, U, const N: usize> Activation<U, ArrView<'a, U, N>, Arr<U, N>, DeviceCpu<U>> for Tanh<U, DeviceCpu<U>>
where U: UnitValue<U>,

impl<'a, U, const N: usize> Activation<U, CudaTensor1dPtrView<'a, U, N>, CudaTensor1dPtr<U, N>, DeviceGpu<U>> for Identity<U, DeviceGpu<U>>
where U: UnitValue<U>, DeviceGpu<U>: Device<U>,

impl<'a, U, const N: usize> Activation<U, CudaTensor1dPtrView<'a, U, N>, CudaTensor1dPtr<U, N>, DeviceGpu<U>> for ReLu<U, DeviceGpu<U>>
where U: UnitValue<U> + DataTypeInfo, DeviceGpu<U>: Device<U>, for<'b> ReLuForward<'b, U, N>: Kernel<Args = ActivationForwardArgs<'b, U, N>>, for<'b> ReLuBackward<'b, U, N>: Kernel<Args = ActivationBackwardArgs<'b, U, N>>,

impl<'a, U, const N: usize> Activation<U, CudaTensor1dPtrView<'a, U, N>, CudaTensor1dPtr<U, N>, DeviceGpu<U>> for Sigmoid<U, DeviceGpu<U>>
where U: UnitValue<U> + DataTypeInfo, DeviceGpu<U>: Device<U>, for<'b> SigmoidForward<'b, U, N>: Kernel<Args = ActivationForwardArgs<'b, U, N>>, for<'b> SigmoidBackward<'b, U, N>: Kernel<Args = ActivationBackwardArgs<'b, U, N>>,

impl<'a, U, const N: usize> Activation<U, CudaTensor1dPtrView<'a, U, N>, CudaTensor1dPtr<U, N>, DeviceGpu<U>> for SoftMax<U, DeviceGpu<U>>
where U: UnitValue<U> + DataTypeInfo, DeviceGpu<U>: Device<U>, CudaPtr<U>: TryFrom<U, Error = CudaError>, for<'b> SoftMaxForward<'b, U, N>: Kernel<Args = ActivationForwardArgs<'b, U, N>>, for<'b> SoftMaxBackward<'b, U, N>: Kernel<Args = ActivationBackwardArgs<'b, U, N>>,

impl<'a, U, const N: usize> Activation<U, CudaTensor1dPtrView<'a, U, N>, CudaTensor1dPtr<U, N>, DeviceGpu<U>> for Swish<U, DeviceGpu<U>>
where U: UnitValue<U> + DataTypeInfo, DeviceGpu<U>: Device<U>, for<'b> SwishForward<'b, U, N>: Kernel<Args = ActivationForwardArgs<'b, U, N>>, for<'b> SwishBackward<'b, U, N>: Kernel<Args = ActivationBackwardArgs<'b, U, N>>,

impl<'a, U, const N: usize> Activation<U, CudaTensor1dPtrView<'a, U, N>, CudaTensor1dPtr<U, N>, DeviceGpu<U>> for Tanh<U, DeviceGpu<U>>
where U: UnitValue<U> + DataTypeInfo, DeviceGpu<U>: Device<U>, for<'b> TanhForward<'b, U, N>: Kernel<Args = ActivationForwardArgs<'b, U, N>>, for<'b> TanhBackward<'b, U, N>: Kernel<Args = ActivationBackwardArgs<'b, U, N>>,

impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Identity<U, DeviceCpu<U>>
where U: UnitValue<U>, I: Iterator<Item = U> + Clone,

impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>
where U: UnitValue<U>, I: Iterator<Item = U> + Clone,

impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Sigmoid<U, DeviceCpu<U>>
where U: UnitValue<U>, I: Iterator<Item = U> + Clone,

impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for SoftMax<U, DeviceCpu<U>>
where U: UnitValue<U>, I: Iterator<Item = U> + Clone,

impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Swish<U, DeviceCpu<U>>
where U: UnitValue<U>, I: Iterator<Item = U> + Clone,

impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Tanh<U, DeviceCpu<U>>
where U: UnitValue<U>, I: Iterator<Item = U> + Clone,

impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Identity<U, DeviceCpu<U>>
where U: UnitValue<U>,

impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>
where U: UnitValue<U>,

impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Sigmoid<U, DeviceCpu<U>>
where U: UnitValue<U>,

impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for SoftMax<U, DeviceCpu<U>>
where U: UnitValue<U>,

impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Swish<U, DeviceCpu<U>>
where U: UnitValue<U>,

impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Tanh<U, DeviceCpu<U>>
where U: UnitValue<U>,