Struct nncombinator::arr::Arr

Fixed-length one-dimensional array implementation.
Implementations
Trait Implementations
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Identity<U, DeviceCpu<U>>
where
    U: UnitValue<U>,
fn apply(
    &self,
    device: &DeviceCpu<U>,
    input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
fn derive(
    &self,
    device: &DeviceCpu<U>,
    o: &Arr<U, N>,
    loss: &Arr<U, N>,
    u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>where
U: UnitValue<U>,
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>where
U: UnitValue<U>,
sourcefn apply(
&self,
device: &DeviceCpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
fn apply(
&self,
device: &DeviceCpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
device: &DeviceCpu<U>,
o: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
device: &DeviceCpu<U>,
o: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Sigmoid<U, DeviceCpu<U>>where
U: UnitValue<U>,
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Sigmoid<U, DeviceCpu<U>>where
U: UnitValue<U>,
sourcefn apply(
&self,
device: &DeviceCpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
fn apply(
&self,
device: &DeviceCpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
device: &DeviceCpu<U>,
o: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
device: &DeviceCpu<U>,
o: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for SoftMax<U, DeviceCpu<U>>where
U: UnitValue<U>,
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for SoftMax<U, DeviceCpu<U>>where
U: UnitValue<U>,
sourcefn apply(
&self,
device: &DeviceCpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
fn apply(
&self,
device: &DeviceCpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
device: &DeviceCpu<U>,
o: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
device: &DeviceCpu<U>,
o: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Swish<U, DeviceCpu<U>>where
U: UnitValue<U>,
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Swish<U, DeviceCpu<U>>where
U: UnitValue<U>,
sourcefn apply(
&self,
device: &DeviceCpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
fn apply(
&self,
device: &DeviceCpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
device: &DeviceCpu<U>,
o: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
device: &DeviceCpu<U>,
o: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Tanh<U, DeviceCpu<U>>where
U: UnitValue<U>,
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Tanh<U, DeviceCpu<U>>where
U: UnitValue<U>,
sourcefn apply(
&self,
device: &DeviceCpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
fn apply(
&self,
device: &DeviceCpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
device: &DeviceCpu<U>,
o: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
device: &DeviceCpu<U>,
o: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Identity<U, DeviceGpu<U>>where
U: UnitValue<U>,
DeviceGpu<U>: Device<U>,
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Identity<U, DeviceGpu<U>>where
U: UnitValue<U>,
DeviceGpu<U>: Device<U>,
sourcefn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
fn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
_: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
_: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for ReLu<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
ReLuForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
ReLuBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for ReLu<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
ReLuForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
ReLuBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
sourcefn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
fn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Sigmoid<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
SigmoidForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SigmoidBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Sigmoid<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
SigmoidForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SigmoidBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
sourcefn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
fn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for SoftMax<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
CudaPtr<U>: TryFrom<U, Error = CudaError>,
SoftMaxForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SoftMaxBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for SoftMax<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
CudaPtr<U>: TryFrom<U, Error = CudaError>,
SoftMaxForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SoftMaxBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
sourcefn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
fn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Swish<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
SwishForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SwishBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Swish<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
SwishForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SwishBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
sourcefn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
fn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Tanh<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
TanhForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
TanhBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Tanh<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
TanhForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
TanhBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
sourcefn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
fn apply(
&self,
_: &DeviceGpu<U>,
input: &Arr<U, N>
) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceGpu<U>,
_: &Arr<U, N>,
loss: &Arr<U, N>,
u: &Arr<U, N>
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Identity<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Identity<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
sourcefn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
fn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
_: &I
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
_: &I
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
sourcefn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
fn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
u: &I
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
u: &I
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Sigmoid<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Sigmoid<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
sourcefn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
fn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
u: &I
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
u: &I
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for SoftMax<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for SoftMax<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
sourcefn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
fn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
u: &I
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
u: &I
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Swish<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Swish<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
sourcefn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
fn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
u: &I
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
u: &I
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Tanh<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
impl<U, I, const N: usize> Activation<U, I, Arr<U, N>, DeviceCpu<U>> for Tanh<U, DeviceCpu<U>>where
U: UnitValue<U>,
I: Iterator<Item = U> + Clone,
sourcefn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
fn apply(&self, _: &DeviceCpu<U>, input: &I) -> Result<Arr<U, N>, EvaluateError>
Apply the activation function Read more
sourcefn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
u: &I
) -> Result<Arr<U, N>, TrainingError>
fn derive(
&self,
_: &DeviceCpu<U>,
_: &I,
loss: &I,
u: &I
) -> Result<Arr<U, N>, TrainingError>
Apply derivatives of the activation function Read more
sourcefn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether or not the canonical linkage function can be used. Read more
sourceimpl<'a, T, const N: usize> AsRawMutSlice<'a, T> for Arr<T, N>where
T: Default + Clone + Send,
impl<'a, T, const N: usize> AsRawMutSlice<'a, T> for Arr<T, N>where
T: Default + Clone + Send,
sourceimpl<U, C, P, D, I, const NI: usize, const NO: usize> Backward<U, &Arr<U, NO>, Result<Arr<U, NI>, TrainingError>> for DiffLinearLayer<U, C, P, D, I, NI, NO>where
U: Default + Clone + Copy + UnitValue<U>,
D: Device<U> + DeviceLinear<U, C, NI, NO>,
C: Index<(usize, usize), Output = U>,
P: ForwardAll<Input = I, Output = DiffInput<DiffArr<U, NI>, U, NI, NO>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
I: Debug + Send + Sync,
impl<U, C, P, D, I, const NI: usize, const NO: usize> Backward<U, &Arr<U, NO>, Result<Arr<U, NI>, TrainingError>> for DiffLinearLayer<U, C, P, D, I, NI, NO>where
U: Default + Clone + Copy + UnitValue<U>,
D: Device<U> + DeviceLinear<U, C, NI, NO>,
C: Index<(usize, usize), Output = U>,
P: ForwardAll<Input = I, Output = DiffInput<DiffArr<U, NI>, U, NI, NO>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
I: Debug + Send + Sync,
sourceimpl<U, C, P, D, I, const NI: usize, const NO: usize> Backward<U, &Arr<U, NO>, Result<Arr<U, NI>, TrainingError>> for LinearLayer<U, C, P, D, I, NI, NO>where
P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
U: Default + Clone + Copy + UnitValue<U>,
D: Device<U> + DeviceLinear<U, C, NI, NO>,
I: Debug + Send + Sync,
impl<U, C, P, D, I, const NI: usize, const NO: usize> Backward<U, &Arr<U, NO>, Result<Arr<U, NI>, TrainingError>> for LinearLayer<U, C, P, D, I, NI, NO>where
P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
U: Default + Clone + Copy + UnitValue<U>,
D: Device<U> + DeviceLinear<U, C, NI, NO>,
I: Debug + Send + Sync,
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Identity<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Identity<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
sourcefn batch_apply(
&self,
_: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
_: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for ReLu<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
sourcefn batch_apply(
&self,
device: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
device: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Sigmoid<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Sigmoid<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
sourcefn batch_apply(
&self,
device: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
device: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for SoftMax<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for SoftMax<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
sourcefn batch_apply(
&self,
device: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
device: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Swish<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Swish<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
sourcefn batch_apply(
&self,
device: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
device: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Tanh<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceCpu<U>> for Tanh<U, DeviceCpu<U>>where
U: UnitValue<U>,
Vec<Arr<U, N>>: FromParallelIterator<Arr<U, N>>,
sourcefn batch_apply(
&self,
device: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
device: &DeviceCpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Identity<U, DeviceGpu<U>>where
U: UnitValue<U>,
DeviceGpu<U>: Device<U>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Identity<U, DeviceGpu<U>>where
U: UnitValue<U>,
DeviceGpu<U>: Device<U>,
sourcefn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for ReLu<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
ReLuForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
ReLuBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for ReLu<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
ReLuForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
ReLuBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
sourcefn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Sigmoid<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
SigmoidForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SigmoidBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Sigmoid<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
SigmoidForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SigmoidBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
sourcefn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for SoftMax<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
CudaPtr<U>: TryFrom<U, Error = CudaError>,
SoftMaxForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SoftMaxBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for SoftMax<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
CudaPtr<U>: TryFrom<U, Error = CudaError>,
SoftMaxForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SoftMaxBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
sourcefn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Swish<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
SwishForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SwishBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Swish<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
SwishForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
SwishBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
sourcefn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Tanh<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
TanhForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
TanhBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Tanh<U, DeviceGpu<U>>where
U: UnitValue<U> + DataTypeInfo,
DeviceGpu<U>: Device<U>,
TanhForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
TanhBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,
sourcefn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
fn batch_apply(
&self,
_: &DeviceGpu<U>,
input: &VecArr<U, Arr<U, N>>
) -> Result<VecArr<U, Arr<U, N>>, TrainingError>
Apply the activation function Read more
sourceimpl<T, const N: usize> Div<T> for Arr<T, N>where
T: Div<T> + Div<Output = T> + Clone + Copy + Default + Send,
impl<T, const N: usize> Div<T> for Arr<T, N>where
T: Div<T> + Div<Output = T> + Clone + Copy + Default + Send,
sourceimpl<U, C, P, D, I, const NI: usize, const NO: usize> Forward<Arr<U, NI>, Result<Arr<U, NO>, EvaluateError>> for LinearLayer<U, C, P, D, I, NI, NO>where
P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
U: Default + Clone + Copy + Send + UnitValue<U>,
D: Device<U> + DeviceLinear<U, C, NI, NO>,
I: Debug + Send + Sync,
impl<U, C, P, D, I, const NI: usize, const NO: usize> Forward<Arr<U, NI>, Result<Arr<U, NO>, EvaluateError>> for LinearLayer<U, C, P, D, I, NI, NO>where
P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
U: Default + Clone + Copy + Send + UnitValue<U>,
D: Device<U> + DeviceLinear<U, C, NI, NO>,
I: Debug + Send + Sync,
sourceimpl<'data, U, const N: usize> From<ArrView<'data, U, N>> for Arr<U, N>where
U: Default + Clone + Copy + Send,
impl<'data, U, const N: usize> From<ArrView<'data, U, N>> for Arr<U, N>where
U: Default + Clone + Copy + Send,
sourceimpl<'data, T, const N: usize> IntoParallelRefIterator<'data> for Arr<T, N>where
T: Send + Sync + 'static + Default + Clone,
impl<'data, T, const N: usize> IntoParallelRefIterator<'data> for Arr<T, N>where
T: Send + Sync + 'static + Default + Clone,
sourceimpl<T, const N: usize> Mul<T> for Arr<T, N>where
T: Mul<T> + Mul<Output = T> + Clone + Copy + Default + Send,
impl<T, const N: usize> Mul<T> for Arr<T, N>where
T: Mul<T> + Mul<Output = T> + Clone + Copy + Default + Send,
sourceimpl<T: PartialEq, const N: usize> PartialEq<Arr<T, N>> for Arr<T, N>where
T: Default + Clone + Send,
impl<T: PartialEq, const N: usize> PartialEq<Arr<T, N>> for Arr<T, N>where
T: Default + Clone + Send,
impl<T: Eq, const N: usize> Eq for Arr<T, N>where
T: Default + Clone + Send,
impl<T, const N: usize> StructuralEq for Arr<T, N>where
T: Default + Clone + Send,
impl<T, const N: usize> StructuralPartialEq for Arr<T, N>where
T: Default + Clone + Send,
Auto Trait Implementations
impl<T, const N: usize> RefUnwindSafe for Arr<T, N>where
T: RefUnwindSafe,
impl<T, const N: usize> Send for Arr<T, N>
impl<T, const N: usize> Sync for Arr<T, N>where
T: Sync,
impl<T, const N: usize> Unpin for Arr<T, N>
impl<T, const N: usize> UnwindSafe for Arr<T, N>where
T: UnwindSafe,
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

const: unstable · fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more