pub struct BiasLayer<U, C, P, OP, D, I, PI, const N: usize>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U>,
    I: Debug + Send + Sync,
    PI: Debug,
    OP: Optimizer<U, D>,
{ /* private fields */ }
Bias Layer Implementation
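Conceptually, this layer adds a learned per-element offset to its parent layer's output: for an input `x` and a bias vector `b` of length `N`, the forward pass computes `y[i] = x[i] + b[i]`. A minimal standalone sketch of that arithmetic (plain Rust arrays standing in for the crate's `Arr<U, N>` type):

```rust
// Standalone sketch of the bias layer's forward arithmetic. The const
// generic `N` mirrors the one on BiasLayer; plain f32 arrays stand in
// for Arr<U, N>.
fn bias_forward<const N: usize>(input: &[f32; N], bias: &[f32; N]) -> [f32; N] {
    let mut out = [0.0f32; N];
    for i in 0..N {
        out[i] = input[i] + bias[i]; // element-wise offset
    }
    out
}

fn main() {
    let x = [0.5f32, -1.0, 2.0];
    let b = [0.1f32, 0.2, 0.3];
    println!("{:?}", bias_forward(&x, &b)); // approximately [0.6, -0.8, 2.3]
}
```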
Trait Implementations
impl<U, C, P, OP, D, I, PI, const N: usize> AskDiffInput<U> for BiasLayer<U, C, P, OP, D, I, PI, N>
where
    P: PreTrain<U, OutStack = <<Self as PreTrain<U>>::OutStack as Stack>::Remaining> + ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + Loss<U> + AskDiffInput<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U>,
    I: Debug + Send + Sync,
    PI: Debug + BatchDataType + 'static,
    OP: Optimizer<U, D>,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
    Self: PreTrain<U, PreOutput = PI>,
type DiffInput = <P as AskDiffInput<U>>::DiffInput
Diff Input to this layer of the neural network
fn ask_diff_input(
    &self,
    stack: &Self::OutStack,
) -> Result<Self::DiffInput, TypeConvertError>
Data inquiry for creating difference information.
impl<U, C, P, OP, D, I, PI, const N: usize> Backward<U, PI, Result<PI, TrainingError>> for BiasLayer<U, C, P, OP, D, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U>,
    U: Default + Clone + Copy + UnitValue<U>,
    D: Device<U> + DeviceBias<U, C, PI, N>,
    I: Debug + Send + Sync,
    PI: Debug + BatchDataType + 'static,
    OP: Optimizer<U, D>,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
impl<U, P, OP, D, I, PI, const N: usize> BackwardAll<U> for BiasLayer<U, Arr<U, N>, P, OP, D, I, PI, N>
where
    P: BackwardAll<U, LossInput = PI> + ForwardAll<Input = I, Output = PI> + PreTrain<U, PreOutput = PI> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U> + DeviceBias<U, Arr<U, N>, PI, N>,
    I: Debug + Send + Sync,
    PI: Debug + BatchDataType + 'static,
    OP: Optimizer<U, D>,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
    for<'a> &'a <OP as Optimizer<U, D>>::InternalType: From<&'a Arr<U, N>>,
    for<'a> <OP as Optimizer<U, D>>::InternalUpdateType<'a>: From<&'a mut Arr<U, N>>,
type LossOutput = <P as BackwardAll<U>>::LossOutput
Losses in the top layer during neural network training
fn backward_all<L: LossFunction<U>>(
    &mut self,
    input: Self::LossInput,
    stack: Self::OutStack,
    lossf: &L,
) -> Result<(<Self as BackwardAll<U>>::LossOutput, <Self as UpdateWeight<U>>::GradientStack), TrainingError>
Back propagation of errors.
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
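Because the forward pass is `y[i] = x[i] + b[i]`, both partial derivatives are the identity: the loss arriving from above passes to the parent layer unchanged, and the very same values serve as the gradient for the bias, which `backward_all` pushes onto the `GradientStack` it returns. A standalone sketch of that arithmetic:

```rust
// Sketch of single-sample backward arithmetic for a bias layer:
// the incoming loss is both the loss propagated to the parent and
// the gradient for the bias vector.
fn bias_backward<const N: usize>(loss: &[f32; N]) -> ([f32; N], [f32; N]) {
    (*loss, *loss) // (loss for the parent layer, gradient for b)
}
```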
impl<U, P, OP, D, I, PI, const N: usize> BatchBackward<U> for BiasLayer<U, Arr<U, N>, P, OP, D, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type> + BatchForward + BatchPreTrainBase<U, BatchPreOutput = <PI as BatchDataType>::Type> + BatchPreTrain<U> + BatchBackward<U> + BatchLoss<U, BatchLossInput = <PI as BatchDataType>::Type>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync + BatchDataType,
    PI: Debug + BatchDataType + 'static,
    <PI as BatchDataType>::Type: Debug + BatchSize + IntoConverter + 'static,
    <I as BatchDataType>::Type: Debug,
    OP: Optimizer<U, D>,
    D: Device<U> + DeviceBias<U, Arr<U, N>, PI, N>,
    for<'a> &'a <OP as Optimizer<U, D>>::InternalType: From<&'a Arr<U, N>>,
    for<'a> <OP as Optimizer<U, D>>::InternalUpdateType<'a>: From<&'a mut Arr<U, N>>,
type BatchLossInput = <PI as BatchDataType>::Type
Losses during neural network training for batch execution
type BatchLossOutput = <P as BatchBackward<U>>::BatchLossOutput
Losses in the top layer during neural network training
fn batch_backward<L: LossFunction<U>>(
    &mut self,
    input: Self::BatchLossInput,
    stack: Self::BatchOutStack,
    lossf: &L,
) -> Result<(<Self as BatchBackward<U>>::BatchLossOutput, <Self as UpdateWeight<U>>::GradientStack), TrainingError>
Back propagation of errors.
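In the batch case each sample's incoming loss still passes to the parent unchanged, while the gradient for the single shared bias vector must be accumulated across the batch. A sketch of that accumulation (whether the crate sums or averages over samples is an assumption of this sketch, not something stated on this page):

```rust
// Sketch: gradient of a shared bias over a batch. Each row of `loss`
// is one sample's incoming loss; the gradient accumulates across rows
// because every sample shares the same N bias parameters.
fn batch_bias_grad<const N: usize>(loss: &[[f32; N]]) -> [f32; N] {
    let mut grad = [0.0f32; N];
    for row in loss {
        for i in 0..N {
            grad[i] += row[i];
        }
    }
    grad
}
```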
impl<U, C, P, OP, D, I, PI, const N: usize> BatchForward for BiasLayer<U, C, P, OP, D, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type> + BatchForward,
    D: Device<U> + DeviceBias<U, C, PI, N>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync + BatchDataType,
    PI: Debug + BatchDataType + 'static,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
    <I as BatchDataType>::Type: Debug,
    OP: Optimizer<U, D>,
fn batch_forward(
    &self,
    input: Self::BatchInput,
) -> Result<Self::BatchOutput, TrainingError>
Forward propagation.
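The batch forward pass broadcasts the one bias vector across every sample in the batch; a standalone sketch:

```rust
// Sketch: batch forward pass. The single bias vector is broadcast
// across every sample (row) of the batch.
fn batch_bias_forward<const N: usize>(input: &[[f32; N]], bias: &[f32; N]) -> Vec<[f32; N]> {
    input
        .iter()
        .map(|row| {
            let mut out = *row;
            for i in 0..N {
                out[i] += bias[i];
            }
            out
        })
        .collect()
}
```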
impl<U, C, P, OP, D, I, PI, const N: usize> BatchForwardBase for BiasLayer<U, C, P, OP, D, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U> + DeviceBias<U, C, PI, N>,
    I: Debug + Send + Sync + BatchDataType,
    PI: Debug + BatchDataType + 'static,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
    <I as BatchDataType>::Type: Debug,
    OP: Optimizer<U, D>,
    Self: ForwardAll,
type BatchInput = <I as BatchDataType>::Type
Input to this layer of the neural network for batch execution
type BatchOutput = <PI as BatchDataType>::Type
Output from this layer of the neural network for batch execution
impl<U, P, OP, D, I, PI, const N: usize> BatchLoss<U> for BiasLayer<U, Arr<U, N>, P, OP, D, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type> + BatchForward + BatchPreTrainBase<U, BatchPreOutput = <PI as BatchDataType>::Type> + BatchPreTrain<U> + BatchBackward<U> + BatchLoss<U, BatchLossInput = <PI as BatchDataType>::Type>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync + BatchDataType,
    PI: Debug + BatchDataType + 'static,
    <PI as BatchDataType>::Type: Debug + BatchSize + IntoConverter + 'static,
    <I as BatchDataType>::Type: Debug,
    OP: Optimizer<U, D>,
    D: Device<U> + DeviceBias<U, Arr<U, N>, PI, N>,
    for<'a> &'a <OP as Optimizer<U, D>>::InternalType: From<&'a Arr<U, N>>,
    for<'a> <OP as Optimizer<U, D>>::InternalUpdateType<'a>: From<&'a mut Arr<U, N>>,
fn batch_loss<L: LossFunction<U>>(
    &self,
    loss: Self::BatchLossInput,
    _lossf: &L,
    stack: Self::BatchOutStack,
) -> Result<(Self::BatchOutStack, Self::BatchLossInput), TrainingError>
Error calculation.
impl<U, C, P, OP, D, I, PI, const N: usize> BatchPreTrain<U> for BiasLayer<U, C, P, OP, D, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type> + BatchForward + BatchPreTrainBase<U, BatchPreOutput = <PI as BatchDataType>::Type> + BatchPreTrain<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U> + DeviceBias<U, C, PI, N>,
    I: Debug + Send + Sync + BatchDataType,
    PI: Debug + BatchDataType + 'static,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
    <I as BatchDataType>::Type: Debug,
    OP: Optimizer<U, D>,
fn batch_pre_train(
    &self,
    input: Self::BatchInput,
) -> Result<Self::BatchOutStack, TrainingError>
Performs the forward propagation required for error back propagation.
impl<U, C, P, OP, D, I, PI, const N: usize> BatchPreTrainBase<U> for BiasLayer<U, C, P, OP, D, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type> + BatchForward + BatchPreTrainBase<U, BatchPreOutput = <PI as BatchDataType>::Type>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U> + DeviceBias<U, C, PI, N>,
    I: Debug + Send + Sync + BatchDataType,
    PI: Debug + BatchDataType + 'static,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
    <I as BatchDataType>::Type: Debug,
    OP: Optimizer<U, D>,
    Self: PreTrain<U, PreOutput = PI>,
type BatchPreOutput = <PI as BatchDataType>::Type
The type of output that is piled on the stack during the back-propagation process for errors in a batch run.
type BatchOutStack = Cons<<P as BatchPreTrainBase<U>>::BatchOutStack, <BiasLayer<U, C, P, OP, D, I, PI, N> as BatchPreTrainBase<U>>::BatchPreOutput>
Type of object to keep the results of forward propagation
needed to perform error back propagation for batch execution.
impl<U, P, OP, I, PI, const N: usize> BiasLayerInstantiation<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N> for BiasLayer<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N>
fn instantiation<UI: FnMut() -> U, B: OptimizerBuilder<U, DeviceCpu<U>, Output = OP>>(
    parent: P,
    device: &DeviceCpu<U>,
    ui: UI,
    b: &B,
) -> Result<BiasLayer<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N>, LayerInstantiationError>
Create and return an instance with the specified scale, bias, and momentum.
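A hedged usage sketch of this constructor. `parent`, `device`, and `builder` are placeholders for values whose concrete types come from the rest of the network and are not shown on this page; the closure supplies the initial value of each bias element:

```rust
// Hypothetical wiring, not a complete program: `parent`, `device`,
// and `builder` stand in for values constructed elsewhere.
let layer = BiasLayer::instantiation(
    parent,   // P: the layer below this one
    &device,  // &DeviceCpu<U>
    || 0.0,   // UI: FnMut() -> U, initial value for each bias element
    &builder, // B: OptimizerBuilder<U, DeviceCpu<U>, Output = OP>
)?;
```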
impl<U, P, OP, I, PI, const N: usize> BiasLayerInstantiation<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, N> for BiasLayer<U, Arr<U, N>, P, OP, DeviceGpu<U>, I, PI, N>
fn instantiation<UI: FnMut() -> U, B: OptimizerBuilder<U, DeviceGpu<U>, Output = OP>>(
    parent: P,
    device: &DeviceGpu<U>,
    ui: UI,
    b: &B,
) -> Result<BiasLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, N>, LayerInstantiationError>
Create and return an instance with the specified scale, bias, and momentum.
impl<U, C, P, OP, D, I, PI, const N: usize> Forward<PI, Result<PI, EvaluateError>> for BiasLayer<U, C, P, OP, D, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U> + DeviceBias<U, C, PI, N>,
    I: Debug + Send + Sync,
    PI: Debug + BatchDataType,
    OP: Optimizer<U, D>,
    <PI as BatchDataType>::Type: Debug + BatchSize,
impl<U, C, P, OP, D, I, PI, const N: usize> ForwardAll for BiasLayer<U, C, P, OP, D, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U>,
    D: Device<U> + DeviceBias<U, C, PI, N>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
    PI: Debug + BatchDataType + 'static,
    OP: Optimizer<U, D>,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
impl<U, P, OP, D, I, PI, const N: usize> Loss<U> for BiasLayer<U, Arr<U, N>, P, OP, D, I, PI, N>
where
    P: PreTrain<U, PreOutput = PI> + ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
    PI: Debug + BatchDataType + 'static,
    OP: Optimizer<U, D>,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
    D: Device<U> + DeviceBias<U, Arr<U, N>, PI, N>,
    for<'a> &'a <OP as Optimizer<U, D>>::InternalType: From<&'a Arr<U, N>>,
    for<'a> <OP as Optimizer<U, D>>::InternalUpdateType<'a>: From<&'a mut Arr<U, N>>,
impl<T, U, P, OP, I, PI, const N: usize> Persistence<U, T, Linear> for BiasLayer<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N>
where
    T: LinearPersistence<U>,
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U> + Persistence<U, T, Linear>,
    U: Default + Clone + Copy + UnitValue<U>,
    I: Debug + Send + Sync,
    PI: Debug,
    OP: Optimizer<U, DeviceCpu<U>>,
impl<T, U, P, OP, I, PI, const N: usize> Persistence<U, T, Linear> for BiasLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, N>
where
    T: LinearPersistence<U>,
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U> + Persistence<U, T, Linear>,
    U: Default + Clone + Copy + UnitValue<U>,
    I: Debug + Send + Sync,
    PI: Debug,
    OP: Optimizer<U, DeviceGpu<U>>,
    DeviceGpu<U>: Device<U>,
impl<U, P, OP, I, PI, const N: usize> Persistence<U, TextFilePersistence<U>, Specialized> for BiasLayer<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U> + Persistence<U, TextFilePersistence<U>, Specialized>,
    U: Default + Clone + Copy + UnitValue<U> + FromStr,
    I: Debug + Send + Sync,
    PI: Debug,
    OP: Optimizer<U, DeviceCpu<U>>,
    ConfigReadError: From<<U as FromStr>::Err>,
fn load(
    &mut self,
    persistence: &mut TextFilePersistence<U>,
) -> Result<(), ConfigReadError>
Load model.
fn save(
    &mut self,
    persistence: &mut TextFilePersistence<U>,
) -> Result<(), PersistenceError>
Save model.
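A hedged usage sketch of the persistence pair. `layer` and `persistence` are placeholders; how a `TextFilePersistence<U>` is constructed is not shown on this page:

```rust
// Hypothetical fragment: `layer` is a BiasLayer over DeviceCpu and
// `persistence` a TextFilePersistence<U> obtained elsewhere.
layer.save(&mut persistence)?; // write the bias parameters out
layer.load(&mut persistence)?; // read saved parameters back in
```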
impl<U, P, OP, I, PI, const N: usize> Persistence<U, TextFilePersistence<U>, Specialized> for BiasLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U> + Persistence<U, TextFilePersistence<U>, Specialized>,
    U: Default + Clone + Copy + UnitValue<U> + FromStr,
    I: Debug + Send + Sync,
    PI: Debug,
    OP: Optimizer<U, DeviceGpu<U>>,
    DeviceGpu<U>: Device<U>,
    ConfigReadError: From<<U as FromStr>::Err>,
fn load(
    &mut self,
    persistence: &mut TextFilePersistence<U>,
) -> Result<(), ConfigReadError>
Load model.
fn save(
    &mut self,
    persistence: &mut TextFilePersistence<U>,
) -> Result<(), PersistenceError>
Save model.
impl<U, C, P, OP, D, I, PI, const N: usize> PreTrain<U> for BiasLayer<U, C, P, OP, D, I, PI, N>
where
    P: PreTrain<U, PreOutput = PI> + ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + Loss<U>,
    D: Device<U> + DeviceBias<U, C, PI, N>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
    PI: Debug + BatchDataType + 'static,
    OP: Optimizer<U, D>,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
type PreOutput = PI
The type of output that is piled on the stack during the error back propagation process.
impl<U, P, OP, D, I, PI, const N: usize> UpdateWeight<U> for BiasLayer<U, Arr<U, N>, P, OP, D, I, PI, N>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + UpdateWeight<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
    PI: Debug + BatchDataType + 'static,
    OP: Optimizer<U, D>,
    D: Device<U>,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
    for<'a> &'a <OP as Optimizer<U, D>>::InternalType: From<&'a Arr<U, N>>,
    for<'a> <OP as Optimizer<U, D>>::InternalUpdateType<'a>: From<&'a mut Arr<U, N>>,
type GradientStack = Cons<<P as UpdateWeight<U>>::GradientStack, Arr<U, N>>
Type of object that holds the gradient needed to update the weights of the units in each layer.
fn update_weight(
    &mut self,
    stack: Self::GradientStack,
) -> Result<(), TrainingError>
Updates the unit weights of each layer using the gradients held on the stack.
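The `GradientStack` pairs the parent's gradients with this layer's `Arr<U, N>` bias gradient; the actual update rule is delegated to the optimizer `OP`. As a standalone illustration only, plain SGD applied to a popped bias gradient would look like:

```rust
// Sketch of the arithmetic an optimizer might apply to the bias
// gradient popped off the GradientStack. Plain SGD is an assumption
// of this sketch; the real rule depends on OP.
fn sgd_update<const N: usize>(bias: &mut [f32; N], grad: &[f32; N], lr: f32) {
    for i in 0..N {
        bias[i] -= lr * grad[i];
    }
}
```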
Auto Trait Implementations
impl<U, C, P, OP, D, I, PI, const N: usize> Freeze for BiasLayer<U, C, P, OP, D, I, PI, N>
impl<U, C, P, OP, D, I, PI, const N: usize> RefUnwindSafe for BiasLayer<U, C, P, OP, D, I, PI, N>
impl<U, C, P, OP, D, I, PI, const N: usize> Send for BiasLayer<U, C, P, OP, D, I, PI, N>
impl<U, C, P, OP, D, I, PI, const N: usize> Sync for BiasLayer<U, C, P, OP, D, I, PI, N>
impl<U, C, P, OP, D, I, PI, const N: usize> Unpin for BiasLayer<U, C, P, OP, D, I, PI, N>
impl<U, C, P, OP, D, I, PI, const N: usize> UnwindSafe for BiasLayer<U, C, P, OP, D, I, PI, N>
Blanket Implementations
impl<T> AddLayer for T
where
    T: ForwardAll,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T, U> ForwardDiff<U> for T
fn forward_diff(
    &self,
    input: <T as ForwardAll>::Input,
) -> Result<<T as PreTrain<U>>::OutStack, EvaluateError>
Forward propagation (differential application).
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true; otherwise converts self into a Right variant.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true; otherwise converts self into a Right variant.
impl<T> Pointable for T
impl<T> TryAddLayer for T
where
    T: ForwardAll,
fn try_add_layer<C, F, E>(self, f: F) -> Result<C, E>
Adding layers.
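A hedged sketch of how `try_add_layer` might compose with the fallible `instantiation` constructor above. The closure shape and the names `net`, `device`, and `builder` are assumptions for illustration; the documented signature only fixes `f: F` returning `Result<C, E>`:

```rust
// Hypothetical fragment: growing a network with a fallible layer
// constructor. `net`, `device`, and `builder` come from elsewhere,
// and the closure is assumed to receive the current network as the
// new layer's parent.
let net = net.try_add_layer(|parent| {
    BiasLayer::instantiation(parent, &device, || 0.0, &builder)
})?;
```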