Struct nncombinator::layer::LinearLayer
pub struct LinearLayer<U, C, P, D, I, const NI: usize, const NO: usize>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U>,
    I: Debug + Send + Sync,
{ /* private fields */ }
Linear layer implementation (a fully connected layer with NI inputs and NO outputs). The type parameters are: U, the scalar type; C, the weight container (Arr2<U, NI, NO> on the CPU, CachedTensor<U, Arr2<U, NI, NO>> on the GPU); P, the parent (upper) layer; D, the compute device; I, the network input type; and NI/NO, the input and output dimensions.
Implementations
impl<U, P, I, const NI: usize, const NO: usize> LinearLayer<U, Arr2<U, NI, NO>, P, DeviceCpu<U>, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
pub fn new<UI: FnMut() -> U, BI: FnMut() -> U>(
    parent: P,
    device: &DeviceCpu<U>,
    ui: UI,
    bi: BI
) -> LinearLayer<U, Arr2<U, NI, NO>, P, DeviceCpu<U>, I, NI, NO>
Creates and returns an instance of LinearLayer.
Arguments

- parent - upper layer
- device - Device object used for neural network computation
- ui - Callback that generates the initial weight of each unit
- bi - Callback that generates the initial weight of the bias
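A minimal construction sketch. Only LinearLayer::new, add_layer, and the bounds above come from this page; InputLayer, DeviceCpu::new, the module paths, and the rand/rand_distr initialization are assumptions based on typical usage of the crate:

use rand::prelude::*;
use rand_distr::Normal;
use nncombinator::arr::Arr;
use nncombinator::device::DeviceCpu;
use nncombinator::layer::{AddLayer, InputLayer, LinearLayer};

// Assumed: a 4-unit input layer feeding a 4 -> 2 linear layer on the CPU.
let mut rng = rand::thread_rng();
let dist = Normal::<f32>::new(0.0, 0.1).unwrap();

let device = DeviceCpu::new().unwrap();
let input: InputLayer<f32, Arr<f32, 4>, _> = InputLayer::new();

let net = input.add_layer(|l| {
    // ui initializes each unit weight; bi initializes the bias weights.
    LinearLayer::<_, _, _, DeviceCpu<_>, _, 4, 2>::new(
        l,
        &device,
        move || dist.sample(&mut rng),
        || 0.0,
    )
});

Since ui generates one value per weight, it is called repeatedly, hence the FnMut bound; zero biases and small normally distributed weights are a common default, not a requirement of the API.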
impl<U, P, I, const NI: usize, const NO: usize> LinearLayer<U, CachedTensor<U, Arr2<U, NI, NO>>, P, DeviceGpu<U>, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
    DeviceGpu<U>: Device<U>,
Trait Implementations
impl<U, C, P, D, I, const NI: usize, const NO: usize> AskDiffInput<U> for LinearLayer<U, C, P, D, I, NI, NO>
where
    P: PreTrain<U, OutStack = <<Self as PreTrain<U>>::OutStack as Stack>::Remaining> + ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + Loss<U> + AskDiffInput<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U>,
    I: Debug + Send + Sync,
    Self: PreTrain<U>,
type DiffInput = <P as AskDiffInput<U>>::DiffInput

Diff Input to this layer of the neural network
fn ask_diff_input(&self, stack: &Self::OutStack) -> Self::DiffInput

Data inquiry for creating difference information
impl<U, C, P, D, I, const NI: usize, const NO: usize> Backward<U, &Arr<U, NO>, Result<Arr<U, NI>, TrainingError>> for LinearLayer<U, C, P, D, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
    U: Default + Clone + Copy + UnitValue<U>,
    D: Device<U> + DeviceLinear<U, C, NI, NO>,
    I: Debug + Send + Sync,
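As the Backward types indicate, the backward pass maps a loss gradient of size NO to an input gradient of size NI. For a linear layer this is the standard chain rule; a sketch assuming the usual convention y = Wᵀx + b with W of shape NI x NO (matching Arr2<U, NI, NO>), x in R^NI and y in R^NO:

\[
\frac{\partial L}{\partial x} = W \frac{\partial L}{\partial y},
\qquad
\frac{\partial L}{\partial W} = x \left(\frac{\partial L}{\partial y}\right)^{\mathsf{T}},
\qquad
\frac{\partial L}{\partial b} = \frac{\partial L}{\partial y}
\]

The first expression is what this implementation returns (Arr<U, NI>); the weight and bias gradients are presumably what backward_all hands to the optimizer.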
impl<U, P, I, const NI: usize, const NO: usize> BackwardAll<U> for LinearLayer<U, Arr2<U, NI, NO>, P, DeviceCpu<U>, I, NI, NO>
where
    P: BackwardAll<U, LossInput = Arr<U, NI>> + ForwardAll<Input = I, Output = Arr<U, NI>> + PreTrain<U> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
fn backward_all<OP: Optimizer<U>, L: LossFunction<U>>(
    &mut self,
    input: Self::LossInput,
    stack: Self::OutStack,
    optimizer: &mut OP,
    lossf: &L
) -> Result<(), TrainingError>
Back propagation of errors
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
impl<U, P, I, const NI: usize, const NO: usize> BackwardAll<U> for LinearLayer<U, CachedTensor<U, Arr2<U, NI, NO>>, P, DeviceGpu<U>, I, NI, NO>
where
    P: BackwardAll<U, LossInput = Arr<U, NI>> + ForwardAll<Input = I, Output = Arr<U, NI>> + PreTrain<U> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
    DeviceGpu<U>: Device<U> + DeviceLinear<U, CachedTensor<U, Arr2<U, NI, NO>>, NI, NO>,
fn backward_all<OP: Optimizer<U>, L: LossFunction<U>>(
    &mut self,
    input: Self::LossInput,
    stack: Self::OutStack,
    optimizer: &mut OP,
    lossf: &L
) -> Result<(), TrainingError>
Back propagation of errors
fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
impl<U, P, I, const NI: usize, const NO: usize> BatchBackward<U> for LinearLayer<U, Arr2<U, NI, NO>, P, DeviceCpu<U>, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + BatchForwardBase<BatchInput = VecArr<U, I>, BatchOutput = VecArr<U, Arr<U, NI>>> + BatchForward + BatchPreTrainBase<U> + BatchPreTrain<U> + BatchBackward<U> + BatchLoss<U, BatchLossInput = VecArr<U, Arr<U, NI>>>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
type BatchLossInput = VecArr<U, Arr<U, NO>>

Losses during neural network training for batch execution
fn batch_backward<OP: Optimizer<U>, L: LossFunction<U>>(
    &mut self,
    input: Self::BatchLossInput,
    stack: Self::BatchOutStack,
    optimizer: &mut OP,
    lossf: &L
) -> Result<(), TrainingError>
Back propagation of errors
impl<U, P, I, const NI: usize, const NO: usize> BatchBackward<U> for LinearLayer<U, CachedTensor<U, Arr2<U, NI, NO>>, P, DeviceGpu<U>, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + BatchForwardBase<BatchInput = VecArr<U, I>, BatchOutput = VecArr<U, Arr<U, NI>>> + BatchForward + BatchPreTrainBase<U> + BatchPreTrain<U> + BatchBackward<U> + BatchLoss<U, BatchLossInput = VecArr<U, Arr<U, NI>>>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
    DeviceGpu<U>: Device<U> + DeviceLinear<U, CachedTensor<U, Arr2<U, NI, NO>>, NI, NO>,
type BatchLossInput = VecArr<U, Arr<U, NO>>

Losses during neural network training for batch execution
fn batch_backward<OP: Optimizer<U>, L: LossFunction<U>>(
    &mut self,
    input: Self::BatchLossInput,
    stack: Self::BatchOutStack,
    optimizer: &mut OP,
    lossf: &L
) -> Result<(), TrainingError>
Back propagation of errors
impl<U, C, P, D, I, const NI: usize, const NO: usize> BatchForward for LinearLayer<U, C, P, D, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + BatchForwardBase<BatchInput = VecArr<U, I>, BatchOutput = VecArr<U, Arr<U, NI>>> + BatchForward,
    D: Device<U> + DeviceLinear<U, C, NI, NO>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
fn batch_forward(
    &self,
    input: Self::BatchInput
) -> Result<Self::BatchOutput, TrainingError>
Forward propagation
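A hedged batch-execution sketch, continuing the construction example above. The Arr::try_from and Vec-to-VecArr conversions are assumptions for illustration; this page only documents the batch_forward signature:

use nncombinator::arr::{Arr, VecArr};

// Two samples of NI = 4 features, run through the network in one batch.
let a: Arr<f32, 4> = Arr::try_from(vec![0.0f32, 0.1, 0.2, 0.3]).unwrap();
let b: Arr<f32, 4> = Arr::try_from(vec![0.4f32, 0.5, 0.6, 0.7]).unwrap();
let batch: VecArr<f32, Arr<f32, 4>> = vec![a, b].into();

// BatchOutput is VecArr<f32, Arr<f32, 2>>: one NO = 2 row per sample.
let ys = net.batch_forward(batch).unwrap();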
impl<U, C, P, D, I, const NI: usize, const NO: usize> BatchForwardBase for LinearLayer<U, C, P, D, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + BatchForwardBase<BatchInput = VecArr<U, I>, BatchOutput = VecArr<U, Arr<U, NI>>>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U>,
    I: Debug + Send + Sync,
    Self: ForwardAll,
type BatchInput = VecArr<U, I>

Input to this layer of the neural network for batch execution
type BatchOutput = VecArr<U, Arr<U, NO>>

Output from this layer of the neural network for batch execution
impl<U, P, I, const NI: usize, const NO: usize> BatchLoss<U> for LinearLayer<U, Arr2<U, NI, NO>, P, DeviceCpu<U>, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + BatchForwardBase<BatchInput = VecArr<U, I>, BatchOutput = VecArr<U, Arr<U, NI>>> + BatchForward + BatchPreTrainBase<U> + BatchPreTrain<U> + BatchBackward<U> + BatchLoss<U, BatchLossInput = VecArr<U, Arr<U, NI>>>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
fn batch_loss<L: LossFunction<U>>(
    &self,
    loss: Self::BatchLossInput,
    _lossf: &L,
    stack: Self::BatchOutStack
) -> Result<(Self::BatchOutStack, Self::BatchLossInput), TrainingError>

Error Calculation
impl<U, P, I, const NI: usize, const NO: usize> BatchLoss<U> for LinearLayer<U, CachedTensor<U, Arr2<U, NI, NO>>, P, DeviceGpu<U>, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + BatchForwardBase<BatchInput = VecArr<U, I>, BatchOutput = VecArr<U, Arr<U, NI>>> + BatchForward + BatchPreTrainBase<U> + BatchPreTrain<U> + BatchBackward<U> + BatchLoss<U, BatchLossInput = VecArr<U, Arr<U, NI>>>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
    DeviceGpu<U>: Device<U>,
    Self: Loss<U> + BatchBackward<U>,
fn batch_loss<L: LossFunction<U>>(
    &self,
    loss: Self::BatchLossInput,
    _lossf: &L,
    stack: Self::BatchOutStack
) -> Result<(Self::BatchOutStack, Self::BatchLossInput), TrainingError>

Error Calculation
impl<U, C, P, D, I, const NI: usize, const NO: usize> BatchPreTrain<U> for LinearLayer<U, C, P, D, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + BatchForwardBase<BatchInput = VecArr<U, I>, BatchOutput = VecArr<U, Arr<U, NI>>> + BatchForward + BatchPreTrainBase<U> + BatchPreTrain<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U> + DeviceLinear<U, C, NI, NO>,
    I: Debug + Send + Sync,
fn batch_pre_train(
    &self,
    input: Self::BatchInput
) -> Result<Self::BatchOutStack, TrainingError>

Performs the forward propagation required for error back propagation
impl<U, C, P, D, I, const NI: usize, const NO: usize> BatchPreTrainBase<U> for LinearLayer<U, C, P, D, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + BatchForwardBase<BatchInput = VecArr<U, I>, BatchOutput = VecArr<U, Arr<U, NI>>> + BatchForward + BatchPreTrainBase<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U>,
    I: Debug + Send + Sync,
    Self: PreTrain<U>,
type BatchOutStack = Cons<<P as BatchPreTrainBase<U>>::BatchOutStack, <LinearLayer<U, C, P, D, I, NI, NO> as BatchForwardBase>::BatchOutput>

Type of object that keeps the results of forward propagation needed to perform error back propagation for batch execution. Each layer pushes its batch output onto this Cons list, so back propagation can pop the cached activations in reverse layer order.
impl<U, C, P, D, I, const NI: usize, const NO: usize> Forward<Arr<U, NI>, Result<Arr<U, NO>, EvaluateError>> for LinearLayer<U, C, P, D, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U> + DeviceLinear<U, C, NI, NO>,
    I: Debug + Send + Sync,
impl<U, C, P, D, I, const NI: usize, const NO: usize> ForwardAll for LinearLayer<U, C, P, D, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U>,
    D: Device<U> + DeviceLinear<U, C, NI, NO>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
type Input = I

Input to this layer of the neural network
fn forward_all(&self, input: Self::Input) -> Result<Self::Output, EvaluateError>

Forward propagation
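A hedged single-sample inference sketch, again continuing the construction example; Arr::try_from is an assumption based on typical usage of the crate, not something this page documents:

use nncombinator::arr::Arr;

// One sample of NI = 4 features; the output presumably has NO = 2 elements.
let x: Arr<f32, 4> = Arr::try_from(vec![0.0f32, 0.1, 0.2, 0.3]).unwrap();
let y = net.forward_all(x).unwrap();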
impl<U, P, I, const NI: usize, const NO: usize> Loss<U> for LinearLayer<U, Arr2<U, NI, NO>, P, DeviceCpu<U>, I, NI, NO>
where
    P: PreTrain<U> + ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
impl<U, P, I, const NI: usize, const NO: usize> Loss<U> for LinearLayer<U, CachedTensor<U, Arr2<U, NI, NO>>, P, DeviceGpu<U>, I, NI, NO>
where
    P: PreTrain<U> + ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
    DeviceGpu<U>: Device<U>,
    Self: BackwardAll<U>,
impl<T, U, P, I, const NI: usize, const NO: usize> Persistence<U, T, Linear> for LinearLayer<U, Arr2<U, NI, NO>, P, DeviceCpu<U>, I, NI, NO>
where
    T: LinearPersistence<U>,
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + Persistence<U, T, Linear>,
    U: Default + Clone + Copy + UnitValue<U>,
    I: Debug + Send + Sync,
impl<T, U, P, I, const NI: usize, const NO: usize> Persistence<U, T, Linear> for LinearLayer<U, CachedTensor<U, Arr2<U, NI, NO>>, P, DeviceGpu<U>, I, NI, NO>
where
    T: LinearPersistence<U>,
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + Persistence<U, T, Linear>,
    U: Default + Clone + Copy + UnitValue<U>,
    I: Debug + Send + Sync,
    DeviceGpu<U>: Device<U>,
impl<U, P, I, const NI: usize, const NO: usize> Persistence<U, TextFilePersistence<U>, Specialized> for LinearLayer<U, Arr2<U, NI, NO>, P, DeviceCpu<U>, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + Persistence<U, TextFilePersistence<U>, Specialized>,
    U: Default + Clone + Copy + UnitValue<U> + FromStr,
    I: Debug + Send + Sync,
    ConfigReadError: From<<U as FromStr>::Err>,
fn load(
    &mut self,
    persistence: &mut TextFilePersistence<U>
) -> Result<(), ConfigReadError>

Load Model
fn save(
    &mut self,
    persistence: &mut TextFilePersistence<U>
) -> Result<(), PersistenceError>

Save Model
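A hedged save/load sketch. Only the load and save signatures above come from this page; the nncombinator::persistence module path and a TextFilePersistence::new constructor taking a file path are hypothetical details for illustration:

use nncombinator::persistence::{Persistence, TextFilePersistence};

// Hypothetical constructor: persistence backed by a text file.
let mut p: TextFilePersistence<f32> = TextFilePersistence::new("model.txt").unwrap();

net.save(&mut p).unwrap();   // write the weights of the whole layer stack
net.load(&mut p).unwrap();   // restore into an identically shaped network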
impl<U, P, I, const NI: usize, const NO: usize> Persistence<U, TextFilePersistence<U>, Specialized> for LinearLayer<U, CachedTensor<U, Arr2<U, NI, NO>>, P, DeviceGpu<U>, I, NI, NO>
where
    P: ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + PreTrain<U> + Loss<U> + Persistence<U, TextFilePersistence<U>, Specialized>,
    U: Default + Clone + Copy + UnitValue<U> + FromStr,
    I: Debug + Send + Sync,
    DeviceGpu<U>: Device<U>,
    ConfigReadError: From<<U as FromStr>::Err>,
fn load(
    &mut self,
    persistence: &mut TextFilePersistence<U>
) -> Result<(), ConfigReadError>

Load Model
fn save(
    &mut self,
    persistence: &mut TextFilePersistence<U>
) -> Result<(), PersistenceError>

Save Model
impl<U, C, P, D, I, const NI: usize, const NO: usize> PreTrain<U> for LinearLayer<U, C, P, D, I, NI, NO>
where
    P: PreTrain<U> + ForwardAll<Input = I, Output = Arr<U, NI>> + BackwardAll<U, LossInput = Arr<U, NI>> + Loss<U>,
    D: Device<U> + DeviceLinear<U, C, NI, NO>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
type OutStack = Cons<<P as PreTrain<U>>::OutStack, <LinearLayer<U, C, P, D, I, NI, NO> as ForwardAll>::Output>

Type of object to keep the results of forward propagation needed to perform error back propagation.
Auto Trait Implementations
impl<U, C, P, D, I, const NI: usize, const NO: usize> RefUnwindSafe for LinearLayer<U, C, P, D, I, NI, NO>
where
    C: RefUnwindSafe,
    D: RefUnwindSafe,
    P: RefUnwindSafe,
    U: RefUnwindSafe,

impl<U, C, P, D, I, const NI: usize, const NO: usize> Send for LinearLayer<U, C, P, D, I, NI, NO>
where
    C: Send,
    D: Send,
    P: Send,

impl<U, C, P, D, I, const NI: usize, const NO: usize> Sync for LinearLayer<U, C, P, D, I, NI, NO>
where
    C: Sync,
    D: Sync,
    P: Sync,

impl<U, C, P, D, I, const NI: usize, const NO: usize> Unpin for LinearLayer<U, C, P, D, I, NI, NO>
where
    C: Unpin,
    D: Unpin,
    P: Unpin,

impl<U, C, P, D, I, const NI: usize, const NO: usize> UnwindSafe for LinearLayer<U, C, P, D, I, NI, NO>
where
    C: UnwindSafe,
    D: UnwindSafe,
    P: UnwindSafe,
    U: UnwindSafe,
Blanket Implementations
impl<T> AddLayer for T
where
    T: ForwardAll,

fn add_layer<C, F>(self, f: F) -> C
where
    C: ForwardAll,
    F: FnOnce(T) -> C,

Adding Layers
impl<T, U> AddLayerTrain<U> for T
where
    T: PreTrain<U>,
    U: UnitValue<U>,

fn add_layer_train<C, F>(self, f: F) -> C
where
    C: Train<U>,
    F: FnOnce(T) -> C,

Adding Layers
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.
impl<T, U> ForwardDiff<U> for T
where
    T: PreTrain<U>,
    U: UnitValue<U>,

fn forward_diff(
    &self,
    input: <T as ForwardAll>::Input
) -> Result<<T as PreTrain<U>>::OutStack, EvaluateError>

Forward propagation (differential application)
impl<T> Pointable for T
impl<T> TryAddLayer for T
where
    T: ForwardAll,

fn try_add_layer<C, F>(self, f: F) -> Result<C, DeviceError>
where
    C: ForwardAll,
    F: FnOnce(T) -> Result<C, DeviceError>,

Adding Layers
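A hedged sketch of the fallible variant, reusing the input layer and device from the construction example above. The closure returns Result<C, DeviceError>, so errors raised while building a layer propagate via ? instead of panicking; the initializer constants are arbitrary:

// Build the same 4 -> 2 layer, letting construction errors propagate.
let net = input.try_add_layer(|l| {
    Ok(LinearLayer::<_, _, _, DeviceCpu<_>, _, 4, 2>::new(
        l,
        &device,
        || 0.01f32,
        || 0.0,
    ))
})?;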
impl<T, U> TryAddLayerTrain<U> for T
where
    T: PreTrain<U>,
    U: UnitValue<U>,

fn try_add_layer_train<C, F>(self, f: F) -> Result<C, DeviceError>
where
    C: Train<U>,
    F: FnOnce(T) -> Result<C, DeviceError>,

Adding Layers