Struct BiasLayer
pub struct BiasLayer<U, C, P, OP, D, I, PI, const N: usize>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    D: Device<U>,
    I: Debug + Send + Sync,
    PI: Debug,
    OP: Optimizer<U, D>,
{ /* private fields */ }

Bias Layer Implementation

A BiasLayer wraps its parent layer P and adds a learnable bias of N units to the parent's output. U is the numeric unit type, C is the container that holds the bias parameters (Arr<U, N> on the CPU, CudaTensor1dPtr<U, N> on the GPU), OP is the optimizer that updates the bias, D is the device the layer runs on, I is the input type accepted by the layer chain, PI is the parent's output type (which is also this layer's output type), and N is the number of bias units.

Trait Implementations

impl<U, C, P, OP, D, I, PI, const N: usize> AskDiffInput<U> for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: PreTrain<U, OutStack = <<Self as PreTrain<U>>::OutStack as Stack>::Remaining> + ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + Loss<U> + AskDiffInput<U>, U: Default + Clone + Copy + Send + UnitValue<U>, D: Device<U>, I: Debug + Send + Sync, PI: Debug + BatchDataType + 'static, OP: Optimizer<U, D>, <PI as BatchDataType>::Type: Debug + BatchSize + 'static, Self: PreTrain<U, PreOutput = PI>,

type DiffInput = <P as AskDiffInput<U>>::DiffInput

Diff Input to this layer of the neural network

fn ask_diff_input( &self, stack: &Self::OutStack, ) -> Result<Self::DiffInput, TypeConvertError>

Queries the stack for the data needed to create difference information Read more

impl<U, C, P, OP, D, I, PI, const N: usize> Backward<U, PI, Result<PI, TrainingError>> for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U>, U: Default + Clone + Copy + UnitValue<U>, D: Device<U> + DeviceBias<U, C, PI, N>, I: Debug + Send + Sync, PI: Debug + BatchDataType + 'static, OP: Optimizer<U, D>, <PI as BatchDataType>::Type: Debug + BatchSize + 'static,

fn backward(&mut self, input: PI) -> Result<PI, TrainingError>

Back propagation of errors Read more

impl<U, P, OP, D, I, PI, const N: usize> BackwardAll<U> for BiasLayer<U, Arr<U, N>, P, OP, D, I, PI, N>
where P: BackwardAll<U, LossInput = PI> + ForwardAll<Input = I, Output = PI> + PreTrain<U, PreOutput = PI> + Loss<U>, U: Default + Clone + Copy + Send + UnitValue<U>, D: Device<U> + DeviceBias<U, Arr<U, N>, PI, N>, I: Debug + Send + Sync, PI: Debug + BatchDataType + 'static, OP: Optimizer<U, D>, <PI as BatchDataType>::Type: Debug + BatchSize + 'static, for<'a> &'a <OP as Optimizer<U, D>>::InternalType: From<&'a Arr<U, N>>, for<'a> <OP as Optimizer<U, D>>::InternalUpdateType<'a>: From<&'a mut Arr<U, N>>,

type LossInput = PI

Losses during neural network training

type LossOutput = <P as BackwardAll<U>>::LossOutput

Losses in the top layer during neural network training

fn backward_all<L: LossFunction<U>>( &mut self, input: Self::LossInput, stack: Self::OutStack, lossf: &L, ) -> Result<(<Self as BackwardAll<U>>::LossOutput, <Self as UpdateWeight<U>>::GradientStack), TrainingError>

Back propagation of errors Read more
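
Together with PreTrain and UpdateWeight (documented further down this page), backward_all forms the single-sample training path: pre_train records each layer's output on the OutStack, backward_all walks that stack with the incoming loss, and the returned GradientStack is applied with update_weight. The sketch below only relies on the method signatures shown on this page; the helper name, the exact bound set, and the omitted imports are assumptions, not part of the crate.

fn backprop_and_update<T, U, L>(
    net: &mut T,
    loss: <T as BackwardAll<U>>::LossInput,
    stack: <T as PreTrain<U>>::OutStack,
    lossf: &L,
) -> Result<<T as BackwardAll<U>>::LossOutput, TrainingError>
where
    T: PreTrain<U> + BackwardAll<U> + UpdateWeight<U>,
    U: UnitValue<U> + Default + Clone + Copy + Send,
    L: LossFunction<U>,
{
    // backward_all consumes the stack built by pre_train and returns the loss
    // handed to the layer above plus a stack of per-layer gradients.
    let (loss_out, grads) = net.backward_all(loss, stack, lossf)?;
    // update_weight lets each layer's optimizer apply its gradient; for a
    // BiasLayer this updates the Arr<U, N> bias vector.
    net.update_weight(grads)?;
    Ok(loss_out)
}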

impl<U, P, OP, D, I, PI, const N: usize> BatchBackward<U> for BiasLayer<U, Arr<U, N>, P, OP, D, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type> + BatchForward + BatchPreTrainBase<U, BatchPreOutput = <PI as BatchDataType>::Type> + BatchPreTrain<U> + BatchBackward<U> + BatchLoss<U, BatchLossInput = <PI as BatchDataType>::Type>, U: Default + Clone + Copy + Send + UnitValue<U>, I: Debug + Send + Sync + BatchDataType, PI: Debug + BatchDataType + 'static, <PI as BatchDataType>::Type: Debug + BatchSize + IntoConverter + 'static, <I as BatchDataType>::Type: Debug, OP: Optimizer<U, D>, D: Device<U> + DeviceBias<U, Arr<U, N>, PI, N>, for<'a> &'a <OP as Optimizer<U, D>>::InternalType: From<&'a Arr<U, N>>, for<'a> <OP as Optimizer<U, D>>::InternalUpdateType<'a>: From<&'a mut Arr<U, N>>,

type BatchLossInput = <PI as BatchDataType>::Type

Losses during neural network training for batch execution

type BatchLossOutput = <P as BatchBackward<U>>::BatchLossOutput

Losses in the top layer during neural network training

fn batch_backward<L: LossFunction<U>>( &mut self, input: Self::BatchLossInput, stack: Self::BatchOutStack, lossf: &L, ) -> Result<(<Self as BatchBackward<U>>::BatchLossOutput, <Self as UpdateWeight<U>>::GradientStack), TrainingError>

Back propagation of errors Read more
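
The mini-batch analogue: batch_pre_train builds the BatchOutStack for the whole batch, batch_backward turns the batch loss into per-layer gradients accumulated over the batch, and update_weight applies them. As above, this is a sketch; the helper name and bound set are assumptions and imports are omitted.

fn train_batch<T, U, L>(
    net: &mut T,
    batch_input: <T as BatchForwardBase>::BatchInput,
    batch_loss: <T as BatchBackward<U>>::BatchLossInput,
    lossf: &L,
) -> Result<(), TrainingError>
where
    T: BatchForwardBase + BatchPreTrainBase<U> + BatchPreTrain<U> + BatchBackward<U> + UpdateWeight<U>,
    U: UnitValue<U> + Default + Clone + Copy + Send,
    L: LossFunction<U>,
{
    // Forward pass over the whole batch, recording every layer's batch output.
    let stack = net.batch_pre_train(batch_input)?;
    // Back-propagate the batch loss; gradients are accumulated per layer.
    let (_upper_loss, grads) = net.batch_backward(batch_loss, stack, lossf)?;
    // Apply the accumulated gradients through each layer's optimizer.
    net.update_weight(grads)
}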

impl<U, C, P, OP, D, I, PI, const N: usize> BatchForward for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type> + BatchForward, D: Device<U> + DeviceBias<U, C, PI, N>, U: Default + Clone + Copy + Send + UnitValue<U>, I: Debug + Send + Sync + BatchDataType, PI: Debug + BatchDataType + 'static, <PI as BatchDataType>::Type: Debug + BatchSize + 'static, <I as BatchDataType>::Type: Debug, OP: Optimizer<U, D>,

fn batch_forward( &self, input: Self::BatchInput, ) -> Result<Self::BatchOutput, TrainingError>

Forward propagation Read more
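
batch_forward runs forward propagation for a whole mini-batch in one call. A trivial wrapper, with the usual caveat that the helper name is illustrative and imports are omitted.

fn predict_batch<T>(
    net: &T,
    batch: <T as BatchForwardBase>::BatchInput,
) -> Result<<T as BatchForwardBase>::BatchOutput, TrainingError>
where
    T: BatchForwardBase + BatchForward,
{
    // For BiasLayer this applies the same N-element bias to every sample.
    net.batch_forward(batch)
}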

impl<U, C, P, OP, D, I, PI, const N: usize> BatchForwardBase for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type>, U: Default + Clone + Copy + Send + UnitValue<U>, D: Device<U> + DeviceBias<U, C, PI, N>, I: Debug + Send + Sync + BatchDataType, PI: Debug + BatchDataType + 'static, <PI as BatchDataType>::Type: Debug + BatchSize + 'static, <I as BatchDataType>::Type: Debug, OP: Optimizer<U, D>, Self: ForwardAll,

type BatchInput = <I as BatchDataType>::Type

Input to this layer of the neural network for batch execution

type BatchOutput = <PI as BatchDataType>::Type

Output from this layer of the neural network for batch execution

impl<U, P, OP, D, I, PI, const N: usize> BatchLoss<U> for BiasLayer<U, Arr<U, N>, P, OP, D, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type> + BatchForward + BatchPreTrainBase<U, BatchPreOutput = <PI as BatchDataType>::Type> + BatchPreTrain<U> + BatchBackward<U> + BatchLoss<U, BatchLossInput = <PI as BatchDataType>::Type>, U: Default + Clone + Copy + Send + UnitValue<U>, I: Debug + Send + Sync + BatchDataType, PI: Debug + BatchDataType + 'static, <PI as BatchDataType>::Type: Debug + BatchSize + IntoConverter + 'static, <I as BatchDataType>::Type: Debug, OP: Optimizer<U, D>, D: Device<U> + DeviceBias<U, Arr<U, N>, PI, N>, for<'a> &'a <OP as Optimizer<U, D>>::InternalType: From<&'a Arr<U, N>>, for<'a> <OP as Optimizer<U, D>>::InternalUpdateType<'a>: From<&'a mut Arr<U, N>>,

fn batch_loss<L: LossFunction<U>>( &self, loss: Self::BatchLossInput, _lossf: &L, stack: Self::BatchOutStack, ) -> Result<(Self::BatchOutStack, Self::BatchLossInput), TrainingError>

Error Calculation Read more

impl<U, C, P, OP, D, I, PI, const N: usize> BatchPreTrain<U> for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type> + BatchForward + BatchPreTrainBase<U, BatchPreOutput = <PI as BatchDataType>::Type> + BatchPreTrain<U>, U: Default + Clone + Copy + Send + UnitValue<U>, D: Device<U> + DeviceBias<U, C, PI, N>, I: Debug + Send + Sync + BatchDataType, PI: Debug + BatchDataType + 'static, <PI as BatchDataType>::Type: Debug + BatchSize + 'static, <I as BatchDataType>::Type: Debug, OP: Optimizer<U, D>,

fn batch_pre_train( &self, input: Self::BatchInput, ) -> Result<Self::BatchOutStack, TrainingError>

Performs the forward propagation required for error back-propagation Read more

impl<U, C, P, OP, D, I, PI, const N: usize> BatchPreTrainBase<U> for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + BatchForwardBase<BatchInput = <I as BatchDataType>::Type, BatchOutput = <PI as BatchDataType>::Type> + BatchForward + BatchPreTrainBase<U, BatchPreOutput = <PI as BatchDataType>::Type>, U: Default + Clone + Copy + Send + UnitValue<U>, D: Device<U> + DeviceBias<U, C, PI, N>, I: Debug + Send + Sync + BatchDataType, PI: Debug + BatchDataType + 'static, <PI as BatchDataType>::Type: Debug + BatchSize + 'static, <I as BatchDataType>::Type: Debug, OP: Optimizer<U, D>, Self: PreTrain<U, PreOutput = PI>,

type BatchPreOutput = <PI as BatchDataType>::Type

The type of output pushed onto the stack during error back-propagation in a batch run.

type BatchOutStack = Cons<<P as BatchPreTrainBase<U>>::BatchOutStack, <BiasLayer<U, C, P, OP, D, I, PI, N> as BatchPreTrainBase<U>>::BatchPreOutput>

Type of object that keeps the results of forward propagation needed for error back-propagation in batch execution.

impl<U, P, OP, I, PI, const N: usize> BiasLayerInstantiation<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N> for BiasLayer<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U>, U: Default + Clone + Copy + Send + UnitValue<U>, I: Debug + Send + Sync, PI: Debug + BatchDataType, OP: Optimizer<U, DeviceCpu<U>>,

fn instantiation<UI: FnMut() -> U, B: OptimizerBuilder<U, DeviceCpu<U>, Output = OP>>( parent: P, device: &DeviceCpu<U>, ui: UI, b: &B, ) -> Result<BiasLayer<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N>, LayerInstantiationError>

Create and return an instance from the parent layer, the device, a callback that supplies the initial bias values, and an optimizer builder. Read more
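
A construction sketch for the CPU case. Everything except the documented instantiation signature is an assumption: the helper name, the choice of U::default() (zero for the crate's floating-point types) as the initial bias value, and the omitted imports.

fn with_bias<U, P, OP, B, I, PI, const N: usize>(
    parent: P,
    device: &DeviceCpu<U>,
    builder: &B,
) -> Result<BiasLayer<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N>, LayerInstantiationError>
where
    P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U>,
    U: Default + Clone + Copy + Send + UnitValue<U>,
    I: Debug + Send + Sync,
    PI: Debug + BatchDataType,
    OP: Optimizer<U, DeviceCpu<U>>,
    B: OptimizerBuilder<U, DeviceCpu<U>, Output = OP>,
{
    // The closure supplies initial bias values; here they all start at
    // U::default(). The builder decides which optimizer OP drives the updates.
    <BiasLayer<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N> as BiasLayerInstantiation<
        U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N,
    >>::instantiation(parent, device, || U::default(), builder)
}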

impl<U, P, OP, I, PI, const N: usize> BiasLayerInstantiation<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, N> for BiasLayer<U, Arr<U, N>, P, OP, DeviceGpu<U>, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U>, U: Default + Clone + Copy + Send + UnitValue<U>, I: Debug + Send + Sync, PI: Debug + BatchDataType, OP: Optimizer<U, DeviceGpu<U>>, DeviceGpu<U>: Device<U>,

fn instantiation<UI: FnMut() -> U, B: OptimizerBuilder<U, DeviceGpu<U>, Output = OP>>( parent: P, device: &DeviceGpu<U>, ui: UI, b: &B, ) -> Result<BiasLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, N>, LayerInstantiationError>

Create and return an instance from the parent layer, the device, a callback that supplies the initial bias values, and an optimizer builder. Read more

impl<U, C, P, OP, D, I, PI, const N: usize> Forward<PI, Result<PI, EvaluateError>> for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U>, U: Default + Clone + Copy + Send + UnitValue<U>, D: Device<U> + DeviceBias<U, C, PI, N>, I: Debug + Send + Sync, PI: Debug + BatchDataType, OP: Optimizer<U, D>, <PI as BatchDataType>::Type: Debug + BatchSize,

fn forward(&self, input: &PI) -> Result<PI, EvaluateError>

Forward propagation implementation Read more

impl<U, C, P, OP, D, I, PI, const N: usize> ForwardAll for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U>, D: Device<U> + DeviceBias<U, C, PI, N>, U: Default + Clone + Copy + Send + UnitValue<U>, I: Debug + Send + Sync, PI: Debug + BatchDataType + 'static, OP: Optimizer<U, D>, <PI as BatchDataType>::Type: Debug + BatchSize + 'static,

type Input = I

Input to this layer of the neural network

type Output = PI

Output from this layer of the neural network

fn forward_all(&self, input: Self::Input) -> Result<Self::Output, EvaluateError>

Forward propagation Read more
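
Once a network is assembled, single-sample inference is just forward_all on the outermost layer. A minimal sketch; the helper name is an assumption and imports are omitted.

fn predict<T>(
    net: &T,
    input: <T as ForwardAll>::Input,
) -> Result<<T as ForwardAll>::Output, EvaluateError>
where
    T: ForwardAll,
{
    // For BiasLayer, the input is first propagated through the parent layer P
    // and the bias is then applied to the parent's output PI.
    net.forward_all(input)
}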

impl<U, P, OP, D, I, PI, const N: usize> Loss<U> for BiasLayer<U, Arr<U, N>, P, OP, D, I, PI, N>
where P: PreTrain<U, PreOutput = PI> + ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + Loss<U>, U: Default + Clone + Copy + Send + UnitValue<U>, I: Debug + Send + Sync, PI: Debug + BatchDataType + 'static, OP: Optimizer<U, D>, <PI as BatchDataType>::Type: Debug + BatchSize + 'static, D: Device<U> + DeviceBias<U, Arr<U, N>, PI, N>, for<'a> &'a <OP as Optimizer<U, D>>::InternalType: From<&'a Arr<U, N>>, for<'a> <OP as Optimizer<U, D>>::InternalUpdateType<'a>: From<&'a mut Arr<U, N>>,

fn loss<L: LossFunction<U>>( &mut self, loss: Self::LossInput, _lossf: &L, stack: Self::OutStack, ) -> Result<(Self::OutStack, Self::LossInput), TrainingError>

Error Calculation Read more

impl<T, U, P, OP, I, PI, const N: usize> Persistence<U, T, Linear> for BiasLayer<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N>
where T: LinearPersistence<U>, P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U> + Persistence<U, T, Linear>, U: Default + Clone + Copy + UnitValue<U>, I: Debug + Send + Sync, PI: Debug, OP: Optimizer<U, DeviceCpu<U>>,

fn load(&mut self, persistence: &mut T) -> Result<(), ConfigReadError>

Load Model Read more

fn save(&mut self, persistence: &mut T) -> Result<(), PersistenceError>

Save Model Read more
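
The Linear persistence impl streams the parameters through any LinearPersistence backend T. A sketch assuming f32 as the unit type; the helper names are illustrative and imports are omitted.

fn save_model<T, M>(model: &mut M, store: &mut T) -> Result<(), PersistenceError>
where
    T: LinearPersistence<f32>,
    M: Persistence<f32, T, Linear>,
{
    // For a BiasLayer this persists the parent layer's parameters together
    // with this layer's N bias values.
    model.save(store)
}

fn load_model<T, M>(model: &mut M, store: &mut T) -> Result<(), ConfigReadError>
where
    T: LinearPersistence<f32>,
    M: Persistence<f32, T, Linear>,
{
    // Reads the parameters back in the order save_model wrote them.
    model.load(store)
}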

impl<T, U, P, OP, I, PI, const N: usize> Persistence<U, T, Linear> for BiasLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, N>
where T: LinearPersistence<U>, P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U> + Persistence<U, T, Linear>, U: Default + Clone + Copy + UnitValue<U>, I: Debug + Send + Sync, PI: Debug, OP: Optimizer<U, DeviceGpu<U>>, DeviceGpu<U>: Device<U>,

fn load(&mut self, persistence: &mut T) -> Result<(), ConfigReadError>

Load Model Read more

fn save(&mut self, persistence: &mut T) -> Result<(), PersistenceError>

Save Model Read more

impl<U, P, OP, I, PI, const N: usize> Persistence<U, TextFilePersistence<U>, Specialized> for BiasLayer<U, Arr<U, N>, P, OP, DeviceCpu<U>, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U> + Persistence<U, TextFilePersistence<U>, Specialized>, U: Default + Clone + Copy + UnitValue<U> + FromStr, I: Debug + Send + Sync, PI: Debug, OP: Optimizer<U, DeviceCpu<U>>, ConfigReadError: From<<U as FromStr>::Err>,

fn load( &mut self, persistence: &mut TextFilePersistence<U>, ) -> Result<(), ConfigReadError>

Load Model Read more

fn save( &mut self, persistence: &mut TextFilePersistence<U>, ) -> Result<(), PersistenceError>

Save Model Read more

impl<U, P, OP, I, PI, const N: usize> Persistence<U, TextFilePersistence<U>, Specialized> for BiasLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U> + Loss<U> + Persistence<U, TextFilePersistence<U>, Specialized>, U: Default + Clone + Copy + UnitValue<U> + FromStr, I: Debug + Send + Sync, PI: Debug, OP: Optimizer<U, DeviceGpu<U>>, DeviceGpu<U>: Device<U>, ConfigReadError: From<<U as FromStr>::Err>,

fn load( &mut self, persistence: &mut TextFilePersistence<U>, ) -> Result<(), ConfigReadError>

Load Model Read more

fn save( &mut self, persistence: &mut TextFilePersistence<U>, ) -> Result<(), PersistenceError>

Save Model Read more

impl<U, C, P, OP, D, I, PI, const N: usize> PreTrain<U> for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: PreTrain<U, PreOutput = PI> + ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + Loss<U>, D: Device<U> + DeviceBias<U, C, PI, N>, U: Default + Clone + Copy + Send + UnitValue<U>, I: Debug + Send + Sync, PI: Debug + BatchDataType + 'static, OP: Optimizer<U, D>, <PI as BatchDataType>::Type: Debug + BatchSize + 'static,

type PreOutput = PI

The type of output pushed onto the stack during the error back-propagation process.

type OutStack = Cons<<P as PreTrain<U>>::OutStack, <BiasLayer<U, C, P, OP, D, I, PI, N> as PreTrain<U>>::PreOutput>

Type of object that keeps the results of forward propagation needed for error back-propagation.

fn pre_train(&self, input: Self::Input) -> Result<Self::OutStack, EvaluateError>

Performs the forward propagation required for error back-propagation Read more

impl<U, P, OP, D, I, PI, const N: usize> UpdateWeight<U> for BiasLayer<U, Arr<U, N>, P, OP, D, I, PI, N>
where P: ForwardAll<Input = I, Output = PI> + BackwardAll<U, LossInput = PI> + PreTrain<U, PreOutput = PI> + Loss<U> + UpdateWeight<U>, U: Default + Clone + Copy + Send + UnitValue<U>, I: Debug + Send + Sync, PI: Debug + BatchDataType + 'static, OP: Optimizer<U, D>, D: Device<U>, <PI as BatchDataType>::Type: Debug + BatchSize + 'static, for<'a> &'a <OP as Optimizer<U, D>>::InternalType: From<&'a Arr<U, N>>, for<'a> <OP as Optimizer<U, D>>::InternalUpdateType<'a>: From<&'a mut Arr<U, N>>,

type GradientStack = Cons<<P as UpdateWeight<U>>::GradientStack, Arr<U, N>>

Type of object that holds the gradient needed to update the weights of the units in each layer.

fn update_weight( &mut self, stack: Self::GradientStack, ) -> Result<(), TrainingError>

Updates the weights of each unit using the gradients held in the gradient stack. Read more

Auto Trait Implementations

impl<U, C, P, OP, D, I, PI, const N: usize> Freeze for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: Freeze, D: Freeze, C: Freeze, OP: Freeze,

impl<U, C, P, OP, D, I, PI, const N: usize> RefUnwindSafe for BiasLayer<U, C, P, OP, D, I, PI, N>

impl<U, C, P, OP, D, I, PI, const N: usize> Send for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: Send, D: Send, C: Send, OP: Send,

impl<U, C, P, OP, D, I, PI, const N: usize> Sync for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: Sync, D: Sync, C: Sync, OP: Sync,

impl<U, C, P, OP, D, I, PI, const N: usize> Unpin for BiasLayer<U, C, P, OP, D, I, PI, N>
where P: Unpin, D: Unpin, C: Unpin, OP: Unpin, U: Unpin,

impl<U, C, P, OP, D, I, PI, const N: usize> UnwindSafe for BiasLayer<U, C, P, OP, D, I, PI, N>

Blanket Implementations

impl<T> AddLayer for T
where T: ForwardAll,

fn add_layer<C, F>(self, f: F) -> C
where C: ForwardAll, F: FnOnce(T) -> C,

Adding Layers Read more
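
AddLayer is a blanket impl for every ForwardAll type, so an existing network value can be extended by a closure that consumes it and returns the next layer; chaining add_layer calls is how deeper stacks, including a BiasLayer, are composed. The wrapper below merely restates that contract; in practice the closure would call a constructor such as BiasLayerInstantiation::instantiation and unwrap or propagate its Result (see try_add_layer below for the error-propagating form).

fn extend<P, C, F>(parent: P, make_next: F) -> C
where
    P: ForwardAll,
    C: ForwardAll,
    F: FnOnce(P) -> C,
{
    // add_layer hands ownership of `parent` to the closure and returns
    // whatever layer the closure builds on top of it.
    parent.add_layer(make_next)
}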

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more

impl<T, U> ForwardDiff<U> for T
where T: PreTrain<U>, U: UnitValue<U>,

fn forward_diff( &self, input: <T as ForwardAll>::Input, ) -> Result<<T as PreTrain<U>>::OutStack, EvaluateError>

Forward propagation (differential application) Read more

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more

impl<T> Pointable for T

const ALIGN: usize

The alignment of the pointer.

type Init = T

The type for initializers.

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a value with the given initializer. Read more

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more

impl<T> TryAddLayer for T
where T: ForwardAll,

fn try_add_layer<C, F, E>(self, f: F) -> Result<C, E>
where C: ForwardAll, F: FnOnce(T) -> Result<C, E>,

Adding Layers Read more
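
try_add_layer is the fallible variant: the closure returns a Result, so a failed layer construction propagates to the caller instead of panicking. The sketch below stacks a CPU BiasLayer onto an arbitrary parent; f32, the zero initializer, the helper name, and the exact bound set (reconstructed from the ForwardAll and BiasLayerInstantiation impls listed above) are assumptions, and imports are omitted.

fn try_with_bias<P, OP, B, I, PI, const N: usize>(
    parent: P,
    device: &DeviceCpu<f32>,
    builder: &B,
) -> Result<BiasLayer<f32, Arr<f32, N>, P, OP, DeviceCpu<f32>, I, PI, N>, LayerInstantiationError>
where
    P: ForwardAll<Input = I, Output = PI>
        + BackwardAll<f32, LossInput = PI>
        + PreTrain<f32, PreOutput = PI>
        + Loss<f32>,
    DeviceCpu<f32>: DeviceBias<f32, Arr<f32, N>, PI, N>,
    I: Debug + Send + Sync,
    PI: Debug + BatchDataType + 'static,
    <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
    OP: Optimizer<f32, DeviceCpu<f32>>,
    B: OptimizerBuilder<f32, DeviceCpu<f32>, Output = OP>,
{
    // The closure's Result makes the new head's construction fallible;
    // try_add_layer forwards any LayerInstantiationError to the caller.
    parent.try_add_layer(|p| {
        <BiasLayer<f32, Arr<f32, N>, P, OP, DeviceCpu<f32>, I, PI, N> as BiasLayerInstantiation<
            f32, Arr<f32, N>, P, OP, DeviceCpu<f32>, I, PI, N,
        >>::instantiation(p, device, || 0.0f32, builder)
    })
}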

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.