pub struct LayerBase<F, P> {
pub rho: F,
pub params: P,
}
Expand description
The LayerBase implementation works to provide a generic interface for layers within a
neural network by associating an activation function F with a set of parameters P.
Fields§
§rho: F — the activation function of the layer
§params: P — the parameters of the layer; often weights and biases
Implementations§
Source§impl<F, P, A> LayerBase<F, P>where
P: RawParams<Elem = A>,
impl<F, P, A> LayerBase<F, P>where
P: RawParams<Elem = A>,
Sourcepub const fn new(rho: F, params: P) -> Self
pub const fn new(rho: F, params: P) -> Self
create a new LayerBase from the given activation function and parameters.
Sourcepub fn from_params(params: P) -> Selfwhere
F: Default,
pub fn from_params(params: P) -> Selfwhere
F: Default,
create a new LayerBase from the given parameters assuming the logical default for
the activation of type F.
Sourcepub fn from_rho<Sh>(rho: F) -> Selfwhere
P: Default,
pub fn from_rho<Sh>(rho: F) -> Selfwhere
P: Default,
create a new LayerBase from the given activation function, using the default value for the parameters of type P.
Sourcepub const fn params_mut(&mut self) -> &mut P
pub const fn params_mut(&mut self) -> &mut P
returns a mutable reference to the layer’s parameters
Sourcepub const fn rho(&self) -> &F
pub const fn rho(&self) -> &F
returns an immutable reference to the activation function of the layer
Sourcepub const fn rho_mut(&mut self) -> &mut F
pub const fn rho_mut(&mut self) -> &mut F
returns a mutable reference to the activation function of the layer
Sourcepub fn with_params<Y>(self, params: Y) -> LayerBase<F, Y>where
F: Activator<Y>,
pub fn with_params<Y>(self, params: Y) -> LayerBase<F, Y>where
F: Activator<Y>,
consumes the current instance and returns another with the given parameters.
Sourcepub fn with_rho<G>(self, rho: G) -> LayerBase<G, P>where
G: Activator<P>,
pub fn with_rho<G>(self, rho: G) -> LayerBase<G, P>where
G: Activator<P>,
consumes the current instance and returns another with the given activation function. This is useful during the creation of the model, when the activation function is not known yet.
Source§impl<F, S, D, A> LayerBase<F, ArrayBase<S, D, A>>
impl<F, S, D, A> LayerBase<F, ArrayBase<S, D, A>>
Source§impl<F, S, D, E, A> LayerBase<F, ParamsBase<S, D, A>>
impl<F, S, D, E, A> LayerBase<F, ParamsBase<S, D, A>>
Sourcepub fn from_rho_with_shape<Sh>(rho: F, shape: Sh) -> Self
pub fn from_rho_with_shape<Sh>(rho: F, shape: Sh) -> Self
create a new layer from the given activation function and shape.
pub const fn bias(&self) -> &ArrayBase<S, E, A>
pub const fn bias_mut(&mut self) -> &mut ArrayBase<S, E, A>
pub const fn weights(&self) -> &ArrayBase<S, D, A>
pub const fn weights_mut(&mut self) -> &mut ArrayBase<S, D, A>
pub fn dim(&self) -> D::Pattern
pub fn raw_dim(&self) -> D
pub fn shape(&self) -> &[usize]
Source§impl<A, P> LayerBase<HyperbolicTangent, P>where
P: RawParams<Elem = A>,
impl<A, P> LayerBase<HyperbolicTangent, P>where
P: RawParams<Elem = A>,
Trait Implementations§
Source§impl<F, P, A, X, Y, Z> Forward<X> for LayerBase<F, P>
impl<F, P, A, X, Y, Z> Forward<X> for LayerBase<F, P>
type Output = <F as Activator<Y>>::Output
Source§fn forward_then<F>(&self, input: &Rhs, then: F) -> Self::Output
fn forward_then<F>(&self, input: &Rhs, then: F) -> Self::Output
Source§impl<F: Ord, P: Ord> Ord for LayerBase<F, P>
impl<F: Ord, P: Ord> Ord for LayerBase<F, P>
1.21.0 · Source§fn max(self, other: Self) -> Selfwhere
Self: Sized,
fn max(self, other: Self) -> Selfwhere
Self: Sized,
Source§impl<F: PartialOrd, P: PartialOrd> PartialOrd for LayerBase<F, P>
impl<F: PartialOrd, P: PartialOrd> PartialOrd for LayerBase<F, P>
Source§impl<F, P, A> RawLayerMut<F, A> for LayerBase<F, P>
impl<F, P, A> RawLayerMut<F, A> for LayerBase<F, P>
Source§fn params_mut(&mut self) -> &mut P
fn params_mut(&mut self) -> &mut P
Source§fn backward<X, Y, Z, Dt>(&mut self, input: X, error: Y, gamma: A)where
A: Clone,
F: ActivatorGradient<Y, Rel = F, Delta = Dt>,
Self::Params<A>: Backward<X, Dt, Elem = A>,
fn backward<X, Y, Z, Dt>(&mut self, input: X, error: Y, gamma: A)where
A: Clone,
F: ActivatorGradient<Y, Rel = F, Delta = Dt>,
Self::Params<A>: Backward<X, Dt, Elem = A>,
Source§fn set_params(&mut self, params: Self::Params<A>)
fn set_params(&mut self, params: Self::Params<A>)
Source§fn replace_params(&mut self, params: Self::Params<A>) -> Self::Params<A>
fn replace_params(&mut self, params: Self::Params<A>) -> Self::Params<A>
replace the params of the layer, returning the previous value
Source§fn swap_params(&mut self, other: &mut Self::Params<A>)
fn swap_params(&mut self, other: &mut Self::Params<A>)
swap the params of the layer with another
impl<F: Eq, P: Eq> Eq for LayerBase<F, P>
impl<F, P> StructuralPartialEq for LayerBase<F, P>
Auto Trait Implementations§
impl<F, P> Freeze for LayerBase<F, P>
impl<F, P> RefUnwindSafe for LayerBase<F, P>where
F: RefUnwindSafe,
P: RefUnwindSafe,
impl<F, P> Send for LayerBase<F, P>
impl<F, P> Sync for LayerBase<F, P>
impl<F, P> Unpin for LayerBase<F, P>
impl<F, P> UnwindSafe for LayerBase<F, P>where
F: UnwindSafe,
P: UnwindSafe,
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
Source§impl<Q, K> Comparable<K> for Q
impl<Q, K> Comparable<K> for Q
Source§impl<Q, K> Equivalent<K> for Q
impl<Q, K> Equivalent<K> for Q
Source§fn equivalent(&self, key: &K) -> bool
fn equivalent(&self, key: &K) -> bool
Compare self to key and return true if they are equal.