concision_neural/layers/layer/impl_layer.rs

use crate::layers::LayerBase;

use crate::layers::{Activator, ActivatorGradient, Layer};
use cnc::{Forward, ParamsBase};
use ndarray::{Data, Dimension, RawData};
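// `Deref`/`DerefMut` hand out the wrapped `ParamsBase`, so a `LayerBase` can
// be used wherever its parameters are expected and parameter methods can be
// called on the layer directly.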
impl<F, S, D> core::ops::Deref for LayerBase<F, S, D>
where
    D: Dimension,
    S: RawData,
{
    type Target = ParamsBase<S, D>;

    fn deref(&self) -> &Self::Target {
        &self.params
    }
}

impl<F, S, D> core::ops::DerefMut for LayerBase<F, S, D>
where
    D: Dimension,
    S: RawData,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.params
    }
}

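// `Forward` composes the two halves of the layer: the wrapped `ParamsBase`
// computes the pre-activation output, and the activation `rho` is applied on
// top, i.e. `layer.forward(&x)` behaves as `rho(params.forward(&x)?)`.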
impl<A, X, Y, F, S, D> Forward<X> for LayerBase<F, S, D>
where
    A: Clone,
    F: Activator<Y, Output = Y>,
    D: Dimension,
    S: Data<Elem = A>,
    ParamsBase<S, D>: Forward<X, Output = Y>,
{
    type Output = Y;

    fn forward(&self, inputs: &X) -> cnc::Result<Self::Output> {
        // pre-activation output of the underlying parameters
        let y = self.params().forward(inputs)?;
        // finish by applying the layer's activation
        Ok(self.rho().activate(y))
    }
}

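// Activation is pure delegation: the layer activates a value by handing it to
// its activation function `rho`.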
impl<U, V, F, S, D> Activator<U> for LayerBase<F, S, D>
where
    F: Activator<U, Output = V>,
    D: Dimension,
    S: RawData,
{
    type Output = V;

    fn activate(&self, x: U) -> Self::Output {
        self.rho().activate(x)
    }
}

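// The activation gradient (the piece backpropagation needs from the layer's
// nonlinearity) is likewise delegated; the layer re-exports `rho`'s
// `Input`/`Delta` associated types unchanged.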
impl<U, F, S, D> ActivatorGradient<U> for LayerBase<F, S, D>
where
    F: ActivatorGradient<U>,
    D: Dimension,
    S: RawData,
{
    type Input = F::Input;
    type Delta = F::Delta;

    fn activate_gradient(&self, inputs: F::Input) -> F::Delta {
        self.rho().activate_gradient(inputs)
    }
}

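// `Layer` is the accessor trait tying everything together: it fixes the
// element type through the storage (`S: RawData<Elem = A>`) and exposes the
// activation (`rho`) and the parameters (shared and mutable).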
impl<A, F, S, D> Layer<S, D> for LayerBase<F, S, D>
where
    F: Activator<A, Output = A>,
    D: Dimension,
    S: RawData<Elem = A>,
{
    type Elem = A;
    type Rho = F;

    fn rho(&self) -> &Self::Rho {
        &self.rho
    }

    fn params(&self) -> &ParamsBase<S, D> {
        &self.params
    }

    fn params_mut(&mut self) -> &mut ParamsBase<S, D> {
        &mut self.params
    }
}
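
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only). How a `LayerBase` is constructed is not
// shown in this file, so the constructor and activation type below are
// assumptions, not confirmed APIs:
//
//     use cnc::Forward;
//
//     // hypothetical: wrap some parameters with a ReLU-style activation
//     let layer = LayerBase::new(params, ReLU::default());
//
//     // affine map from the parameters, then the activation:
//     let y = layer.forward(&x)?;
//
//     // via `Deref`, parameter methods are callable on the layer itself.
// ---------------------------------------------------------------------------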