ducky_learn/layers.rs

extern crate ndarray;
extern crate ndarray_rand;

use ndarray::prelude::*;
use ndarray_rand::rand_distr::Uniform;
use ndarray_rand::RandomExt;
use std::sync::RwLock;

pub trait Layer1d {
    /// Feeds the 1d array forward through the layer.
    ///
    /// # Arguments
    ///
    /// * `input_array`: Must be the same length as the layer's input size, otherwise this panics
    ///
    /// returns: `(Array1<f64>, Array1<f64>)` - the pre-activation output `z` followed by the
    /// activated output `a`
    ///
    /// # Examples
    ///
    /// ```
    /// use ducky_learn::layers::*;
    /// use ndarray::{arr1, arr2};
    ///
    /// let layer = Dense1d::from(
    ///                 |x| x, // Activation function that does nothing
    ///                 |x| x.map(|_| 1f64), // Derivative of the activation function
    ///                 arr2(&[[1., 1.], [1., 1.]]), // 2x2 array
    ///                 arr1(&[1., 1.]) // len 2
    ///             );
    ///
    /// let (_z, a) = layer.pass(arr1(&[1., 1.]));
    /// assert_eq!(a, arr1(&[3., 3.]));
    /// ```
    fn pass(&self, input_array: Array1<f64>) -> (Array1<f64>, Array1<f64>);
}

pub struct Dense1d {
    activation: fn(Array1<f64>) -> Array1<f64>,
    deriv_activation: fn(Array1<f64>) -> Array1<f64>,
    weights: RwLock<Array2<f64>>,
    bias: RwLock<Array1<f64>>,
}

impl Dense1d {
    /// Create a Dense1d layer with full control over every part of the layer
    ///
    /// # Arguments
    ///
    /// * `activation`: Activation function applied to the whole 1d array
    /// * `deriv_activation`: Derivative of the activation function
    /// * `weights`: 2d array that has to be of shape (output, input)
    /// * `bias`: 1d array of biases that has to be the size of the output
    ///
    /// returns: `Dense1d`
    ///
    /// # Examples
    ///
    /// ```
    /// use ducky_learn::layers::*;
    /// use ndarray::{arr1, arr2};
    ///
    /// let layer = Dense1d::from(
    ///                 |x| x, // Activation function that does nothing
    ///                 |x| x.map(|_| 1f64), // Derivative of the activation function
    ///                 arr2(&[[1., 1.], [1., 1.]]), // 2x2 array
    ///                 arr1(&[1., 1.]) // len 2
    ///             );
    /// ```
    pub fn from(
        activation: fn(Array1<f64>) -> Array1<f64>,
        deriv_activation: fn(Array1<f64>) -> Array1<f64>,
        weights: Array2<f64>,
        bias: Array1<f64>,
    ) -> Self {
        Self {
            activation,
            deriv_activation,
            weights: RwLock::new(weights),
            bias: RwLock::new(bias),
        }
    }

    /// Create a Dense1d layer with randomly initialised weights and biases.
    /// Weights and biases are drawn from a uniform distribution over [-1, 1).
    ///
    /// # Arguments
    ///
    /// * `input_size`: size of the input array
    /// * `layer_size`: number of nodes in the layer
    /// * `activation_fn`: activation function for the layer
    /// * `deriv_activation_fn`: derivative of the activation function
    ///
    /// returns: `Dense1d`
    ///
    /// # Examples
    ///
    /// ```
    /// use ducky_learn::layers::*;
    /// use ndarray::{arr1, arr2};
    ///
    /// let layer = Dense1d::new(5, 10, |x| x, |x| x);
    /// let input_array = arr1(&[
    ///     1., 1., 1., 1., 1.
    /// ]);
    ///
    /// layer.pass(input_array);
    /// ```
    pub fn new(
        input_size: usize,
        layer_size: usize,
        activation_fn: fn(Array1<f64>) -> Array1<f64>,
        deriv_activation_fn: fn(Array1<f64>) -> Array1<f64>,
    ) -> Self {
        Self {
            activation: activation_fn,
            deriv_activation: deriv_activation_fn,
            weights: RwLock::new(Array2::random(
                (layer_size, input_size),
                Uniform::new(-1., 1.),
            )),
            bias: RwLock::new(Array1::random(layer_size, Uniform::new(-1., 1.))),
        }
    }
}

impl Layer1d for Dense1d {
    fn pass(&self, input_array: Array1<f64>) -> (Array1<f64>, Array1<f64>) {
        let weights = self.weights.read().unwrap();
        let bias = self.bias.read().unwrap();

        assert_eq!(
            weights.shape()[1],
            input_array.shape()[0],
            "Layer input size is {}, but it was given an input of size {}",
            weights.shape()[1],
            input_array.shape()[0]
        );

        // z = W.x + b (pre-activation), a = activation(z)
        let z = weights.dot(&input_array) + &*bias;
        let a = (self.activation)(z.clone());
        (z, a)
    }
}


#[cfg(test)]
mod layers_tests {
    use super::*;
    use ndarray::*;
    use crate::activations::*;

    #[test]
    fn dense1d_pass_arr1_1() {
        let layer = Dense1d::from(
            |x| x,
            |x| x,
            arr2(&[[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]),
            arr1(&[1., 1., 1.]),
        );
        let input_array = arr1(&[1., 1., 1.]);

        assert_eq!(layer.pass(input_array).1, arr1(&[4., 4., 4.]))
    }

    #[test]
    fn dense1d_pass_arr1_2() {
        let layer = Dense1d::from(
            |x| x,
            |x| x,
            arr2(&[
                [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
            ]),
            arr1(&[1., 1., 1.]),
        );
        let input_array = arr1(&[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]);

        assert_eq!(layer.pass(input_array).1, arr1(&[13.0, 13.0, 13.0]))
    }

    #[test]
    #[should_panic]
    fn dense1d_pass_arr1_diff_size() {
        let layer = Dense1d::from(
            |x| x,
            |x| x,
            arr2(&[[1., 1., 1., 1.], [1., 1., 1., 1.]]),
            arr1(&[0., 0.]),
        );
        let input_array = arr1(&[1.]);

        layer.pass(input_array);
    }

    #[test]
    fn dense1d_new() {
        let layer = Dense1d::new(5, 10, |x| x, |x| x);

        let input_array = arr1(&[1., 1., 1., 1., 1.]);

        layer.pass(input_array);
    }
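
    // Added illustration: since `pass` is defined on the `Layer1d` trait, layers can be
    // chained by feeding one layer's activated output into the next. This is a minimal
    // sketch with identity activations and hand-picked ones-weights (not part of the
    // original test suite); the expected values follow from z = W.x + b at each layer.
    #[test]
    fn dense1d_pass_chained_layers() {
        // 3 -> 2 layer: z1 = [1+1+1, 1+1+1] + [1, 1] = [4, 4]
        let layer1 = Dense1d::from(
            |x| x,
            |x| x,
            arr2(&[[1., 1., 1.], [1., 1., 1.]]),
            arr1(&[1., 1.]),
        );
        // 2 -> 1 layer: z2 = [4+4] + [1] = [9]
        let layer2 = Dense1d::from(
            |x| x,
            |x| x,
            arr2(&[[1., 1.]]),
            arr1(&[1.]),
        );

        let (_z1, a1) = layer1.pass(arr1(&[1., 1., 1.]));
        let (_z2, a2) = layer2.pass(a1);

        assert_eq!(a2, arr1(&[9.]))
    }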

    #[test]
    fn dense1d_activation() {
        let layer = Dense1d::from(
            relu_1d,
            deriv_relu_1d,
            arr2(&[[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]),
            arr1(&[-10., -10., 1.]),
        );
        let input_array = arr1(&[1., 1., 1.]);

        assert_eq!(layer.pass(input_array).1, arr1(&[0., 0., 4.]))
    }
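
    // Added illustration of the (z, a) return contract: `pass` returns the raw
    // pre-activation z alongside the activated output a, so z can stay negative while
    // ReLU clamps a to zero. A minimal sketch assuming `relu_1d` from
    // `crate::activations` is the standard ReLU (consistent with `dense1d_activation`
    // above); it is not part of the original test suite.
    #[test]
    fn dense1d_pass_returns_preactivation() {
        let layer = Dense1d::from(
            relu_1d,
            deriv_relu_1d,
            arr2(&[[1., 1., 1.]]),
            arr1(&[-10.]),
        );

        let (z, a) = layer.pass(arr1(&[1., 1., 1.]));

        // z = 1 + 1 + 1 - 10 = -7, which ReLU clamps to 0 in a
        assert_eq!(z, arr1(&[-7.]));
        assert_eq!(a, arr1(&[0.]))
    }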
}