extern crate ndarray;
extern crate ndarray_rand;
use ndarray::prelude::*;
use ndarray_rand::rand_distr::Uniform;
use ndarray_rand::RandomExt;
use std::sync::RwLock;
pub trait Layer1d {
    /// Feeds the 1d array forward through the layer.
    ///
    /// # Arguments
    ///
    /// * `input_array`: Has to be the same size as the layer's input size, otherwise `pass` panics
    ///
    /// returns: `(Array1<f64>, Array1<f64>)` - a tuple of the pre-activation output `z`
    /// and the activated output `a`
    ///
    /// # Examples
    ///
    /// ```
    /// use ducky_learn::layers::*;
    /// use ndarray::{arr1, arr2};
    ///
    /// let layer = Dense1d::from(
    ///     |x| x,                       // Identity activation function
    ///     |x| x.map(|_| 1f64),         // Derivative of the activation function
    ///     arr2(&[[1., 1.], [1., 1.]]), // 2x2 weight array
    ///     arr1(&[1., 1.])              // bias of length 2
    /// );
    ///
    /// let (z, a) = layer.pass(arr1(&[1., 1.]));
    /// ```
    fn pass(&self, input_array: Array1<f64>) -> (Array1<f64>, Array1<f64>);
}
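
// A minimal sketch of implementing `Layer1d` for a custom layer, assuming only the
// trait above. `Identity1d` is a hypothetical layer used purely for illustration: it
// has no weights or bias and returns its input as both the pre-activation output `z`
// and the activated output `a`.
pub struct Identity1d;

impl Layer1d for Identity1d {
    fn pass(&self, input_array: Array1<f64>) -> (Array1<f64>, Array1<f64>) {
        // With no weights, bias, or activation, z and a are both just the input.
        (input_array.clone(), input_array)
    }
}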
pub struct Dense1d {
    /// Activation function applied to the whole 1d array
    activation: fn(Array1<f64>) -> Array1<f64>,
    /// Derivative of the activation function
    deriv_activation: fn(Array1<f64>) -> Array1<f64>,
    /// Weight matrix of shape (output, input)
    weights: RwLock<Array2<f64>>,
    /// Bias vector, one entry per output node
    bias: RwLock<Array1<f64>>,
}
impl Dense1d {
    /// Create a Dense1d layer with full control over every part of the layer
    ///
    /// # Arguments
    ///
    /// * `activation`: Activation function applied to the whole 1d array
    /// * `deriv_activation`: Derivative of the activation function
    /// * `weights`: 2d array that has to be of shape (output, input)
    /// * `bias`: 1d array of biases that has to be the size of the output
    ///
    /// returns: `Dense1d`
    ///
    /// # Examples
    ///
    /// ```
    /// use ducky_learn::layers::*;
    /// use ndarray::{arr1, arr2};
    ///
    /// let layer = Dense1d::from(
    ///     |x| x,                       // Identity activation function
    ///     |x| x.map(|_| 1f64),         // Derivative of the activation function
    ///     arr2(&[[1., 1.], [1., 1.]]), // 2x2 weight array
    ///     arr1(&[1., 1.])              // bias of length 2
    /// );
    /// ```
    pub fn from(
        activation: fn(Array1<f64>) -> Array1<f64>,
        deriv_activation: fn(Array1<f64>) -> Array1<f64>,
        weights: Array2<f64>,
        bias: Array1<f64>,
    ) -> Self {
        Self {
            activation,
            deriv_activation,
            weights: RwLock::new(weights),
            bias: RwLock::new(bias),
        }
    }
    /// Create a Dense1d layer with randomly initialized weights and biases.
    /// Weights and biases are drawn from a uniform distribution over [-1., 1.).
    ///
    /// # Arguments
    ///
    /// * `input_size`: size of the input array
    /// * `layer_size`: number of nodes in the layer
    /// * `activation_fn`: activation function for the layer
    /// * `deriv_activation_fn`: derivative of the activation function
    ///
    /// returns: `Dense1d`
    ///
    /// # Examples
    ///
    /// ```
    /// use ducky_learn::layers::*;
    /// use ndarray::{arr1, arr2};
    ///
    /// let layer = Dense1d::new(5, 10, |x| x, |x| x.map(|_| 1f64));
    /// let input_array = arr1(&[
    ///     1., 1., 1., 1., 1.
    /// ]);
    ///
    /// layer.pass(input_array);
    /// ```
    pub fn new(
        input_size: usize,
        layer_size: usize,
        activation_fn: fn(Array1<f64>) -> Array1<f64>,
        deriv_activation_fn: fn(Array1<f64>) -> Array1<f64>,
    ) -> Self {
        Self {
            activation: activation_fn,
            deriv_activation: deriv_activation_fn,
            weights: RwLock::new(Array2::random(
                (layer_size, input_size),
                Uniform::new(-1., 1.),
            )),
            bias: RwLock::new(Array1::random(layer_size, Uniform::new(-1., 1.))),
        }
    }
}
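
// A sketch of a concrete activation pair that could be supplied to `Dense1d`, e.g.
// `Dense1d::new(5, 10, relu, deriv_relu)`. `relu` and `deriv_relu` are hypothetical
// helpers shown for illustration; they are not part of this module's API.
fn relu(x: Array1<f64>) -> Array1<f64> {
    // max(0, x) applied element-wise
    x.map(|v| v.max(0.))
}

fn deriv_relu(x: Array1<f64>) -> Array1<f64> {
    // 1 where the input is positive, 0 elsewhere
    x.map(|v| if *v > 0. { 1. } else { 0. })
}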
impl Layer1d for Dense1d {
    fn pass(&self, input_array: Array1<f64>) -> (Array1<f64>, Array1<f64>) {
        let weights = self.weights.read().unwrap();
        let bias = self.bias.read().unwrap();

        assert_eq!(
            weights.shape()[1],
            input_array.shape()[0],
            "Layer input size is {}, \
            but it was given an input of size {}",
            weights.shape()[1],
            input_array.shape()[0]
        );

        // Pre-activation output: z = W*x + b
        let z = weights.dot(&input_array) + &*bias;
        // Activated output: a = activation(z)
        let a = (self.activation)(z.clone());

        (z, a)
    }
}
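
// A minimal sanity check of `Dense1d::pass`, included as a sketch: the expected values
// follow directly from z = W*x + b with an identity activation, so a should equal z.
#[cfg(test)]
mod tests {
    use super::*;
    use ndarray::{arr1, arr2};

    #[test]
    fn dense1d_pass_computes_z_and_a() {
        let layer = Dense1d::from(
            |x| x,               // Identity activation
            |x| x.map(|_| 1f64), // Its derivative
            arr2(&[[1., 2.], [3., 4.]]),
            arr1(&[0.5, -0.5]),
        );

        let (z, a) = layer.pass(arr1(&[1., 1.]));

        // z = [1*1 + 2*1 + 0.5, 3*1 + 4*1 - 0.5] = [3.5, 6.5]
        assert_eq!(z, arr1(&[3.5, 6.5]));
        // The identity activation leaves z unchanged
        assert_eq!(a, z);
    }
}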