//! ducky_learn/activations.rs — activation functions (and derivatives) for 1d arrays.

1extern crate ndarray;
2
3use ndarray::prelude::*;
4
5/// Relu activation function for 1d array
6///
7/// More info: https://machinelearningmastery.com/rectified-linear-activation-function-for-deep-learning-neural-networks/
8///
9/// # Arguments
10///
11/// * `input_array`: 1d array
12///
13/// returns: `Array1<f64>`
14///
15/// # Examples
16///
17/// ```
18/// use ducky_learn::activations::*;
19/// use ndarray::arr1;
20///
21/// let input_array = arr1(&[-1.3456435325242, -32145324321., -132432888.]);
22/// assert_eq!(relu_1d(input_array), arr1(&[0., 0., 0.]));
23/// ```
24pub fn relu_1d(input_array: Array1<f64>) -> Array1<f64> {
25    input_array.map(|value| value.max(0.))
26}
27
28/// Derivative of Relu function
29///
30/// # Arguments
31///
32/// * `input_array`: 1d array
33///
34/// returns: `Array1<f64>`
35///
36/// # Examples
37///
38/// ```
39/// use ducky_learn::activations::*;
40/// use ndarray::arr1;
41///
42/// let input_array = arr1(&[1.3456435325242, -32145324321., 132432888.]);
43/// assert_eq!(deriv_relu_1d(input_array), arr1(&[1., 0., 1.]));
44/// ```
45pub fn deriv_relu_1d(input_array: Array1<f64>) -> Array1<f64> {
46    input_array.map(|value| (*value > 0f64) as i32 as f64)
47}
48
49/// Softmax activation function for 1d array. Note that you can run into NaN issue if values are
50/// < -1000 or > 1000 (https://users.rust-lang.org/t/watch-out-for-nans/70016)
51///
52/// More info: https://deepai.org/machine-learning-glossary-and-terms/softmax-layer#:~:text=The%20softmax%20function%20is%20a,can%20be%20interpreted%20as%20probabilities.
53///
54/// # Arguments
55///
56/// * `input_array`: 1d array
57///
58/// returns: `Array1<f64>`
59///
60/// # Examples
61///
62/// ```
63/// use ducky_learn::activations::*;
64/// use ndarray::arr1;
65///
66/// let input_array = arr1(&[0., 1., -1., 0.01, -0.1]);
67/// assert_eq!(softmax_1d(input_array),
68///            arr1(&[0.16663753690463112, 0.4529677885070323, 0.0613025239546613, 0.16831227199301688, 0.15077987864065834]));
69/// ```
70pub fn softmax_1d(input_array: Array1<f64>) -> Array1<f64> {
71    let sum_exp_input_array = input_array.map(|value| value.exp()).sum();
72
73    input_array.map(|value| value.exp() / sum_exp_input_array)
74}
75
#[cfg(test)]
mod activations_tests {
    use super::*;
    use ndarray::arr1;

    // ReLU: mixed signs — negatives clamp to 0, positives pass through.
    #[test]
    fn relu_1d_1() {
        assert_eq!(
            relu_1d(arr1(&[0., 1., -1., 0.01, -0.1])),
            arr1(&[0., 1., 0., 0.01, 0.])
        );
    }

    // ReLU: empty input yields an empty output.
    #[test]
    fn relu_1d_2() {
        assert_eq!(relu_1d(arr1(&[])), arr1(&[]));
    }

    // ReLU: all-negative input collapses to zeros.
    #[test]
    fn relu_1d_3() {
        assert_eq!(
            relu_1d(arr1(&[-1.3456435325242, -32145324321., -132432888.])),
            arr1(&[0., 0., 0.])
        );
    }

    // ReLU derivative: 1 for strictly positive entries, 0 otherwise.
    #[test]
    fn deriv_relu_1d_1() {
        assert_eq!(
            deriv_relu_1d(arr1(&[1.3456435325242, -32145324321., 132432888.])),
            arr1(&[1., 0., 1.])
        );
    }

    #[test]
    fn deriv_relu_1d_2() {
        assert_eq!(
            deriv_relu_1d(arr1(&[-1.3456435325242, -32145324321., 132432888.])),
            arr1(&[0., 0., 1.])
        );
    }

    // ReLU derivative: empty input yields an empty output.
    #[test]
    fn deriv_relu_1d_3() {
        assert_eq!(deriv_relu_1d(arr1(&[])), arr1(&[]));
    }

    // Softmax: probabilities for a small mixed-sign vector.
    #[test]
    fn softmax_1d_1() {
        assert_eq!(
            softmax_1d(arr1(&[0., 1., -1., 0.01, -0.1])),
            arr1(&[
                0.16663753690463112,
                0.4529677885070323,
                0.0613025239546613,
                0.16831227199301688,
                0.15077987864065834
            ])
        );
    }

    // Softmax: empty input yields an empty output.
    #[test]
    fn softmax_1d_2() {
        assert_eq!(softmax_1d(arr1(&[])), arr1(&[]));
    }

    // Softmax: a dominant entry saturates to 1.0 and e^-888 underflows to 0.
    #[test]
    fn softmax_1d_3() {
        assert_eq!(
            softmax_1d(arr1(&[-0.3456435325242, 232., -888.])),
            arr1(&[1.2404210269803915e-101, 1.0, 0.0])
        );
    }
}