activation_functions/
lib.rs

//! # activation_functions
//!
//! `activation_functions` is a collection of functions that can be used as
//! activation functions in machine learning.
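//!
//! A minimal usage sketch (assuming the crate is pulled in as a dependency
//! named `activation_functions`; the expected value below is sigmoid(0.5)):
//!
//! ```
//! let y = activation_functions::f32::sigmoid(0.5);
//! assert!((y - 0.622_459_3).abs() < 1e-6);
//! ```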
/// Activation functions that take an `f32` argument and return an `f32`.
pub mod f32 {
    /// Calculates the `sigmoid` (logistic) function, `1 / (1 + e^(-x))`, of the given `f32`.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f32::sigmoid;
    ///
    /// let x: f32 = 0.5;
    /// let answer: f32 = sigmoid(x);
    ///
    /// println!("sigmoid({}) => {}", x, answer);
    /// ```
    pub fn sigmoid(x: f32) -> f32 {
        1.0 / (1.0 + (-x).exp())
    }
    /// Calculates the `binary step` function of the given `f32`: `0` for
    /// negative inputs, `1` otherwise.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f32::bstep;
    ///
    /// let x: f32 = 0.5;
    /// let answer: f32 = bstep(x);
    ///
    /// println!("bstep({}) => {}", x, answer);
    /// ```
    pub fn bstep(x: f32) -> f32 {
        if x < 0.0 { 0.0 } else { 1.0 }
    }
    /// Calculates the hyperbolic tangent, `(e^x - e^(-x)) / (e^x + e^(-x))`, of the given `f32`.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f32::tanh;
    ///
    /// let x: f32 = 0.5;
    /// let answer: f32 = tanh(x);
    ///
    /// println!("tanh({}) => {}", x, answer);
    /// ```
    pub fn tanh(x: f32) -> f32 {
        let (ex, enx) = (x.exp(), (-x).exp());
        (ex - enx) / (ex + enx)
    }
    /// Calculates the `rectified linear unit`, `max(0, x)`, of the given `f32`.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f32::relu;
    ///
    /// let x: f32 = 0.5;
    /// let answer: f32 = relu(x);
    ///
    /// println!("relu({}) => {}", x, answer);
    /// ```
    pub fn relu(x: f32) -> f32 {
        // ReLU passes positive inputs through unchanged and clamps the rest to zero.
        x.max(0.0)
    }
    /// Calculates the `sigmoid linear unit` (SiLU, also known as swish),
    /// `x / (1 + e^(-x))`, of the given `f32`.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f32::silu;
    ///
    /// let x: f32 = 0.5;
    /// let answer: f32 = silu(x);
    ///
    /// println!("silu({}) => {}", x, answer);
    /// ```
    pub fn silu(x: f32) -> f32 {
        x / (1.0 + (-x).exp())
    }
    /// Calculates the `gaussian` function, `e^(-x²)`, of the given `f32`.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f32::gaussian;
    ///
    /// let x: f32 = 0.5;
    /// let answer: f32 = gaussian(x);
    ///
    /// println!("gaussian({}) => {}", x, answer);
    /// ```
    pub fn gaussian(x: f32) -> f32 {
        (-(x * x)).exp()
    }
}

/// Activation functions that take an `f64` argument and return an `f64`.
pub mod f64 {
    /// Calculates the `sigmoid` (logistic) function, `1 / (1 + e^(-x))`, of the given `f64`.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f64::sigmoid;
    ///
    /// let x: f64 = 0.5;
    /// let answer: f64 = sigmoid(x);
    ///
    /// println!("sigmoid({}) => {}", x, answer);
    /// ```
    pub fn sigmoid(x: f64) -> f64 {
        1.0 / (1.0 + (-x).exp())
    }
    /// Calculates the `binary step` function of the given `f64`: `0` for
    /// negative inputs, `1` otherwise.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f64::bstep;
    ///
    /// let x: f64 = 0.5;
    /// let answer: f64 = bstep(x);
    ///
    /// println!("bstep({}) => {}", x, answer);
    /// ```
    pub fn bstep(x: f64) -> f64 {
        if x < 0.0 { 0.0 } else { 1.0 }
    }
    /// Calculates the hyperbolic tangent, `(e^x - e^(-x)) / (e^x + e^(-x))`, of the given `f64`.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f64::tanh;
    ///
    /// let x: f64 = 0.5;
    /// let answer: f64 = tanh(x);
    ///
    /// println!("tanh({}) => {}", x, answer);
    /// ```
    pub fn tanh(x: f64) -> f64 {
        let (ex, enx) = (x.exp(), (-x).exp());
        (ex - enx) / (ex + enx)
    }
    /// Calculates the `rectified linear unit`, `max(0, x)`, of the given `f64`.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f64::relu;
    ///
    /// let x: f64 = 0.5;
    /// let answer: f64 = relu(x);
    ///
    /// println!("relu({}) => {}", x, answer);
    /// ```
    pub fn relu(x: f64) -> f64 {
        // ReLU passes positive inputs through unchanged and clamps the rest to zero.
        x.max(0.0)
    }
    /// Calculates the `sigmoid linear unit` (SiLU, also known as swish),
    /// `x / (1 + e^(-x))`, of the given `f64`.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f64::silu;
    ///
    /// let x: f64 = 0.5;
    /// let answer: f64 = silu(x);
    ///
    /// println!("silu({}) => {}", x, answer);
    /// ```
    pub fn silu(x: f64) -> f64 {
        x / (1.0 + (-x).exp())
    }
    /// Calculates the `gaussian` function, `e^(-x²)`, of the given `f64`.
    ///
    /// # Examples
    ///
    /// ```
    /// use activation_functions::f64::gaussian;
    ///
    /// let x: f64 = 0.5;
    /// let answer: f64 = gaussian(x);
    ///
    /// println!("gaussian({}) => {}", x, answer);
    /// ```
    pub fn gaussian(x: f64) -> f64 {
        (-(x * x)).exp()
    }
}
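
// A minimal sanity-check sketch (an illustrative addition, not part of the
// original crate): exercises a few well-known identities of these activations.
#[cfg(test)]
mod tests {
    #[test]
    fn sigmoid_at_zero_is_one_half() {
        // sigmoid(0) = 1 / (1 + e^0) = 0.5
        assert!((crate::f64::sigmoid(0.0) - 0.5).abs() < 1e-12);
    }

    #[test]
    fn tanh_is_odd() {
        // tanh(-x) = -tanh(x)
        let x = 0.75;
        assert!((crate::f64::tanh(x) + crate::f64::tanh(-x)).abs() < 1e-12);
    }

    #[test]
    fn relu_is_identity_for_positive_inputs() {
        assert_eq!(crate::f64::relu(2.5), 2.5);
        assert_eq!(crate::f64::relu(-1.0), 0.0);
    }

    #[test]
    fn silu_equals_x_times_sigmoid() {
        // silu(x) = x * sigmoid(x)
        let x = 1.25;
        assert!((crate::f64::silu(x) - x * crate::f64::sigmoid(x)).abs() < 1e-12);
    }
}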