pub mod neat;
pub mod edge;
pub mod neatenv;
pub mod neuron;
pub mod layers;
pub mod tracer;
pub mod optimizer;
pub mod activation {

    use std::f32::consts::E as Eul;
    use serde::{Deserialize, Serialize};

    /// Activation functions that can be applied to a single neuron's output.
    #[derive(Deserialize, Serialize, Debug, PartialEq, Clone, Copy)]
    pub enum Activation {
        Sigmoid,
        Tahn,              // hyperbolic tangent
        Relu,
        Softmax,           // applied to a whole layer, not a single neuron
        LeakyRelu(f32),    // slope for negative inputs
        ExpRelu(f32),      // ELU scale for negative inputs
        Linear(f32)        // constant slope
    }
    impl Activation {

        /// Apply the activation function to a single input value.
        #[inline]
        pub fn activate(&self, x: f32) -> f32 {
            match self {
                // steepened sigmoid (slope 4.9), as used in the original NEAT paper
                Self::Sigmoid => {
                    1.0 / (1.0 + (-x * 4.9).exp())
                },
                Self::Tahn => {
                    x.tanh()
                },
                Self::Relu => {
                    if x > 0.0 {
                        x
                    } else {
                        0.0
                    }
                },
                Self::Linear(alpha) => {
                    alpha * x
                },
                // for 0 < alpha < 1 this yields alpha * x on negative inputs and x otherwise
                Self::LeakyRelu(alpha) => {
                    let a = alpha * x;
                    if a > x {
                        return a;
                    }
                    x
                },
                // ELU: x for non-negative inputs, alpha * (e^x - 1) for negative ones
                Self::ExpRelu(alpha) => {
                    if x >= 0.0 {
                        return x;
                    }
                    alpha * (Eul.powf(x) - 1.0)
                },
                // Softmax needs a whole layer of outputs, so it cannot be applied to one value
                _ => panic!("Cannot activate single neuron")
            }
        }
        /// Derivative of the activation function, used during backpropagation.
        #[inline]
        pub fn deactivate(&self, x: f32) -> f32 {
            match self {
                // derivative of the steepened sigmoid: 4.9 * s * (1 - s), with s = activate(x)
                Self::Sigmoid => {
                    let s = self.activate(x);
                    4.9 * s * (1.0 - s)
                },
                // d/dx tanh(x) = 1 - tanh(x)^2
                Self::Tahn => {
                    1.0 - self.activate(x).powf(2.0)
                },
                Self::Linear(alpha) => {
                    *alpha
                },
                Self::Relu => {
                    if self.activate(x) > 0.0 {
                        return 1.0;
                    }
                    0.0
                },
                // ELU derivative: 1 for positive outputs, alpha * e^x otherwise
                Self::ExpRelu(alpha) => {
                    if self.activate(x) > 0.0 {
                        return 1.0;
                    }
                    alpha * x.exp()
                },
                Self::LeakyRelu(alpha) => {
                    if self.activate(x) > 0.0 {
                        return 1.0;
                    }
                    *alpha
                },
                // like activation, the softmax derivative is handled at the layer level
                _ => panic!("Cannot deactivate single neuron")
            }
        }
    }
}
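
// A minimal usage sketch (not part of the original source): the module name and the
// values below are illustrative assumptions, exercising Activation::activate and
// Activation::deactivate for a couple of variants.
#[cfg(test)]
mod activation_sketch_tests {
    use super::activation::Activation;

    #[test]
    fn relu_activates_and_differentiates() {
        let relu = Activation::Relu;
        assert_eq!(relu.activate(2.0), 2.0);
        assert_eq!(relu.activate(-1.0), 0.0);
        assert_eq!(relu.deactivate(2.0), 1.0);
    }

    #[test]
    fn leaky_relu_scales_negative_inputs() {
        let leaky = Activation::LeakyRelu(0.01);
        assert!((leaky.activate(-1.0) - (-0.01)).abs() < 1e-6);
        assert_eq!(leaky.deactivate(-1.0), 0.01);
    }
}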
pub mod neurontype {

    use serde::{Deserialize, Serialize};

    /// Where a neuron sits in the network: input, output, or hidden.
    #[derive(Deserialize, Serialize, Debug, PartialEq, Clone, Copy)]
    pub enum NeuronType {
        Input,
        Output,
        Hidden
    }
}