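//! Common loss functions. Each takes the model output `pred` (carrying the
//! gradient tape) and a reference to the target tensor, and returns a 0d loss
//! tensor that can be backpropagated with `.backward()`, as exercised in the
//! tests below.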
use crate::prelude::*;

/// Mean squared error: the mean of the squared difference between `pred` and `targ`,
/// i.e. `mean(square(targ - pred))`.
pub fn mse_loss<T: Tensor<Dtype = f32>>(pred: T, targ: &T::NoTape) -> Tensor0D<T::TapeHolder> {
    mean(square(sub(targ, pred)))
}
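
// A minimal usage sketch (illustrative only; `model`, `x`, and `y` are
// hypothetical names, and `trace()` attaches the gradient tape as in the
// tests below):
//
//     let pred = model.forward(x.trace());
//     let loss = mse_loss(pred, &y);
//     let gradients = loss.backward();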

/// Root mean squared error: the square root of [mse_loss].
pub fn rmse_loss<T: Tensor<Dtype = f32>>(pred: T, targ: &T::NoTape) -> Tensor0D<T::TapeHolder> {
    sqrt(mse_loss(pred, targ))
}

/// Mean absolute error: the mean of `|targ - pred|` across all elements.
pub fn mae_loss<T: Tensor<Dtype = f32>>(pred: T, targ: &T::NoTape) -> Tensor0D<T::TapeHolder> {
    mean(abs(sub(targ, pred)))
}

/// Cross entropy loss on unnormalized logits: `-mean(targ * log_softmax(logits))`.
/// Note that `mean` averages over every element, including the class axis, so for
/// one-hot targets the result is the usual cross entropy scaled by `1 / num_classes`;
/// the test below multiplies by the number of classes to undo this scaling.
pub fn cross_entropy_with_logits_loss<T: Tensor<Dtype = f32>>(
    logits: T,
    targ: &T::NoTape,
) -> Tensor0D<T::TapeHolder> {
    -mean(mul(targ, log_softmax(logits)))
}
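
// Illustrative one-hot usage sketch (hypothetical values; see `test_crossentropy`
// below for a concrete run):
//
//     let logits = model.forward(x.trace());     // raw, unnormalized scores
//     let targ = Tensor1D::new([0.0, 1.0, 0.0]); // one-hot target for class 1
//     let loss = cross_entropy_with_logits_loss(logits, &targ);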

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_mse() {
        let x = Tensor1D::new([0.87248087, -0.24252531, -1.00609493, 1.15508401, 1.55450475]);
        let y = Tensor1D::new([
            -0.90954804,
            -1.01931846,
            -0.39221755,
            2.25248861,
            1.30355537,
        ]);
        let loss = mse_loss(x.trace(), &y);
        assert_eq!(loss.data(), &1.0846305);
        // d/dx mean((x - y)^2) = 2 * (x - y) / n
        let g = loss.backward();
        assert_eq!(
            g.ref_gradient(&x),
            &[0.71281159, 0.31071725, -0.24555098, -0.43896183, 0.10037976]
        );
    }
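
    // Sanity-check sketch (not part of the original test suite): rmse_loss is
    // defined as sqrt(mse_loss), so on the same data it should match taking the
    // square root of the mse, up to floating point rounding.
    #[test]
    fn test_rmse() {
        let x = Tensor1D::new([0.87248087, -0.24252531, -1.00609493, 1.15508401, 1.55450475]);
        let y = Tensor1D::new([
            -0.90954804,
            -1.01931846,
            -0.39221755,
            2.25248861,
            1.30355537,
        ]);
        let mse = *mse_loss(x.trace(), &y).data();
        let rmse = *rmse_loss(x.trace(), &y).data();
        assert!((rmse - mse.sqrt()).abs() < 1e-6);
    }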

    #[test]
    fn test_mae() {
        let x = Tensor1D::new([0.87248087, -0.24252531, -1.00609493, 1.15508401, 1.55450475]);
        let y = Tensor1D::new([
            -0.90954804,
            -1.01931846,
            -0.39221755,
            2.25248861,
            1.30355537,
        ]);
        let loss = mae_loss(x.trace(), &y);
        assert_eq!(loss.data(), &0.90421069);
        // d/dx mean(|x - y|) = sign(x - y) / n, so each gradient entry is +-0.2
        let g = loss.backward();
        assert_eq!(g.ref_gradient(&x), &[0.2, 0.2, -0.2, -0.2, 0.2]);
    }

    #[test]
    fn test_crossentropy() {
        let x = Tensor1D::new([0.87248087, -0.24252531, -1.00609493, 1.15508401, 1.55450475]);
        let losses = [1.56552291, 2.68052912, 3.444099, 1.2829198, 0.883499];
        for i in 0..5 {
            let mut targ = [0.0; 5];
            targ[i] = 1.0;
            let y = Tensor1D::new(targ);
            let loss = cross_entropy_with_logits_loss(x.trace(), &y);
            // The mean in the loss divides by the number of classes (5), so
            // multiply by 5 to recover the unscaled cross entropy.
            assert_eq!(*loss.data() * 5.0, losses[i]);
        }
    }
}