1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
mod cpu_kernel;
#[cfg(feature = "cuda")]
mod cuda_kernel;
use super::{ops::try_binary_op, Device};
use crate::{shapes::*, tensor::*};
/// Element-wise kernel payload for the Huber error op.
///
/// Carries the single configuration value (`delta`) that the CPU/CUDA kernel
/// implementations need per element.
// NOTE(review): `#[repr(C)]` is presumably required so this struct has a stable
// layout when handed to the feature-gated CUDA kernel — confirm against cuda_kernel.
#[repr(C)]
#[derive(Debug, Default, Clone, Copy)]
pub struct HuberErrorKernelOp<E> {
    /// Threshold at which the error switches from the quadratic to the linear
    /// regime (see the tests: with a huge delta the op reduces to `(a - b)^2 / 2`).
    pub delta: E,
}
pub fn huber_error<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D> + Merge<R>, R: Tape<E, D>>(
lhs: Tensor<S, E, D, T>,
rhs: Tensor<S, E, D, R>,
delta: E,
) -> Tensor<S, E, D, T> {
lhs.huber_error(rhs, delta)
}
impl<S: Shape, E: Dtype, D: Device<E>, T: Tape<E, D>> Tensor<S, E, D, T> {
    /// Computes the Huber error against `rhs` element-wise with threshold `delta`.
    ///
    /// Consumes both tensors and merges their tapes (`T: Merge<R>`).
    /// Panics if the device kernel reports an error; see [`Self::try_huber_error`].
    pub fn huber_error<R: Tape<E, D>>(self, rhs: Tensor<S, E, D, R>, delta: E) -> Self
    where
        T: Merge<R>,
    {
        self.try_huber_error(rhs, delta).unwrap()
    }

    /// Fallible variant of [`Self::huber_error`]: returns `Err` instead of
    /// panicking when the device kernel fails.
    pub fn try_huber_error<R: Tape<E, D>>(
        self,
        rhs: Tensor<S, E, D, R>,
        delta: E,
    ) -> Result<Self, D::Err>
    where
        T: Merge<R>,
    {
        // Package the threshold into the kernel op and dispatch through the
        // shared binary-op machinery.
        let op = HuberErrorKernelOp { delta };
        try_binary_op(op, self, rhs)
    }
}
#[cfg(test)]
mod tests {
    use crate::{tensor::*, tests::*};

    #[test]
    fn test_huber_error() {
        let dev: TestDevice = Default::default();
        // Fixed inputs so the hard-coded expected values below are reproducible.
        let a: Tensor<_, TestDtype, _> = dev.tensor([
            [-0.8424031, 0.6309481, 1.0416432],
            [1.325225, 0.5840275, 1.9167633],
        ]);
        let b: Tensor<_, TestDtype, _> = dev.tensor([
            [0.52022195, 0.578804, 0.17535722],
            [0.75429636, 0.66566986, 0.6182751],
        ]);
        // delta = 1.0: some |a - b| exceed delta, exercising the linear branch.
        let small_delta = a.leaky_trace().huber_error(b.leaky_trace(), 1.0);
        // delta = 100.0: every |a - b| is below delta, so the result must
        // collapse to the pure quadratic form (a - b)^2 / 2.
        let large_delta = a.leaky_trace().huber_error(b.leaky_trace(), 100.0);
        assert_close(
            &small_delta.array(),
            &[
                [0.8626251, 0.0013595072, 0.37522575],
                [0.16297975, 0.003332735, 0.79848814],
            ],
        );
        assert_close(&large_delta.array(), &((a - b).square() / 2.0).array());
    }
}