use crate as burn;
use crate::module::{Content, DisplaySettings, ModuleDisplay};
use crate::tensor::backend::Backend;
use crate::tensor::Tensor;
use crate::{config::Config, module::Module};
use super::Reduction;
/// Configuration for creating a [Huber loss](HuberLoss) module.
#[derive(Config, Debug)]
pub struct HuberLossConfig {
/// The transition point `delta` between the quadratic region (|residual| <= delta)
/// and the linear region of the loss. Must be non-negative (checked in `init`).
pub delta: f32,
}
impl HuberLossConfig {
    /// Initialize a new [Huber loss](HuberLoss) module from this configuration.
    ///
    /// # Panics
    ///
    /// Panics if `delta` is negative (or NaN, which also fails the check).
    pub fn init(&self) -> HuberLoss {
        self.assertions();
        HuberLoss {
            delta: self.delta,
            // Precomputed offset delta^2 / 2 that makes the linear branch
            // (delta * |r| - delta^2 / 2) continuous with the quadratic
            // branch (r^2 / 2) at |r| = delta.
            lin_bias: self.delta * self.delta * 0.5,
        }
    }

    /// Validate the configuration, panicking with a descriptive message otherwise.
    fn assertions(&self) {
        // Include the offending value in the panic message so a bad config
        // is diagnosable from the panic alone.
        assert!(
            self.delta >= 0.,
            "Delta for Huber loss must be a non-negative number, got {}",
            self.delta
        );
    }
}
/// Computes the Huber loss between predictions and targets.
///
/// Should be created using [HuberLossConfig].
#[derive(Module, Debug, Clone)]
#[module(custom_display)]
pub struct HuberLoss {
/// The transition point between the quadratic and linear regions of the loss.
pub delta: f32,
/// Precomputed constant `delta * delta * 0.5` subtracted in the linear region.
pub lin_bias: f32, }
impl ModuleDisplay for HuberLoss {
    /// Display all attributes on a single line.
    fn custom_settings(&self) -> Option<DisplaySettings> {
        let settings = DisplaySettings::new().with_new_line_after_attribute(false);
        settings.optional()
    }

    /// Expose `delta` and `lin_bias` in the module's display output.
    fn custom_content(&self, content: Content) -> Option<Content> {
        let content = content.add("delta", &self.delta);
        let content = content.add("lin_bias", &self.lin_bias);
        content.optional()
    }
}
impl HuberLoss {
    /// Compute the element-wise loss and apply the requested reduction.
    ///
    /// # Shapes
    ///
    /// - predictions: `[...dims]`
    /// - targets: `[...dims]`
    /// - output: `[1]`
    pub fn forward<const D: usize, B: Backend>(
        &self,
        predictions: Tensor<B, D>,
        targets: Tensor<B, D>,
        reduction: Reduction,
    ) -> Tensor<B, 1> {
        let unreduced = self.forward_no_reduction(predictions, targets);
        match reduction {
            Reduction::Sum => unreduced.sum(),
            Reduction::Mean | Reduction::Auto => unreduced.mean(),
        }
    }

    /// Compute the loss for each element without any reduction.
    ///
    /// # Shapes
    ///
    /// - predictions: `[...dims]`
    /// - targets: `[...dims]`
    /// - output: `[...dims]`
    pub fn forward_no_reduction<const D: usize, B: Backend>(
        &self,
        predictions: Tensor<B, D>,
        targets: Tensor<B, D>,
    ) -> Tensor<B, D> {
        self.forward_residuals(targets - predictions)
    }

    /// Compute the loss directly from precomputed residuals `targets - predictions`.
    ///
    /// Inside `|r| <= delta` the loss is quadratic, `r^2 / 2`; outside it is
    /// linear, `delta * |r| - delta^2 / 2`, matching the quadratic branch at
    /// the boundary.
    pub fn forward_residuals<const D: usize, B: Backend>(
        &self,
        residuals: Tensor<B, D>,
    ) -> Tensor<B, D> {
        // Elements in the linear region, |r| > delta.
        let linear_mask = residuals.clone().abs().greater_elem(self.delta);

        // clamp(r, -delta, delta) * r equals delta * |r| wherever |r| > delta,
        // so subtracting lin_bias (= delta^2 / 2) yields the linear branch.
        let clamped = residuals.clone().clamp(-self.delta, self.delta);
        let linear = clamped.mul(residuals.clone()).sub_scalar(self.lin_bias);

        // Quadratic branch: r^2 / 2.
        let quadratic = residuals.powf_scalar(2.).mul_scalar(0.5);

        quadratic.mask_where(linear_mask, linear)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::tensor::TensorData;
    use crate::TestBackend;

    type TestTensor<const D: usize> = Tensor<TestBackend, D>;

    #[test]
    fn test_huber_loss() {
        let device = Default::default();
        let predict =
            TestTensor::<1>::from_data(TensorData::from([-2., -0.5, 0., 0.3, 1.]), &device);
        let targets = TestTensor::<1>::from_data(TensorData::from([0., 0., 0., 0., 0.]), &device);

        let huber = HuberLossConfig::new(0.5).init();

        let loss_sum = huber.forward(predict.clone(), targets.clone(), Reduction::Sum);
        let loss = huber.forward(predict.clone(), targets.clone(), Reduction::Auto);
        let loss_no_reduction = huber.forward_no_reduction(predict, targets);

        // Element-wise: quadratic inside |r| <= 0.5, linear outside.
        loss_no_reduction
            .into_data()
            .assert_approx_eq(&TensorData::from([0.875, 0.125, 0., 0.045, 0.375]), 7);
        // Auto defaults to the mean.
        loss.into_data()
            .assert_approx_eq(&TensorData::from([0.284]), 7);
        loss_sum
            .into_data()
            .assert_approx_eq(&TensorData::from([1.42]), 5);
    }

    #[cfg(feature = "std")]
    #[test]
    fn test_huber_ad_loss() {
        type TestAutodiffTensor = Tensor<crate::TestAutodiffBackend, 1>;

        let device = Default::default();
        let predict =
            TestAutodiffTensor::from_data(TensorData::from([-2., -0.5, 0., 0.3, 1.]), &device)
                .require_grad();
        let targets =
            TestAutodiffTensor::from_data(TensorData::from([0., 0., 0., 0., 0.]), &device);

        let huber = HuberLossConfig::new(0.5).init();
        let loss = huber.forward_no_reduction(predict.clone(), targets);

        let grads = loss.backward();
        let grads_predict = predict.grad(&grads).unwrap();

        // Gradient saturates at +/-delta outside the quadratic region.
        grads_predict
            .to_data()
            .assert_approx_eq(&TensorData::from([-0.5, -0.5, 0., 0.3, 0.5]), 3);
    }

    #[test]
    fn display() {
        let loss = HuberLossConfig::new(0.5).init();

        assert_eq!(
            alloc::format!("{}", loss),
            "HuberLoss {delta: 0.5, lin_bias: 0.125}"
        );
    }
}