use crate::core::loss_functions::LossFunction;
use nalgebra::{DMatrix, DVector};
/// Precomputed factors for applying a robust-loss correction (in the style of
/// the Triggs correction used by Ceres Solver) to a residual vector and its
/// Jacobian. Built once per residual block via [`Corrector::new`] from the
/// loss derivatives evaluated at the residual's squared norm.
#[derive(Debug, Clone)]
pub struct Corrector {
    /// `sqrt(rho[1])` — square root of the loss first derivative; the plain
    /// scaling applied to the Jacobian. NaN if `rho[1] < 0` (not checked).
    sqrt_rho1: f64,
    /// Factor applied to the residual vector: equals `sqrt_rho1` on the
    /// trivial path, otherwise `sqrt_rho1 / (1 - alpha)`.
    residual_scaling: f64,
    /// `alpha / sq_norm`, the coefficient of the rank-1 Jacobian correction;
    /// exactly 0.0 when only plain scaling is required.
    alpha_sq_norm: f64,
}
impl Corrector {
    /// Builds the correction factors from `loss_function` evaluated at
    /// `sq_norm`, the squared norm `‖r‖²` of the residual vector.
    ///
    /// `evaluate` returns `[rho, rho', rho'']`. Two regimes:
    /// * `sq_norm == 0` or `rho'' <= 0` (loss locally linear/concave): only
    ///   first-order scaling `sqrt(rho')` is applied; the rank-1 term is off.
    /// * otherwise: `d = 1 + 2·s·rho''/rho'` (clamped to ≥ 0 so `sqrt` stays
    ///   real), `alpha = 1 - sqrt(d)`, residuals scale by
    ///   `sqrt(rho') / (1 - alpha)` and the Jacobian gets a rank-1 update
    ///   with coefficient `alpha / s`.
    ///
    /// Note: `rho' < 0` is not rejected — `sqrt_rho1` (and everything derived
    /// from it) becomes NaN, which downstream tests rely on to flag a
    /// misbehaving loss.
    pub fn new(loss_function: &dyn LossFunction, sq_norm: f64) -> Self {
        let rho = loss_function.evaluate(sq_norm);
        let rho_1 = rho[1];
        let rho_2 = rho[2];
        let sqrt_rho1 = rho_1.sqrt();
        // Trivial path: no curvature correction possible or needed.
        if sq_norm == 0.0 || rho_2 <= 0.0 {
            return Self {
                sqrt_rho1,
                residual_scaling: sqrt_rho1,
                alpha_sq_norm: 0.0,
            };
        }
        // Clamp guards against sqrt of a negative discriminant for strongly
        // non-convex losses; d == 0 then yields alpha == 1 and an infinite /
        // NaN residual scaling, matching the previous behavior.
        let d = (1.0 + 2.0 * sq_norm * rho_2 / rho_1).max(0.0);
        let alpha = 1.0 - d.sqrt();
        Self {
            sqrt_rho1,
            residual_scaling: sqrt_rho1 / (1.0 - alpha),
            alpha_sq_norm: alpha / sq_norm,
        }
    }

    /// Applies the correction to `jacobian` in place:
    /// `J ← sqrt(rho') · (J − (alpha/‖r‖²) · r·rᵀ·J)`.
    pub fn correct_jacobian(&self, residual: &DVector<f64>, jacobian: &mut DMatrix<f64>) {
        // Fast path: pure scaling when the rank-1 term is disabled.
        if self.alpha_sq_norm == 0.0 {
            *jacobian *= self.sqrt_rho1;
            return;
        }
        // Associate the product as r · (rᵀ J): the 1×m row `rᵀ J` costs
        // O(n·m), versus O(n²·m) for forming the n×n outer product r·rᵀ
        // first. In-place -=/*= also avoid cloning the Jacobian twice.
        let rt_j = residual.transpose() * &*jacobian;
        *jacobian -= (residual * rt_j) * self.alpha_sq_norm;
        *jacobian *= self.sqrt_rho1;
    }

    /// Scales the residual vector in place by the precomputed factor.
    pub fn correct_residuals(&self, residual: &mut DVector<f64>) {
        *residual *= self.residual_scaling;
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::core::loss_functions::{CauchyLoss, HuberLoss};

    type TestResult = Result<(), Box<dyn std::error::Error>>;

    /// Within the Huber threshold the loss is quadratic, so the corrector
    /// must act as the identity: unit scaling, zero rank-1 coefficient.
    #[test]
    fn test_corrector_huber_inlier() -> TestResult {
        let loss = HuberLoss::new(1.0)?;
        let r = DVector::from_vec(vec![0.1, 0.2, 0.1]);
        let corrector = Corrector::new(&loss, r.dot(&r));

        assert!((corrector.sqrt_rho1 - 1.0).abs() < 1e-10);
        assert!(corrector.alpha_sq_norm.abs() < 1e-10);

        let mut corrected = r.clone();
        corrector.correct_residuals(&mut corrected);
        assert!((corrected - r).norm() < 1e-10);
        Ok(())
    }

    /// Beyond the threshold the Huber loss down-weights: scaling lies
    /// strictly in (0, 1) and the corrected residual shrinks.
    #[test]
    fn test_corrector_huber_outlier() -> TestResult {
        let loss = HuberLoss::new(1.0)?;
        let r = DVector::from_vec(vec![5.0, 5.0, 5.0]);
        let corrector = Corrector::new(&loss, r.dot(&r));

        assert!(corrector.sqrt_rho1 > 0.0 && corrector.sqrt_rho1 < 1.0);

        let mut corrected = r.clone();
        corrector.correct_residuals(&mut corrected);
        assert!(corrected.norm() < r.norm());
        Ok(())
    }

    /// Cauchy down-weights every nonzero residual, so the same shrinking
    /// property must hold.
    #[test]
    fn test_corrector_cauchy() -> TestResult {
        let loss = CauchyLoss::new(1.0)?;
        let r = DVector::from_vec(vec![2.0, 3.0]);
        let corrector = Corrector::new(&loss, r.dot(&r));

        assert!(corrector.sqrt_rho1 > 0.0 && corrector.sqrt_rho1 < 1.0);

        let mut corrected = r.clone();
        corrector.correct_residuals(&mut corrected);
        assert!(corrected.norm() < r.norm());
        Ok(())
    }

    /// Outlier residual + Huber: the Jacobian must actually be modified.
    #[test]
    fn test_corrector_jacobian() -> TestResult {
        let loss = HuberLoss::new(1.0)?;
        let r = DVector::from_vec(vec![2.0, 1.0]);
        let corrector = Corrector::new(&loss, r.dot(&r));

        let mut jac = DMatrix::from_row_slice(2, 3, &[1.0, 0.0, 1.0, 0.0, 1.0, 1.0]);
        let before = jac.clone();
        corrector.correct_jacobian(&r, &mut jac);
        assert!(jac != before);
        Ok(())
    }

    /// Pathological loss with a negative first derivative: sqrt(rho') is NaN
    /// by construction, and the corrector propagates it rather than panicking.
    struct EdgeCaseLoss;
    impl LossFunction for EdgeCaseLoss {
        fn evaluate(&self, _s: f64) -> [f64; 3] {
            [0.5, -0.1, 0.5]
        }
    }

    #[test]
    fn test_corrector_no_nan_on_negative_d() {
        let corrector = Corrector::new(&EdgeCaseLoss, 100.0);
        assert!(corrector.sqrt_rho1.is_nan());
    }

    /// Positive rho' with rho''/rho' large relative to sq_norm must still
    /// produce finite, non-NaN factors (the d >= 0 clamp at work).
    #[test]
    fn test_corrector_positive_rho1_large_rho2_ratio() {
        struct HighCurvatureLoss;
        impl LossFunction for HighCurvatureLoss {
            fn evaluate(&self, _s: f64) -> [f64; 3] {
                [0.5, 0.001, 0.001]
            }
        }

        let corrector = Corrector::new(&HighCurvatureLoss, 10.0);
        assert!(!corrector.sqrt_rho1.is_nan());
        assert!(!corrector.residual_scaling.is_nan());
        assert!(!corrector.alpha_sq_norm.is_nan());
    }
}