use crate::matrix::vector::Vector;
use crate::traits::FloatScalar;
use crate::Matrix;
use super::{LeastSquaresResult, OptimError};
/// Stopping criteria for the Gauss-Newton solver [`least_squares_gn`].
///
/// Each tolerance is compared in relative form inside the solver
/// (scaled by `1 + |current value|`), except `grad_tol`, which is an
/// absolute bound on the gradient norm.
#[derive(Debug, Clone, Copy)]
pub struct GaussNewtonSettings<T> {
/// Converged when the gradient norm `||J^T r||` falls below this value.
pub grad_tol: T,
/// Converged when the step norm falls below `x_tol * (1 + ||x||)`.
pub x_tol: T,
/// Converged when the cost change falls below `f_tol * (1 + |cost|)`.
pub f_tol: T,
/// Maximum number of Gauss-Newton iterations before giving up.
pub max_iter: usize,
}
impl Default for GaussNewtonSettings<f64> {
fn default() -> Self {
Self {
grad_tol: 1e-8,
x_tol: 1e-12,
f_tol: 1e-12,
max_iter: 100,
}
}
}
impl Default for GaussNewtonSettings<f32> {
fn default() -> Self {
Self {
grad_tol: 1e-4,
x_tol: 1e-6,
f_tol: 1e-6,
max_iter: 100,
}
}
}
/// Minimizes `0.5 * ||r(x)||^2` with the (undamped) Gauss-Newton method.
///
/// Each iteration solves the linearized least-squares subproblem
/// `J delta ~= -r` via a QR factorization of the Jacobian and takes a
/// full step `x + delta` (no line search or trust region).
///
/// # Arguments
/// * `residual` - evaluates the residual vector `r(x)` (length `M`).
/// * `jacobian` - evaluates the `M x N` Jacobian of `r` at `x`.
/// * `x0` - starting point.
/// * `settings` - convergence tolerances and iteration cap.
///
/// # Returns
/// A [`LeastSquaresResult`] on convergence (by gradient norm, step size,
/// or cost stagnation), along with evaluation counters.
///
/// # Errors
/// * [`OptimError::Singular`] if the QR factorization of the Jacobian fails.
/// * [`OptimError::MaxIterations`] if `settings.max_iter` is exhausted.
pub fn least_squares_gn<T: FloatScalar, const M: usize, const N: usize>(
    mut residual: impl FnMut(&Vector<T, N>) -> Vector<T, M>,
    mut jacobian: impl FnMut(&Vector<T, N>) -> Matrix<T, M, N>,
    x0: &Vector<T, N>,
    settings: &GaussNewtonSettings<T>,
) -> Result<LeastSquaresResult<T, N>, OptimError> {
    let mut x = *x0;
    let mut r = residual(&x);
    let mut r_evals = 1usize;
    let mut j_evals = 0usize;
    // Build 0.5 from T::one() so no extra from-primitive bound is needed on T.
    let half = T::one() / (T::one() + T::one());
    let mut cost = r.dot(&r) * half;
    for iter in 0..settings.max_iter {
        let j = jacobian(&x);
        j_evals += 1;
        // Gradient of the cost: g = J^T r.
        let g = j.transpose() * r;
        let g_norm = g.norm();
        if g_norm < settings.grad_tol {
            // First-order optimality reached at the current iterate.
            return Ok(LeastSquaresResult {
                x,
                cost,
                grad_norm: g_norm,
                iterations: iter,
                r_evals,
                j_evals,
            });
        }
        // Gauss-Newton step: least-squares solve of J * delta = -r via QR.
        let neg_r = -r;
        let qr = j.qr().map_err(|_| OptimError::Singular)?;
        let delta = qr.solve(&neg_r);
        let x_new = x + delta;
        r = residual(&x_new);
        r_evals += 1;
        let cost_new = r.dot(&r) * half;
        // Relative step-size and cost-stagnation tests. Their success payloads
        // were identical, so the two former branches are merged into one.
        let step_small = delta.norm() < settings.x_tol * (T::one() + x.norm());
        let cost_stalled = (cost - cost_new).abs() < settings.f_tol * (T::one() + cost.abs());
        if step_small || cost_stalled {
            return Ok(LeastSquaresResult {
                x: x_new,
                cost: cost_new,
                // Gradient norm at the pre-step iterate; avoids an extra
                // Jacobian evaluation at x_new.
                grad_norm: g_norm,
                iterations: iter + 1,
                r_evals,
                j_evals,
            });
        }
        x = x_new;
        cost = cost_new;
    }
    Err(OptimError::MaxIterations)
}