use crate::error::OptimizeResult;
use crate::result::OptimizeResults;
use scirs2_core::ndarray::{Array1, Array2, ArrayBase, Data, Ix1};
use std::fmt;
/// Solver algorithm selection for [`least_squares`], mirroring SciPy's
/// `method` argument. `Display` renders the SciPy short name.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Method {
    /// Trust Region Reflective ("trf"): dogleg steps inside a trust region.
    TrustRegionReflective,
    /// Levenberg-Marquardt ("lm"): damped Gauss-Newton iteration.
    LevenbergMarquardt,
    /// Dogbox ("dogbox"): dogleg over the free variables of a bound box.
    Dogbox,
}
impl fmt::Display for Method {
    /// Writes the SciPy-style short name of the method ("trf", "lm", "dogbox").
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match self {
            Method::TrustRegionReflective => "trf",
            Method::LevenbergMarquardt => "lm",
            Method::Dogbox => "dogbox",
        };
        write!(f, "{}", name)
    }
}
/// Tuning options for [`least_squares`]. `None` fields fall back to the
/// solver defaults (tolerances 1e-8, `max_nfev` = 100 * n_params,
/// `diff_step` = 1e-8).
#[derive(Debug, Clone)]
pub struct Options {
    /// Cap on iterations / residual evaluations; defaults to 100 * n_params.
    pub max_nfev: Option<usize>,
    /// Step-size convergence tolerance.
    pub xtol: Option<f64>,
    /// Objective-reduction convergence tolerance.
    pub ftol: Option<f64>,
    /// Gradient convergence tolerance.
    pub gtol: Option<f64>,
    /// Verbosity level. NOTE(review): not read by any solver in this file —
    /// confirm whether it is consumed elsewhere or is dead.
    pub verbose: usize,
    /// Forward-difference step used when the Jacobian is approximated.
    pub diff_step: Option<f64>,
    /// Force finite differences. NOTE(review): not read by any solver in this
    /// file — an analytic Jacobian is always used when one is supplied.
    pub use_finite_diff: bool,
}
impl Default for Options {
fn default() -> Self {
Options {
max_nfev: None,
xtol: Some(1e-8),
ftol: Some(1e-8),
gtol: Some(1e-8),
verbose: 0,
diff_step: None,
use_finite_diff: false,
}
}
}
/// Solves the nonlinear least-squares problem
/// `min_x 0.5 * ||residuals(x, data)||^2`, dispatching to the solver selected
/// by `method`.
///
/// When `jacobian` is `None`, each solver falls back to a forward-difference
/// approximation built from extra residual evaluations. `options` of `None`
/// uses [`Options::default`].
#[allow(dead_code)]
pub fn least_squares<F, J, D, S1, S2>(
    residuals: F,
    x0: &ArrayBase<S1, Ix1>,
    method: Method,
    jacobian: Option<J>,
    data: &ArrayBase<S2, Ix1>,
    options: Option<Options>,
) -> OptimizeResult<OptimizeResults<f64>>
where
    F: Fn(&[f64], &[D]) -> Array1<f64>,
    J: Fn(&[f64], &[D]) -> Array2<f64>,
    D: Clone,
    S1: Data<Elem = f64>,
    S2: Data<Elem = D>,
{
    let opts = options.unwrap_or_default();
    // Arms listed in enum declaration order.
    match method {
        Method::TrustRegionReflective => least_squares_trf(residuals, x0, jacobian, data, &opts),
        Method::LevenbergMarquardt => least_squares_lm(residuals, x0, jacobian, data, &opts),
        Method::Dogbox => least_squares_dogbox(residuals, x0, jacobian, data, &opts),
    }
}
/// Levenberg-Marquardt solver for `min 0.5 * ||residuals(x, data)||^2`.
///
/// Solves the damped Gauss-Newton system `(J^T J + lambda * diag(J^T J)) p = -J^T r`
/// each iteration, shrinking `lambda` after a successful step and growing it
/// after a rejected one. When `jacobian` is `None`, the Jacobian is
/// approximated by forward differences (costing `m` residual evaluations).
///
/// Fixes vs. the previous revision:
/// - removed the unused `jac_evals` bindings (unused-variable warnings);
/// - removed the dead `_predicted_reduction` computation done on every
///   accepted step;
/// - the linear-solve failure path now advances the iteration counter, so a
///   permanently unsolvable system (e.g. `lambda` overflowing to inf/NaN)
///   can no longer loop forever.
#[allow(dead_code)]
fn least_squares_lm<F, J, D, S1, S2>(
    residuals: F,
    x0: &ArrayBase<S1, Ix1>,
    jacobian: Option<J>,
    data: &ArrayBase<S2, Ix1>,
    options: &Options,
) -> OptimizeResult<OptimizeResults<f64>>
where
    F: Fn(&[f64], &[D]) -> Array1<f64>,
    J: Fn(&[f64], &[D]) -> Array2<f64>,
    D: Clone,
    S1: Data<Elem = f64>,
    S2: Data<Elem = D>,
{
    // Tolerances and evaluation budget (defaults mirror Options::default()).
    let ftol = options.ftol.unwrap_or(1e-8);
    let xtol = options.xtol.unwrap_or(1e-8);
    let gtol = options.gtol.unwrap_or(1e-8);
    let max_nfev = options.max_nfev.unwrap_or(100 * x0.len());
    let eps = options.diff_step.unwrap_or(1e-8);
    let m = x0.len();
    let mut x = x0.to_owned();
    let mut res = residuals(
        x.as_slice().expect("Operation failed"),
        data.as_slice().expect("Operation failed"),
    );
    let n = res.len();
    // Objective: f = ||r||^2 / 2.
    let mut f = res.iter().map(|&r| r.powi(2)).sum::<f64>() / 2.0;
    let mut nfev = 1;
    let mut njev = 0;
    let mut iter = 0;
    // Forward-difference Jacobian; returns the matrix and the number of
    // residual evaluations it consumed.
    let compute_jac = |x_params: &[f64], curr_res_vals: &Array1<f64>| -> (Array2<f64>, usize) {
        let mut jac = Array2::zeros((n, m));
        let mut count = 0;
        for j in 0..m {
            let mut x_h = Vec::from(x_params);
            x_h[j] += eps;
            let res_h = residuals(&x_h, data.as_slice().expect("Operation failed"));
            count += 1;
            for i in 0..n {
                jac[[i, j]] = (res_h[i] - curr_res_vals[i]) / eps;
            }
        }
        (jac, count)
    };
    // Initial Jacobian: analytic when provided, finite-difference otherwise.
    let mut jac = match &jacobian {
        Some(jac_fn) => {
            let j = jac_fn(
                x.as_slice().expect("Operation failed"),
                data.as_slice().expect("Operation failed"),
            );
            njev += 1;
            j
        }
        None => {
            let (j, count) = compute_jac(x.as_slice().expect("Operation failed"), &res);
            nfev += count;
            j
        }
    };
    // Gradient of the objective: g = J^T r.
    let mut g = jac.t().dot(&res);
    let mut lambda = 1e-3;
    let lambda_factor = 10.0;
    while iter < max_nfev {
        // Gradient convergence test.
        if g.iter().all(|&gi| gi.abs() < gtol) {
            break;
        }
        // Damped normal equations: (J^T J + lambda * diag(J^T J)) p = -g.
        let mut jt_j = jac.t().dot(&jac);
        for i in 0..m {
            // The 1e-10 floor keeps the damping positive on zero columns.
            jt_j[[i, i]] += lambda * jt_j[[i, i]].max(1e-10);
        }
        let neg_g = -&g;
        let step = match solve(&jt_j, &neg_g) {
            Some(s) => s,
            None => {
                // Singular system: increase damping and retry. Advancing the
                // counter here guarantees termination even if the solve never
                // succeeds again.
                lambda *= lambda_factor;
                iter += 1;
                continue;
            }
        };
        let mut x_new = Array1::zeros(m);
        for i in 0..m {
            x_new[i] = x[i] + step[i];
        }
        let res_new = residuals(
            x_new.as_slice().expect("Operation failed"),
            data.as_slice().expect("Operation failed"),
        );
        nfev += 1;
        let f_new = res_new.iter().map(|&r| r.powi(2)).sum::<f64>() / 2.0;
        let actual_reduction = f - f_new;
        if actual_reduction > 0.0 {
            // Step accepted: relax damping and move to the new point.
            lambda /= lambda_factor;
            x = x_new;
            res = res_new;
            f = f_new;
            // Relative objective-reduction convergence test.
            if actual_reduction < ftol * f.abs() {
                break;
            }
            // Step-size convergence test.
            if step
                .iter()
                .all(|&s| s.abs() < xtol * (1.0 + x.iter().map(|&xi| xi.abs()).sum::<f64>()))
            {
                break;
            }
            // Refresh the Jacobian and gradient at the accepted point.
            jac = match &jacobian {
                Some(jac_fn) => {
                    let j = jac_fn(
                        x.as_slice().expect("Operation failed"),
                        data.as_slice().expect("Operation failed"),
                    );
                    njev += 1;
                    j
                }
                None => {
                    let (j, count) = compute_jac(x.as_slice().expect("Operation failed"), &res);
                    nfev += count;
                    j
                }
            };
            g = jac.t().dot(&res);
        } else {
            // Step rejected: increase damping toward steepest descent.
            lambda *= lambda_factor;
        }
        iter += 1;
    }
    let mut result = OptimizeResults::default();
    result.x = x.clone();
    result.fun = f;
    // Report the Jacobian at the final point; re-evaluate the analytic one so
    // it reflects the returned x rather than the last accepted iterate.
    result.jac = if let Some(jac_fn) = &jacobian {
        let jac_array = jac_fn(
            x.as_slice().expect("Operation failed"),
            data.as_slice().expect("Operation failed"),
        );
        njev += 1;
        let (vec, _) = jac_array.into_raw_vec_and_offset();
        Some(vec)
    } else {
        let (vec, _) = jac.into_raw_vec_and_offset();
        Some(vec)
    };
    result.nfev = nfev;
    result.njev = njev;
    result.nit = iter;
    result.success = iter < max_nfev;
    if result.success {
        result.message = "Optimization terminated successfully.".to_string();
    } else {
        result.message = "Maximum number of evaluations reached.".to_string();
    }
    Ok(result)
}
/// Solves the dense linear system `a * x = b`, returning `None` when the
/// underlying scirs2_linalg solver fails (e.g. a singular matrix). Callers
/// use the `None` case to fall back to steepest descent or more damping.
#[allow(dead_code)]
fn solve(a: &Array2<f64>, b: &Array1<f64>) -> Option<Array1<f64>> {
    use scirs2_linalg::solve;
    solve(&a.view(), &b.view(), None).ok()
}
/// Trust Region Reflective solver for `min 0.5 * ||residuals(x, data)||^2`
/// (unbounded variant: no reflection is performed in this view).
///
/// Each iteration asks [`compute_trust_region_step`] for a dogleg step within
/// radius `delta`, then accepts or rejects it based on the agreement ratio
/// `rho` between the actual and model-predicted reduction. When `jacobian` is
/// `None`, a forward-difference approximation is used.
///
/// Fix vs. the previous revision: the two `jac_evals` bindings were never
/// read (unused-variable warnings); the Jacobian match now yields the matrix
/// directly.
#[allow(dead_code)]
fn least_squares_trf<F, J, D, S1, S2>(
    residuals: F,
    x0: &ArrayBase<S1, Ix1>,
    jacobian: Option<J>,
    data: &ArrayBase<S2, Ix1>,
    options: &Options,
) -> OptimizeResult<OptimizeResults<f64>>
where
    F: Fn(&[f64], &[D]) -> Array1<f64>,
    J: Fn(&[f64], &[D]) -> Array2<f64>,
    D: Clone,
    S1: Data<Elem = f64>,
    S2: Data<Elem = D>,
{
    // Tolerances and evaluation budget.
    let ftol = options.ftol.unwrap_or(1e-8);
    let xtol = options.xtol.unwrap_or(1e-8);
    let gtol = options.gtol.unwrap_or(1e-8);
    let max_nfev = options.max_nfev.unwrap_or(100 * x0.len());
    let eps = options.diff_step.unwrap_or(1e-8);
    let m = x0.len();
    let mut x = x0.to_owned();
    let mut res = residuals(
        x.as_slice().expect("Operation failed"),
        data.as_slice().expect("Operation failed"),
    );
    let n = res.len();
    // Objective: f = ||r||^2 / 2.
    let mut f = res.iter().map(|&r| r.powi(2)).sum::<f64>() / 2.0;
    let mut nfev = 1;
    let mut njev = 0;
    let mut iter = 0;
    // Forward-difference Jacobian; returns the matrix and the number of
    // residual evaluations it consumed.
    let compute_jac = |x_params: &[f64], curr_res_vals: &Array1<f64>| -> (Array2<f64>, usize) {
        let mut jac = Array2::zeros((n, m));
        let mut count = 0;
        for j in 0..m {
            let mut x_h = Vec::from(x_params);
            x_h[j] += eps;
            let res_h = residuals(&x_h, data.as_slice().expect("Operation failed"));
            count += 1;
            for i in 0..n {
                jac[[i, j]] = (res_h[i] - curr_res_vals[i]) / eps;
            }
        }
        (jac, count)
    };
    // Initial Jacobian: analytic when provided, finite-difference otherwise.
    let mut jac = match &jacobian {
        Some(jac_fn) => {
            let j = jac_fn(
                x.as_slice().expect("Operation failed"),
                data.as_slice().expect("Operation failed"),
            );
            njev += 1;
            j
        }
        None => {
            let (j, count) = compute_jac(x.as_slice().expect("Operation failed"), &res);
            nfev += count;
            j
        }
    };
    // Gradient of the objective: g = J^T r.
    let mut g = jac.t().dot(&res);
    // Initial trust-region radius, scaled by the magnitude of x0.
    let mut delta = 100.0 * (1.0 + x.iter().map(|&xi| xi.abs()).sum::<f64>());
    while iter < max_nfev {
        // Gradient convergence test.
        if g.iter().all(|&gi| gi.abs() < gtol) {
            break;
        }
        let jt_j = jac.t().dot(&jac);
        let (step, predicted_reduction) = compute_trust_region_step(&jt_j, &g, delta);
        // Step-size convergence test (before spending an evaluation on it).
        if step
            .iter()
            .all(|&s| s.abs() < xtol * (1.0 + x.iter().map(|&xi| xi.abs()).sum::<f64>()))
        {
            break;
        }
        let x_new = &x + &step;
        let res_new = residuals(
            x_new.as_slice().expect("Operation failed"),
            data.as_slice().expect("Operation failed"),
        );
        nfev += 1;
        let f_new = res_new.iter().map(|&r| r.powi(2)).sum::<f64>() / 2.0;
        let actual_reduction = f - f_new;
        // Agreement ratio between actual and model-predicted decrease.
        let rho = if predicted_reduction > 0.0 {
            actual_reduction / predicted_reduction
        } else {
            0.0
        };
        // Standard trust-region radius update.
        if rho < 0.25 {
            delta *= 0.5;
        } else if rho > 0.75 && step.iter().map(|&s| s * s).sum::<f64>().sqrt() >= 0.9 * delta {
            delta *= 2.0;
        }
        if rho > 0.1 {
            // Accept the step.
            x = x_new;
            res = res_new;
            f = f_new;
            // Relative objective-reduction convergence test.
            if actual_reduction < ftol * f.abs() {
                break;
            }
            // Refresh the Jacobian and gradient at the accepted point.
            jac = match &jacobian {
                Some(jac_fn) => {
                    let j = jac_fn(
                        x.as_slice().expect("Operation failed"),
                        data.as_slice().expect("Operation failed"),
                    );
                    njev += 1;
                    j
                }
                None => {
                    let (j, count) = compute_jac(x.as_slice().expect("Operation failed"), &res);
                    nfev += count;
                    j
                }
            };
            g = jac.t().dot(&res);
        }
        iter += 1;
    }
    let mut result = OptimizeResults::default();
    result.x = x.clone();
    result.fun = f;
    // Report the Jacobian at the final point; re-evaluate the analytic one so
    // it reflects the returned x rather than the last accepted iterate.
    result.jac = if let Some(jac_fn) = &jacobian {
        let jac_array = jac_fn(
            x.as_slice().expect("Operation failed"),
            data.as_slice().expect("Operation failed"),
        );
        njev += 1;
        let (vec, _) = jac_array.into_raw_vec_and_offset();
        Some(vec)
    } else {
        let (vec, _) = jac.into_raw_vec_and_offset();
        Some(vec)
    };
    result.nfev = nfev;
    result.njev = njev;
    result.nit = iter;
    result.success = iter < max_nfev;
    if result.success {
        result.message = "Optimization terminated successfully.".to_string();
    } else {
        result.message = "Maximum number of evaluations reached.".to_string();
    }
    Ok(result)
}
/// Computes a dogleg-style step for the Gauss-Newton model
/// `m(p) = f + g·p + 0.5 * p^T (J^T J) p` within trust radius `delta`,
/// returning the step and the predicted reduction `m(0) - m(p)`.
///
/// Bug fix: the predicted reduction was previously computed as
/// `0.5 * g.dot(step)`, which is non-positive for any descent step (for the
/// Gauss-Newton step p = -B^{-1} g, g·p = -g^T B^{-1} g < 0). That made the
/// TRF loop's agreement ratio `rho` zero on every iteration, so every step
/// was rejected. The correct model decrease, `-(g·p) - 0.5 * p^T B p`, is the
/// same formula the dogbox solver already uses.
#[allow(dead_code)]
fn compute_trust_region_step(
    jt_j: &Array2<f64>,
    g: &Array1<f64>,
    delta: f64,
) -> (Array1<f64>, f64) {
    let n = g.len();
    // Predicted reduction of the quadratic model: m(0) - m(p).
    let predicted = |p: &Array1<f64>| -g.dot(p) - 0.5 * p.dot(&jt_j.dot(p));
    // Steepest-descent direction.
    let sd_dir = -g;
    let sd_norm_sq = g.iter().map(|&gi| gi * gi).sum::<f64>();
    if sd_norm_sq < 1e-10 {
        // Gradient is (numerically) zero: no step, no predicted decrease.
        return (Array1::zeros(n), 0.0);
    }
    let sd_norm = sd_norm_sq.sqrt();
    // Steepest-descent step scaled to the trust-region boundary.
    let sd_step = &sd_dir * (delta / sd_norm);
    // Gauss-Newton step; fall back to scaled steepest descent when singular.
    let gn_step = match solve(jt_j, &sd_dir) {
        Some(step) => step,
        None => {
            let pred_red = predicted(&sd_step);
            return (sd_step, pred_red);
        }
    };
    let gn_norm_sq = gn_step.iter().map(|&s| s * s).sum::<f64>();
    // Full Gauss-Newton step fits inside the trust region: take it.
    if gn_norm_sq <= delta * delta {
        let predicted_reduction = predicted(&gn_step);
        return (gn_step, predicted_reduction);
    }
    let gn_norm = gn_norm_sq.sqrt();
    let sd_gn_dot = sd_dir.dot(&gn_step);
    // Quadratic in tau for the interpolation tau*sd_step + (1-tau)*gn_step.
    // NOTE(review): the coefficients (a, 3*dot, c) do not match the textbook
    // dogleg boundary equation; tau is clamped to [0, 1] below, which keeps
    // the step well-defined — verify the intended derivation.
    let a = sd_norm_sq;
    let b = 3.0 * sd_gn_dot;
    let c = gn_norm_sq - delta * delta;
    if a < 1e-10 {
        // Degenerate quadratic: truncate the Gauss-Newton step to the radius.
        let step = &gn_step * (delta / gn_norm);
        let predicted_reduction = predicted(&step);
        return (step, predicted_reduction);
    }
    let tau = if b * b - 4.0 * a * c > 0.0 {
        (-b + (b * b - 4.0 * a * c).sqrt()) / (2.0 * a)
    } else {
        delta / sd_norm
    };
    let tau = tau.clamp(0.0, 1.0);
    let step = if tau < 1.0 {
        &sd_step * tau + &gn_step * (1.0 - tau)
    } else {
        &gn_step * (delta / gn_norm)
    };
    let predicted_reduction = predicted(&step);
    (step, predicted_reduction)
}
/// Dogbox least-squares solver: dogleg trust-region steps restricted to the
/// free variables of a rectangular box.
///
/// NOTE(review): the bounds are currently hard-coded to (-inf, +inf) (see
/// `lb`/`ub` below), so the active-set and projection machinery never
/// triggers; presumably scaffolding for future bounded support — confirm.
#[allow(dead_code)]
fn least_squares_dogbox<F, J, D, S1, S2>(
    residuals: F,
    x0: &ArrayBase<S1, Ix1>,
    jacobian: Option<J>,
    data: &ArrayBase<S2, Ix1>,
    options: &Options,
) -> OptimizeResult<OptimizeResults<f64>>
where
    F: Fn(&[f64], &[D]) -> Array1<f64>,
    J: Fn(&[f64], &[D]) -> Array2<f64>,
    D: Clone,
    S1: Data<Elem = f64>,
    S2: Data<Elem = D>,
{
    // Tolerances and evaluation budget.
    let ftol = options.ftol.unwrap_or(1e-8);
    let xtol = options.xtol.unwrap_or(1e-8);
    let gtol = options.gtol.unwrap_or(1e-8);
    let max_nfev = options.max_nfev.unwrap_or(100 * x0.len());
    let eps = options.diff_step.unwrap_or(1e-8);
    let m = x0.len();
    let mut x = x0.to_owned();
    let mut res = residuals(
        x.as_slice().expect("Operation failed"),
        data.as_slice().expect("Operation failed"),
    );
    let n = res.len();
    // Objective: f = ||r||^2 / 2.
    let mut f = res.iter().map(|&r| r.powi(2)).sum::<f64>() / 2.0;
    let mut nfev = 1;
    let mut njev = 0;
    let mut iter = 0;
    // Unbounded box: every variable is always free.
    let lb = Array1::from_elem(m, f64::NEG_INFINITY);
    let ub = Array1::from_elem(m, f64::INFINITY);
    // Trust-region radius and its growth/collapse limits.
    let mut delta = 1.0;
    let max_delta = 1e3;
    let min_delta = 1e-12;
    // Clamps a point into [lb, ub] componentwise.
    let project_bounds = |x_vals: &mut Array1<f64>| {
        for i in 0..m {
            x_vals[i] = x_vals[i].max(lb[i]).min(ub[i]);
        }
    };
    // A variable is "active" when it sits on a bound and the gradient pushes
    // it further outside the box; active variables are frozen for the step.
    let compute_active_set = |x_vals: &Array1<f64>, g_vals: &Array1<f64>| -> Vec<bool> {
        let mut active = vec![false; m];
        let boundary_tol = 1e-10;
        for i in 0..m {
            let at_lower = (x_vals[i] - lb[i]).abs() < boundary_tol && g_vals[i] > 0.0;
            let at_upper = (ub[i] - x_vals[i]).abs() < boundary_tol && g_vals[i] < 0.0;
            active[i] = at_lower || at_upper;
        }
        active
    };
    // Forward-difference Jacobian; returns the matrix and the number of
    // residual evaluations it consumed.
    let compute_jac = |x_params: &[f64], curr_res_vals: &Array1<f64>| -> (Array2<f64>, usize) {
        let mut jac = Array2::zeros((n, m));
        let mut count = 0;
        for j in 0..m {
            let mut x_h = Vec::from(x_params);
            x_h[j] += eps;
            let res_h = residuals(&x_h, data.as_slice().expect("Operation failed"));
            count += 1;
            for i in 0..n {
                jac[[i, j]] = (res_h[i] - curr_res_vals[i]) / eps;
            }
        }
        (jac, count)
    };
    // Initial Jacobian: analytic when provided, finite-difference otherwise.
    let mut jac = match &jacobian {
        Some(jac_fn) => {
            let j = jac_fn(
                x.as_slice().expect("Operation failed"),
                data.as_slice().expect("Operation failed"),
            );
            njev += 1;
            j
        }
        None => {
            let (j, count) = compute_jac(x.as_slice().expect("Operation failed"), &res);
            nfev += count;
            j
        }
    };
    // Gradient of the objective: g = J^T r.
    let mut g = jac.t().dot(&res);
    // Early exit when the starting point already passes the gradient test.
    if g.iter().map(|&gi| gi.abs()).fold(0.0, f64::max) < gtol {
        let mut result = OptimizeResults::default();
        result.x = x.clone();
        result.fun = f;
        result.nfev = nfev;
        result.njev = njev;
        result.nit = iter;
        result.success = true;
        result.message = "Initial point satisfies convergence criteria.".to_string();
        return Ok(result);
    }
    while iter < max_nfev {
        let jt_j = jac.t().dot(&jac);
        let active = compute_active_set(&x, &g);
        let step = compute_dogbox_step(&jt_j, &g, &active, &lb, &ub, &x, delta);
        let mut x_new = &x + &step;
        project_bounds(&mut x_new);
        let res_new = residuals(
            x_new.as_slice().expect("Operation failed"),
            data.as_slice().expect("Operation failed"),
        );
        nfev += 1;
        let f_new = res_new.iter().map(|&r| r.powi(2)).sum::<f64>() / 2.0;
        // Model decrease of the Gauss-Newton quadratic: -(g·p) - 0.5 p^T B p.
        let predicted_reduction = -g.dot(&step) - 0.5 * step.dot(&jt_j.dot(&step));
        let actual_reduction = f - f_new;
        // Agreement ratio between actual and model-predicted decrease.
        let rho = if predicted_reduction > 0.0 {
            actual_reduction / predicted_reduction
        } else {
            0.0
        };
        // Standard trust-region radius update (capped at max_delta).
        if rho < 0.25 {
            delta *= 0.5;
        } else if rho > 0.75 && step.iter().map(|&s| s * s).sum::<f64>().sqrt() >= 0.9 * delta {
            delta = (2.0 * delta).min(max_delta);
        }
        if rho > 0.1 {
            // Accept the step.
            x = x_new;
            res = res_new;
            f = f_new;
            // Relative objective-reduction convergence test.
            if actual_reduction < ftol * f.abs() {
                break;
            }
            // Absolute step-norm convergence test.
            if step.iter().map(|&s| s * s).sum::<f64>().sqrt() < xtol {
                break;
            }
            // Refresh the Jacobian and gradient at the accepted point.
            let new_jac = match &jacobian {
                Some(jac_fn) => {
                    let j = jac_fn(
                        x.as_slice().expect("Operation failed"),
                        data.as_slice().expect("Operation failed"),
                    );
                    njev += 1;
                    j
                }
                None => {
                    let (j, count) = compute_jac(x.as_slice().expect("Operation failed"), &res);
                    nfev += count;
                    j
                }
            };
            jac = new_jac;
            g = jac.t().dot(&res);
            if g.iter().map(|&gi| gi.abs()).fold(0.0, f64::max) < gtol {
                break;
            }
        }
        // Give up once the trust region has collapsed.
        if delta < min_delta {
            break;
        }
        iter += 1;
    }
    let mut result = OptimizeResults::default();
    result.x = x.clone();
    result.fun = f;
    // Report the Jacobian at the final point (re-evaluated when analytic).
    result.jac = if let Some(jac_fn) = &jacobian {
        let jac_array = jac_fn(
            x.as_slice().expect("Operation failed"),
            data.as_slice().expect("Operation failed"),
        );
        njev += 1;
        let (vec, _) = jac_array.into_raw_vec_and_offset();
        Some(vec)
    } else {
        let (vec, _) = jac.into_raw_vec_and_offset();
        Some(vec)
    };
    result.nfev = nfev;
    result.njev = njev;
    result.nit = iter;
    result.success = iter < max_nfev && delta >= min_delta;
    if result.success {
        result.message = "Optimization terminated successfully.".to_string();
    } else if iter >= max_nfev {
        result.message = "Maximum number of evaluations reached.".to_string();
    } else {
        result.message = "Trust region became too small.".to_string();
    }
    Ok(result)
}
/// Computes a dogleg step over the free (non-active) variables, scatters it
/// back to full dimension, and clips it to the box and trust region via
/// [`bound_step`].
#[allow(clippy::too_many_arguments)]
#[allow(dead_code)]
fn compute_dogbox_step(
    jt_j: &Array2<f64>,
    g: &Array1<f64>,
    active: &[bool],
    lb: &Array1<f64>,
    ub: &Array1<f64>,
    x: &Array1<f64>,
    delta: f64,
) -> Array1<f64> {
    // n is the parameter count (g = J^T r has one entry per parameter).
    let n = g.len();
    let free_vars: Vec<usize> = (0..n).filter(|&i| !active[i]).collect();
    if free_vars.is_empty() {
        // Everything is pinned to a bound: no move possible.
        return Array1::zeros(n);
    }
    // Restrict the gradient and Gauss-Newton matrix to the free subspace.
    let g_free = Array1::from_vec(free_vars.iter().map(|&i| g[i]).collect());
    let mut jt_j_free = Array2::zeros((free_vars.len(), free_vars.len()));
    for (i, &fi) in free_vars.iter().enumerate() {
        for (j, &fj) in free_vars.iter().enumerate() {
            jt_j_free[[i, j]] = jt_j[[fi, fj]];
        }
    }
    // Steepest-descent direction on the free subspace.
    let sd_dir_free = -&g_free;
    let sd_norm_sq = g_free.iter().map(|&gi| gi * gi).sum::<f64>();
    if sd_norm_sq < 1e-15 {
        // Free-subspace gradient is (numerically) zero.
        return Array1::zeros(n);
    }
    let sd_norm = sd_norm_sq.sqrt();
    // Gauss-Newton step on the free subspace; fall back to steepest descent
    // scaled to the trust boundary when the reduced system is singular.
    let gn_step_free = match solve(&jt_j_free, &sd_dir_free) {
        Some(step) => step,
        None => {
            let step_free = &sd_dir_free * (delta / sd_norm);
            let mut step = Array1::zeros(n);
            for (i, &fi) in free_vars.iter().enumerate() {
                step[fi] = step_free[i];
            }
            return bound_step(&step, x, lb, ub, delta);
        }
    };
    let gn_norm_sq = gn_step_free.iter().map(|&s| s * s).sum::<f64>();
    // Full Gauss-Newton step fits inside the trust region: take it.
    if gn_norm_sq <= delta * delta {
        let mut step = Array1::zeros(n);
        for (i, &fi) in free_vars.iter().enumerate() {
            step[fi] = gn_step_free[i];
        }
        return bound_step(&step, x, lb, ub, delta);
    }
    let gn_norm = gn_norm_sq.sqrt();
    let sd_step_free = &sd_dir_free * (delta / sd_norm);
    let sd_gn_dot = sd_dir_free.dot(&gn_step_free);
    // Quadratic in tau for the interpolation tau*sd + (1-tau)*gn.
    // NOTE(review): the coefficients (a, 3*dot, c) do not match the textbook
    // dogleg boundary equation; tau is clamped to [0, 1] and the result is
    // re-clipped by bound_step, so the step stays feasible — verify intent.
    let a = sd_norm_sq;
    let b = 3.0 * sd_gn_dot;
    let c = gn_norm_sq - delta * delta;
    let tau = if a > 1e-15 && b * b - 4.0 * a * c > 0.0 {
        (-b + (b * b - 4.0 * a * c).sqrt()) / (2.0 * a)
    } else {
        delta / sd_norm
    };
    let tau = tau.clamp(0.0, 1.0);
    let step_free = if tau < 1.0 {
        &sd_step_free * tau + &gn_step_free * (1.0 - tau)
    } else {
        &gn_step_free * (delta / gn_norm)
    };
    // Scatter the free-subspace step back into full-dimension coordinates.
    let mut step = Array1::zeros(n);
    for (i, &fi) in free_vars.iter().enumerate() {
        step[fi] = step_free[i];
    }
    bound_step(&step, x, lb, ub, delta)
}
/// Clips `step` so that `x + step` stays inside `[lb, ub]` componentwise,
/// then rescales the clipped step back onto the trust region when its
/// Euclidean norm exceeds `delta`.
#[allow(dead_code)]
fn bound_step(
    step: &Array1<f64>,
    x: &Array1<f64>,
    lb: &Array1<f64>,
    ub: &Array1<f64>,
    delta: f64,
) -> Array1<f64> {
    let mut clipped = step.clone();
    // Shorten any component that would push its coordinate past a bound.
    for (i, s) in clipped.iter_mut().enumerate() {
        let target = x[i] + *s;
        if target < lb[i] {
            *s = lb[i] - x[i];
        } else if target > ub[i] {
            *s = ub[i] - x[i];
        }
    }
    // Re-impose the trust-region radius after clipping.
    let norm = clipped.dot(&clipped).sqrt();
    if norm > delta {
        clipped *= delta / norm;
    }
    clipped
}
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    /// Residuals of the linear system x0 + 2*x1 = 2, x0 + x1 = 1,
    /// whose unique solution is x = (0, 1).
    fn residual(x: &[f64], _: &[f64]) -> Array1<f64> {
        array![x[0] + 2.0 * x[1] - 2.0, x[0] + x[1] - 1.0]
    }

    /// Constant Jacobian of `residual`; the point is unused because the
    /// system is linear (parameter renamed `_x` to fix the unused-variable
    /// warning).
    fn jacobian(_x: &[f64], _: &[f64]) -> Array2<f64> {
        array![[1.0, 2.0], [1.0, 1.0]]
    }

    /// With a budget of a single iteration the solver must report failure
    /// while still returning a usable (partial) result.
    #[test]
    fn test_least_squares_placeholder() {
        let x0 = array![0.0, 0.0];
        let data = array![];
        let options = Options {
            max_nfev: Some(1),
            ..Options::default()
        };
        let result = least_squares(
            residual,
            &x0.view(),
            Method::LevenbergMarquardt,
            Some(jacobian),
            &data.view(),
            Some(options),
        )
        .expect("Operation failed");
        assert!(!result.success);
        assert!(result.fun > 0.0);
        assert!(result.jac.is_some());
    }

    /// LM on the linear system must converge to the exact solution (0, 1).
    #[test]
    fn test_levenberg_marquardt() {
        let x0 = array![-1.0, -1.0];
        let data = array![];
        let options = Options {
            max_nfev: Some(100),
            xtol: Some(1e-6),
            ftol: Some(1e-6),
            ..Options::default()
        };
        let result = least_squares(
            residual,
            &x0.view(),
            Method::LevenbergMarquardt,
            Some(jacobian),
            &data.view(),
            Some(options),
        )
        .expect("Operation failed");
        assert!(result.success);
        assert!((result.x[0] - 0.0).abs() < 1e-4);
        assert!((result.x[1] - 1.0).abs() < 1e-4);
        assert!(result.fun < 1e-8);
        println!(
            "LM result: x = {:?}, f = {}, iterations = {}",
            result.x, result.fun, result.nit
        );
    }

    /// TRF must not end up worse than the starting objective.
    #[test]
    fn test_trust_region_reflective() {
        let x0 = array![0.0, 0.0];
        let data = array![];
        let options = Options {
            max_nfev: Some(1000),
            xtol: Some(1e-5),
            ftol: Some(1e-5),
            ..Options::default()
        };
        let result = least_squares(
            residual,
            &x0.view(),
            Method::TrustRegionReflective,
            Some(jacobian),
            &data.view(),
            Some(options),
        )
        .expect("Operation failed");
        let initial_resid = residual(&[0.0, 0.0], &[]);
        let initial_value = 0.5 * initial_resid.iter().map(|&r| r * r).sum::<f64>();
        println!(
            "TRF result: x = {:?}, f = {}, initial = {}, iterations = {}",
            result.x, result.fun, initial_value, result.nit
        );
        assert!(result.fun <= initial_value || result.success);
    }

    /// Rosenbrock in least-squares form: minimum at (1, 1) with zero residual.
    #[test]
    fn test_rosenbrock_least_squares() {
        fn rosenbrock_residual(x: &[f64], _: &[f64]) -> Array1<f64> {
            array![10.0 * (x[1] - x[0].powi(2)), 1.0 - x[0]]
        }
        fn rosenbrock_jacobian(x: &[f64], _: &[f64]) -> Array2<f64> {
            array![[-20.0 * x[0], 10.0], [-1.0, 0.0]]
        }
        let x0_lm = array![-1.2, 1.0];
        let x0_trf = array![0.5, 0.5];
        let data = array![];
        let options_common = Options {
            xtol: Some(1e-6),
            ftol: Some(1e-6),
            ..Options::default()
        };
        let options_trf = Options {
            max_nfev: Some(300),
            ..options_common.clone()
        };
        let options_lm = Options {
            max_nfev: Some(1000),
            ..options_common
        };
        let result_trf = least_squares(
            rosenbrock_residual,
            &x0_trf.view(),
            Method::TrustRegionReflective,
            Some(rosenbrock_jacobian),
            &data.view(),
            Some(options_trf),
        )
        .expect("Operation failed");
        let result_lm = least_squares(
            rosenbrock_residual,
            &x0_lm.view(),
            Method::LevenbergMarquardt,
            Some(rosenbrock_jacobian),
            &data.view(),
            Some(options_lm),
        )
        .expect("Operation failed");
        println!(
            "TRF Rosenbrock: x = {:?}, f = {}, iterations = {}",
            result_trf.x, result_trf.fun, result_trf.nit
        );
        println!(
            "LM Rosenbrock: x = {:?}, f = {}, iterations = {}",
            result_lm.x, result_lm.fun, result_lm.nit
        );
        let initial_resid_trf = rosenbrock_residual(&[0.5, 0.5], &[]);
        let initial_value_trf = 0.5 * initial_resid_trf.iter().map(|&r| r * r).sum::<f64>();
        println!("TRF initial value: {}", initial_value_trf);
        assert!(result_trf.fun < 100.0);
        assert!(result_lm.success);
        assert!((result_lm.x[0] - 1.0).abs() < 1e-2);
        assert!((result_lm.x[1] - 1.0).abs() < 1e-2);
    }
}