Skip to main content

math_audio_optimisation/
levenberg_marquardt.rs

//! Levenberg-Marquardt bounded nonlinear least-squares solver.
//!
//! Solves `min_x  ||r(x)||^2_W` subject to `lb <= x <= ub`, where `r(x)` is a
//! vector-valued residual function and `W` is a diagonal weight matrix.
//!
//! The solver interpolates between Gauss-Newton (fast near the minimum) and
//! gradient descent (robust far from it) via an adaptive damping parameter `lambda`.
//! Bounds are enforced by projecting the trial step.

10use ndarray::{Array1, Array2};
11use thiserror::Error;
12
13// ---------------------------------------------------------------------------
14// Error types
15// ---------------------------------------------------------------------------
16
/// Errors from the Levenberg-Marquardt solver.
#[derive(Debug, Error)]
pub enum LMError {
    /// Lower and upper bounds have different lengths, or don't match x0.
    #[error("bounds/x0 dimension mismatch: x0 has {x0_len}, bounds has {bounds_len}")]
    DimensionMismatch {
        /// Length of x0
        x0_len: usize,
        /// Number of bound pairs
        bounds_len: usize,
    },

    /// A lower bound exceeds its upper bound.
    #[error("invalid bounds at index {index}: lower ({lower}) > upper ({upper})")]
    InvalidBounds {
        /// Index of the bad pair
        index: usize,
        /// Lower bound
        lower: f64,
        /// Upper bound
        upper: f64,
    },

    /// Residual function returned different sizes on successive calls.
    #[error("residual dimension changed: was {expected}, now {got}")]
    ResidualDimensionChanged {
        /// First observed size
        expected: usize,
        /// New size
        got: usize,
    },

    /// The Jacobian-derived system is singular and no step can be computed.
    // NOTE(review): the solver in this file never returns this variant — a
    // singular damped system is recovered from by raising lambda and retrying.
    // Kept for API compatibility with external callers that match on it.
    #[error("singular Jacobian — cannot compute step")]
    SingularJacobian,
}
53
/// Result alias for LM operations; the error type is always [`LMError`].
pub type LMResult<T> = std::result::Result<T, LMError>;
56
57// ---------------------------------------------------------------------------
58// Callback
59// ---------------------------------------------------------------------------
60
/// Action returned by a progress callback.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LMCallbackAction {
    /// Keep iterating.
    Continue,
    /// Stop early. The solver returns the best point found so far, with
    /// `success = false` and message "stopped by callback".
    Stop,
}
69
/// Snapshot of solver state passed to the callback at the *start* of each
/// iteration (before the step for that iteration is computed).
pub struct LMIntermediate {
    /// Current parameter vector (a clone; the callback cannot mutate solver state).
    pub x: Array1<f64>,
    /// Current objective value (weighted sum of squared residuals).
    pub fun: f64,
    /// Current damping parameter.
    pub lambda: f64,
    /// Iteration number (0-based).
    pub iter: usize,
}
81
82// ---------------------------------------------------------------------------
83// Config
84// ---------------------------------------------------------------------------
85
/// Configuration for the LM solver.
///
/// Construct via [`LMConfigBuilder`]; `x0` is the only required field.
pub struct LMConfig {
    /// Maximum iterations (default 100).
    pub maxiter: usize,
    /// Relative convergence tolerance on the objective.
    /// Used in the combined acceptance test `|df| < tol * f_old + atol`.
    pub tol: f64,
    /// Absolute convergence tolerance on the objective. Also terminates
    /// immediately when the objective itself drops to `<= atol`.
    pub atol: f64,
    /// Initial damping parameter (default 1.0).
    pub lambda_init: f64,
    /// Finite-difference step for Jacobian approximation (default 1e-8).
    pub jacobian_epsilon: f64,
    /// Initial guess (required). Projected into the bounds before the first
    /// residual evaluation.
    pub x0: Array1<f64>,
    /// Per-residual weights (optional; default = uniform).
    pub weights: Option<Array1<f64>>,
    /// Print progress to stderr (every 10th iteration).
    pub disp: bool,
    /// Optional progress callback; returning [`LMCallbackAction::Stop`] ends the run.
    pub callback: Option<Box<dyn FnMut(&LMIntermediate) -> LMCallbackAction>>,
}
107
/// Fluent builder for [`LMConfig`].
///
/// Fields mirror [`LMConfig`]; `x0` is `Option` here because it has no
/// sensible default and is checked at [`LMConfigBuilder::build`] time.
pub struct LMConfigBuilder {
    maxiter: usize,
    tol: f64,
    atol: f64,
    lambda_init: f64,
    jacobian_epsilon: f64,
    x0: Option<Array1<f64>>,
    weights: Option<Array1<f64>>,
    disp: bool,
    callback: Option<Box<dyn FnMut(&LMIntermediate) -> LMCallbackAction>>,
}
120
impl LMConfigBuilder {
    /// Create a new builder with defaults
    /// (maxiter 100, tol 1e-10, atol 1e-14, lambda 1.0, FD epsilon 1e-8).
    pub fn new() -> Self {
        Self {
            maxiter: 100,
            tol: 1e-10,
            atol: 1e-14,
            lambda_init: 1.0,
            jacobian_epsilon: 1e-8,
            x0: None,
            weights: None,
            disp: false,
            callback: None,
        }
    }

    /// Set the initial guess (required).
    pub fn x0(mut self, x0: Array1<f64>) -> Self {
        self.x0 = Some(x0);
        self
    }

    /// Maximum number of iterations.
    pub fn maxiter(mut self, n: usize) -> Self {
        self.maxiter = n;
        self
    }

    /// Relative convergence tolerance.
    pub fn tol(mut self, t: f64) -> Self {
        self.tol = t;
        self
    }

    /// Absolute convergence tolerance.
    pub fn atol(mut self, t: f64) -> Self {
        self.atol = t;
        self
    }

    /// Initial damping factor.
    pub fn lambda_init(mut self, l: f64) -> Self {
        self.lambda_init = l;
        self
    }

    /// Finite-difference epsilon for Jacobian.
    pub fn jacobian_epsilon(mut self, eps: f64) -> Self {
        self.jacobian_epsilon = eps;
        self
    }

    /// Per-residual weights. Must have the same length as the residual vector;
    /// this is not validated until the solver runs.
    pub fn weights(mut self, w: Array1<f64>) -> Self {
        self.weights = Some(w);
        self
    }

    /// Print progress.
    pub fn disp(mut self, d: bool) -> Self {
        self.disp = d;
        self
    }

    /// Progress callback.
    pub fn callback(mut self, cb: Box<dyn FnMut(&LMIntermediate) -> LMCallbackAction>) -> Self {
        self.callback = Some(cb);
        self
    }

    /// Build the config.
    ///
    /// # Panics
    /// Panics if [`LMConfigBuilder::x0`] was never called.
    pub fn build(self) -> LMConfig {
        LMConfig {
            maxiter: self.maxiter,
            tol: self.tol,
            atol: self.atol,
            lambda_init: self.lambda_init,
            jacobian_epsilon: self.jacobian_epsilon,
            x0: self.x0.expect("LMConfigBuilder: x0 is required"),
            weights: self.weights,
            disp: self.disp,
            callback: self.callback,
        }
    }
}
206
207impl Default for LMConfigBuilder {
208    fn default() -> Self {
209        Self::new()
210    }
211}
212
213// ---------------------------------------------------------------------------
214// Report
215// ---------------------------------------------------------------------------
216
/// Result of a Levenberg-Marquardt optimization.
#[derive(Debug)]
pub struct LMReport {
    /// Optimal parameter vector (always within the supplied bounds).
    pub x: Array1<f64>,
    /// Final objective value (weighted sum of squared residuals).
    pub fun: f64,
    /// Final residual vector, evaluated at `x`.
    pub residuals: Array1<f64>,
    /// Whether the solver converged (false on maxiter or callback stop).
    pub success: bool,
    /// Human-readable termination message.
    pub message: String,
    /// Number of iterations performed.
    pub nit: usize,
    /// Number of residual function evaluations (initial + trials + FD Jacobian).
    pub nfev: usize,
}
235
236// ---------------------------------------------------------------------------
237// Solver
238// ---------------------------------------------------------------------------
239
/// Bounded Levenberg-Marquardt nonlinear least-squares solver.
///
/// Minimizes `sum_i  w_i * r_i(x)^2` subject to `bounds[j].0 <= x[j] <= bounds[j].1`.
///
/// # Arguments
/// * `residual_fn` — returns the residual vector `r(x)`.
/// * `bounds` — per-parameter `(lower, upper)` pairs.
/// * `config` — solver configuration (must include `x0`).
///
/// # Errors
/// * [`LMError::DimensionMismatch`] if `bounds.len() != x0.len()`.
/// * [`LMError::InvalidBounds`] if any lower bound exceeds its upper bound.
/// * [`LMError::ResidualDimensionChanged`] if `residual_fn` returns a vector
///   whose length differs between calls.
///
/// A singular damped system is *not* an error here: the solver raises `lambda`
/// and retries on the next iteration (so [`LMError::SingularJacobian`] is not
/// produced by this function).
pub fn levenberg_marquardt<F>(
    residual_fn: &F,
    bounds: &[(f64, f64)],
    config: LMConfig,
) -> LMResult<LMReport>
where
    F: Fn(&Array1<f64>) -> Array1<f64>,
{
    let n_params = config.x0.len();

    // --- Validate inputs ---
    if bounds.len() != n_params {
        return Err(LMError::DimensionMismatch {
            x0_len: n_params,
            bounds_len: bounds.len(),
        });
    }
    for (i, &(lo, hi)) in bounds.iter().enumerate() {
        if lo > hi {
            return Err(LMError::InvalidBounds {
                index: i,
                lower: lo,
                upper: hi,
            });
        }
    }

    // --- Initialize ---
    // x0 is projected into the box first, so the initial residual is evaluated
    // at a feasible point.
    let mut x = project(&config.x0, bounds);
    let mut r = residual_fn(&x);
    let n_residuals = r.len();
    let mut nfev: usize = 1;

    // NOTE(review): a user-supplied weight vector is assumed to have length
    // n_residuals — an inconsistent length would panic in weighted_sos; verify
    // at call sites.
    let w = config.weights.unwrap_or_else(|| Array1::ones(n_residuals));
    let mut f_val = weighted_sos(&r, &w);
    let mut lambda = config.lambda_init;
    let eps = config.jacobian_epsilon;

    let mut success = false;
    // Default message is overwritten on any early termination below.
    let mut message = format!("max iterations ({}) reached", config.maxiter);
    let mut callback = config.callback;
    let mut nit: usize = 0;

    for iter in 0..config.maxiter {
        nit = iter + 1;
        // --- Callback ---
        // Invoked before any work this iteration; a Stop at iter 0 still
        // reports nit = 1.
        if let Some(ref mut cb) = callback {
            let intermediate = LMIntermediate {
                x: x.clone(),
                fun: f_val,
                lambda,
                iter,
            };
            if cb(&intermediate) == LMCallbackAction::Stop {
                message = "stopped by callback".to_string();
                break;
            }
        }

        // --- Check for already-converged ---
        if f_val <= config.atol {
            success = true;
            message = format!("converged: objective {:.2e} <= atol {:.2e}", f_val, config.atol);
            break;
        }

        // --- Compute Jacobian via central finite differences ---
        let jac = compute_jacobian(residual_fn, &x, &r, n_residuals, eps, &mut nfev)?;

        // --- Form damped normal equations ---
        // LM step solves: (J^T W J + λ·D) δ = −J^T W r
        // where D = diag(J^T W J) (Marquardt scaling).
        let jtwj = jtw_j(&jac, &w);
        let jtwr = jtw_r(&jac, &w, &r); // gradient direction

        // Marquardt diagonal scaling; floor at 1e-20 so a zero column in J
        // still yields a positive damping term.
        let diag: Array1<f64> = (0..n_params)
            .map(|j| jtwj[[j, j]].max(1e-20))
            .collect();

        // H = J^T W J + λ·diag(D)
        let mut h = jtwj.clone();
        for j in 0..n_params {
            h[[j, j]] += lambda * diag[j];
        }

        // RHS = −J^T W r  (descent direction)
        let neg_jtwr = jtwr.mapv(|v| -v);

        // Solve H · δ = −J^T W r
        let delta = match solve_linear_system(&h, &neg_jtwr) {
            Some(d) => d,
            None => {
                // Increase damping and retry with a more gradient-like,
                // better-conditioned system next iteration.
                lambda *= 4.0;
                continue;
            }
        };

        // --- Trial step with bounds projection ---
        let x_trial = project(&(&x + &delta), bounds);
        let r_trial = residual_fn(&x_trial);
        nfev += 1;

        if r_trial.len() != n_residuals {
            return Err(LMError::ResidualDimensionChanged {
                expected: n_residuals,
                got: r_trial.len(),
            });
        }

        let f_trial = weighted_sos(&r_trial, &w);

        // --- Gain ratio ---
        // Model reduction for f(x) = r^T W r with step δ solving (JTWJ + λD)δ = −jtwr:
        //   pred = δ^T JTWJ δ + 2λ δ^T D δ
        // NOTE(review): pred uses the *unprojected* δ; when projection clips the
        // step at a bound, rho under/over-estimates the true gain. The extra
        // `f_trial < f_val` guard below makes acceptance safe regardless.
        let pred: f64 = {
            let mut p = 0.0;
            for j in 0..n_params {
                let mut jtwj_delta_j = 0.0;
                for k in 0..n_params {
                    jtwj_delta_j += jtwj[[j, k]] * delta[k];
                }
                p += delta[j] * jtwj_delta_j + 2.0 * lambda * diag[j] * delta[j] * delta[j];
            }
            p
        };

        let actual = f_val - f_trial;
        let rho = if pred.abs() > 1e-30 { actual / pred } else { 0.0 };

        // A NaN f_trial fails both comparisons and is rejected (damping grows).
        if rho > 0.0 && f_trial < f_val {
            // Accept step
            let f_old = f_val;
            x = x_trial;
            r = r_trial;
            f_val = f_trial;
            // Move toward Gauss-Newton; floor keeps lambda strictly positive.
            lambda = (lambda / 2.0).max(1e-15);

            // Convergence check: combined relative + absolute decrease test.
            if (f_old - f_val).abs() < config.tol * f_old + config.atol {
                success = true;
                message = format!(
                    "converged: |df|={:.2e} < tol*f+atol={:.2e}",
                    (f_old - f_val).abs(),
                    config.tol * f_old + config.atol
                );
                break;
            }
        } else {
            // Reject step — increase damping (capped to avoid overflow).
            lambda = (lambda * 2.0).min(1e15);
        }

        if config.disp && iter % 10 == 0 {
            eprintln!(
                "LM iter {}: f={:.6e}, lambda={:.2e}, rho={:.3}",
                iter, f_val, lambda, rho
            );
        }
    }

    Ok(LMReport {
        x,
        fun: f_val,
        residuals: r,
        success,
        message,
        nit,
        nfev,
    })
}
420
421// ---------------------------------------------------------------------------
422// Internal helpers
423// ---------------------------------------------------------------------------
424
425/// Project `x` onto the box `[lb, ub]`.
426fn project(x: &Array1<f64>, bounds: &[(f64, f64)]) -> Array1<f64> {
427    Array1::from(
428        x.iter()
429            .zip(bounds.iter())
430            .map(|(&xi, &(lo, hi))| xi.clamp(lo, hi))
431            .collect::<Vec<_>>(),
432    )
433}
434
435/// Weighted sum of squares: sum(w_i * r_i^2).
436fn weighted_sos(r: &Array1<f64>, w: &Array1<f64>) -> f64 {
437    r.iter().zip(w.iter()).map(|(&ri, &wi)| wi * ri * ri).sum()
438}
439
440/// Compute Jacobian via central finite differences.
441fn compute_jacobian<F>(
442    residual_fn: &F,
443    x: &Array1<f64>,
444    _r0: &Array1<f64>,
445    n_residuals: usize,
446    eps: f64,
447    nfev: &mut usize,
448) -> LMResult<Array2<f64>>
449where
450    F: Fn(&Array1<f64>) -> Array1<f64>,
451{
452    let n_params = x.len();
453    let mut jac = Array2::zeros((n_residuals, n_params));
454
455    for j in 0..n_params {
456        let mut x_plus = x.clone();
457        let mut x_minus = x.clone();
458        let h = eps.max(eps * x[j].abs());
459        x_plus[j] += h;
460        x_minus[j] -= h;
461
462        let r_plus = residual_fn(&x_plus);
463        let r_minus = residual_fn(&x_minus);
464        *nfev += 2;
465
466        if r_plus.len() != n_residuals {
467            return Err(LMError::ResidualDimensionChanged {
468                expected: n_residuals,
469                got: r_plus.len(),
470            });
471        }
472        if r_minus.len() != n_residuals {
473            return Err(LMError::ResidualDimensionChanged {
474                expected: n_residuals,
475                got: r_minus.len(),
476            });
477        }
478
479        let inv_2h = 1.0 / (2.0 * h);
480        for i in 0..n_residuals {
481            jac[[i, j]] = (r_plus[i] - r_minus[i]) * inv_2h;
482        }
483    }
484
485    Ok(jac)
486}
487
488/// Compute J^T W J (n_params x n_params).
489fn jtw_j(jac: &Array2<f64>, w: &Array1<f64>) -> Array2<f64> {
490    let n_params = jac.ncols();
491    let n_res = jac.nrows();
492    let mut result = Array2::zeros((n_params, n_params));
493
494    for j in 0..n_params {
495        for k in j..n_params {
496            let mut val = 0.0;
497            for i in 0..n_res {
498                val += w[i] * jac[[i, j]] * jac[[i, k]];
499            }
500            result[[j, k]] = val;
501            result[[k, j]] = val;
502        }
503    }
504
505    result
506}
507
508/// Compute J^T W r (n_params vector).
509fn jtw_r(jac: &Array2<f64>, w: &Array1<f64>, r: &Array1<f64>) -> Array1<f64> {
510    let n_params = jac.ncols();
511    let n_res = jac.nrows();
512    let mut result = Array1::zeros(n_params);
513
514    for j in 0..n_params {
515        let mut val = 0.0;
516        for i in 0..n_res {
517            val += w[i] * jac[[i, j]] * r[i];
518        }
519        result[j] = val;
520    }
521
522    result
523}
524
525/// Solve a dense symmetric positive-definite system Ax = b via Gaussian
526/// elimination with partial pivoting. Returns `None` if the matrix is singular.
527fn solve_linear_system(a: &Array2<f64>, b: &Array1<f64>) -> Option<Array1<f64>> {
528    let n = b.len();
529    debug_assert_eq!(a.nrows(), n);
530    debug_assert_eq!(a.ncols(), n);
531
532    // Augmented matrix [A | b]
533    let mut aug = Array2::zeros((n, n + 1));
534    for i in 0..n {
535        for j in 0..n {
536            aug[[i, j]] = a[[i, j]];
537        }
538        aug[[i, n]] = b[i];
539    }
540
541    // Forward elimination with partial pivoting
542    for col in 0..n {
543        // Find pivot
544        let mut max_val = aug[[col, col]].abs();
545        let mut max_row = col;
546        for row in (col + 1)..n {
547            let val = aug[[row, col]].abs();
548            if val > max_val {
549                max_val = val;
550                max_row = row;
551            }
552        }
553
554        if max_val < 1e-30 {
555            return None; // Singular
556        }
557
558        // Swap rows
559        if max_row != col {
560            for j in 0..=n {
561                let tmp = aug[[col, j]];
562                aug[[col, j]] = aug[[max_row, j]];
563                aug[[max_row, j]] = tmp;
564            }
565        }
566
567        // Eliminate below
568        let pivot = aug[[col, col]];
569        for row in (col + 1)..n {
570            let factor = aug[[row, col]] / pivot;
571            for j in col..=n {
572                aug[[row, j]] -= factor * aug[[col, j]];
573            }
574        }
575    }
576
577    // Back substitution
578    let mut x = Array1::zeros(n);
579    for col in (0..n).rev() {
580        let mut sum = aug[[col, n]];
581        for j in (col + 1)..n {
582            sum -= aug[[col, j]] * x[j];
583        }
584        x[col] = sum / aug[[col, col]];
585    }
586
587    // NaN check
588    if x.iter().any(|v| !v.is_finite()) {
589        return None;
590    }
591
592    Some(x)
593}
594
595// ---------------------------------------------------------------------------
596// Tests
597// ---------------------------------------------------------------------------
598
#[cfg(test)]
mod tests {
    use super::*;
    use ndarray::array;

    #[test]
    fn test_sphere() {
        // r_i = x_i, minimum at origin
        let residual = |x: &Array1<f64>| x.clone();
        let bounds = vec![(-10.0, 10.0); 3];
        let config = LMConfigBuilder::new()
            .x0(array![3.0, -2.0, 1.0])
            .maxiter(50)
            .build();

        let report = levenberg_marquardt(&residual, &bounds, config).unwrap();
        assert!(report.success, "should converge: {}", report.message);
        assert!(report.fun < 1e-12, "objective should be ~0, got {}", report.fun);
        for &xi in report.x.iter() {
            assert!(xi.abs() < 1e-6, "x should be ~0, got {}", xi);
        }
    }

    #[test]
    fn test_rosenbrock_residual() {
        // r = [10*(x1 - x0^2), 1 - x0]  =>  min at (1, 1)
        let residual = |x: &Array1<f64>| array![10.0 * (x[1] - x[0] * x[0]), 1.0 - x[0]];
        let bounds = vec![(-5.0, 5.0); 2];
        let config = LMConfigBuilder::new()
            .x0(array![-1.0, 1.0])
            .maxiter(200)
            .tol(1e-12)
            .build();

        let report = levenberg_marquardt(&residual, &bounds, config).unwrap();
        assert!(report.success, "should converge: {}", report.message);
        assert!((report.x[0] - 1.0).abs() < 1e-4, "x0 should be ~1, got {}", report.x[0]);
        assert!((report.x[1] - 1.0).abs() < 1e-4, "x1 should be ~1, got {}", report.x[1]);
    }

    #[test]
    fn test_bounded_solution() {
        // r = [x - 5]  =>  unconstrained min at x=5, but bound x <= 3
        let residual = |x: &Array1<f64>| array![x[0] - 5.0];
        let bounds = vec![(-10.0, 3.0)];
        let config = LMConfigBuilder::new()
            .x0(array![0.0])
            .maxiter(50)
            .build();

        let report = levenberg_marquardt(&residual, &bounds, config).unwrap();
        assert!(
            (report.x[0] - 3.0).abs() < 1e-6,
            "x should be at bound 3.0, got {}",
            report.x[0]
        );
    }

    #[test]
    fn test_nan_residual_handled() {
        // Residual that returns NaN for large x; solver must reject NaN trials
        // rather than panic or accept them.
        let residual = |x: &Array1<f64>| {
            if x[0].abs() > 100.0 {
                array![f64::NAN]
            } else {
                array![x[0] - 1.0]
            }
        };
        let bounds = vec![(-200.0, 200.0)];
        let config = LMConfigBuilder::new()
            .x0(array![0.0])
            .maxiter(50)
            .build();

        // Should not panic
        let result = levenberg_marquardt(&residual, &bounds, config);
        assert!(result.is_ok());
    }

    #[test]
    fn test_zero_residual() {
        // Already at the optimum: the f <= atol early-exit should fire.
        let residual = |x: &Array1<f64>| array![x[0], x[1]];
        let bounds = vec![(-10.0, 10.0); 2];
        let config = LMConfigBuilder::new()
            .x0(array![0.0, 0.0])
            .maxiter(10)
            .build();

        let report = levenberg_marquardt(&residual, &bounds, config).unwrap();
        assert!(report.success, "already at optimum: {}", report.message);
        assert!(report.fun < 1e-14);
    }

    #[test]
    fn test_callback_stop() {
        let residual = |x: &Array1<f64>| x.clone();
        let bounds = vec![(-10.0, 10.0); 2];
        let config = LMConfigBuilder::new()
            .x0(array![5.0, 5.0])
            .maxiter(1000)
            .callback(Box::new(|inter| {
                if inter.iter >= 3 {
                    LMCallbackAction::Stop
                } else {
                    LMCallbackAction::Continue
                }
            }))
            .build();

        let report = levenberg_marquardt(&residual, &bounds, config).unwrap();
        assert_eq!(report.message, "stopped by callback");
    }

    #[test]
    fn test_weighted_residuals() {
        // r = [x - 1, x - 3], unweighted min at x=2
        // With w = [10, 1], should pull x closer to 1
        let residual = |x: &Array1<f64>| array![x[0] - 1.0, x[0] - 3.0];
        let bounds = vec![(-10.0, 10.0)];

        // Unweighted
        let config_unw = LMConfigBuilder::new()
            .x0(array![0.0])
            .maxiter(50)
            .build();
        let report_unw = levenberg_marquardt(&residual, &bounds, config_unw).unwrap();

        // Weighted (10x weight on first residual)
        let config_w = LMConfigBuilder::new()
            .x0(array![0.0])
            .maxiter(50)
            .weights(array![10.0, 1.0])
            .build();
        let report_w = levenberg_marquardt(&residual, &bounds, config_w).unwrap();

        // Unweighted should land at ~2.0
        assert!(
            (report_unw.x[0] - 2.0).abs() < 0.01,
            "unweighted x should be ~2, got {}",
            report_unw.x[0]
        );
        // Weighted should be closer to 1.0
        assert!(
            report_w.x[0] < report_unw.x[0],
            "weighted x ({}) should be less than unweighted ({})",
            report_w.x[0],
            report_unw.x[0]
        );
        assert!(
            (report_w.x[0] - 1.0).abs() < 0.5,
            "weighted x should be near 1.0, got {}",
            report_w.x[0]
        );
    }

    #[test]
    fn test_dimension_mismatch() {
        let residual = |x: &Array1<f64>| x.clone();
        let bounds = vec![(-1.0, 1.0); 3]; // 3 bounds
        let config = LMConfigBuilder::new()
            .x0(array![0.0, 0.0]) // 2 params
            .build();

        let err = levenberg_marquardt(&residual, &bounds, config).unwrap_err();
        assert!(matches!(err, LMError::DimensionMismatch { .. }));
    }

    #[test]
    fn test_nit_tracks_iterations_not_nfev() {
        // nit should count outer iterations, nfev counts residual evaluations
        // For a 2-param problem, each iteration does 1 trial + 4 FD evals = 5+ nfev
        let residual = |x: &Array1<f64>| x.clone();
        let bounds = vec![(-10.0, 10.0); 2];
        let config = LMConfigBuilder::new()
            .x0(array![5.0, 5.0])
            .maxiter(5)
            .tol(1e-20) // don't converge early
            .atol(0.0)
            .build();

        let report = levenberg_marquardt(&residual, &bounds, config).unwrap();
        assert_eq!(report.nit, 5, "nit should be maxiter (5), got {}", report.nit);
        assert!(
            report.nfev > report.nit,
            "nfev ({}) should be much larger than nit ({})",
            report.nfev,
            report.nit
        );
    }

    #[test]
    fn test_invalid_bounds() {
        let residual = |x: &Array1<f64>| x.clone();
        let bounds = vec![(5.0, 1.0)]; // lower > upper
        let config = LMConfigBuilder::new().x0(array![0.0]).build();

        let err = levenberg_marquardt(&residual, &bounds, config).unwrap_err();
        assert!(matches!(err, LMError::InvalidBounds { .. }));
    }
}
799}