apex_solver/optimizer/gauss_newton.rs

1//! Gauss-Newton optimization algorithm implementation.
2//!
3//! The Gauss-Newton method is a fundamental iterative algorithm for solving nonlinear least squares problems
4//! of the form:
5//!
6//! ```text
7//! min f(x) = ½||r(x)||² = ½Σᵢ rᵢ(x)²
8//! ```
9//!
10//! where `r: ℝⁿ → ℝᵐ` is the residual vector function.
11//!
12//! # Algorithm Overview
13//!
14//! The Gauss-Newton method solves the normal equations at each iteration:
15//!
16//! ```text
17//! J^T·J·h = -J^T·r
18//! ```
19//!
20//! where:
21//! - `J` is the Jacobian matrix (m × n) of partial derivatives ∂rᵢ/∂xⱼ
22//! - `r` is the residual vector (m × 1)
23//! - `h` is the step vector (n × 1)
24//!
25//! The Gauss-Newton approximation `H ≈ J^T·J` replaces the true Hessian `∇²f = J^T·J + Σᵢ rᵢ·∇²rᵢ`,
26//! an approximation that works well when the residuals are small or nearly linear.
27//!
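//! This approximation follows from linearizing the residual inside the cost and expanding:
//!
//! ```text
//! f(x + h) ≈ ½·||r + J·h||² = ½·r^T·r + h^T·J^T·r + ½·h^T·(J^T·J)·h
//! ```
//!
//! Setting the gradient of this quadratic model with respect to `h` to zero yields exactly the
//! normal equations shown above.
//!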
28//! ## Convergence Properties
29//!
30//! - **Fast, near-quadratic convergence** close to the solution when the neglected term `Σᵢ rᵢ·∇²rᵢ` is small (i.e., small or nearly linear residuals)
31//! - **May diverge** if the initial guess is far from the optimum or the problem is ill-conditioned
32//! - **No step size control** - always takes the full Gauss-Newton step, with no damping or line search
33//!
34//! ## When to Use
35//!
36//! Gauss-Newton is most effective when:
37//! - The problem is well-conditioned with `J^T·J` having good numerical properties
38//! - The initial parameter guess is close to the solution
39//! - Fast convergence is prioritized over robustness
40//! - Residuals at the solution are expected to be small
41//!
42//! For ill-conditioned problems or poor initial guesses, consider:
43//! - [`LevenbergMarquardt`](crate::optimizer::LevenbergMarquardt) for adaptive damping
44//! - [`DogLeg`](crate::optimizer::DogLeg) for trust region control
45//!
46//! # Implementation Features
47//!
48//! - **Sparse matrix support**: Efficient handling of large-scale problems via `faer` sparse library
49//! - **Robust linear solvers**: Choice between Cholesky (fast) and QR (stable) factorizations
50//! - **Jacobi scaling**: Optional diagonal preconditioning to improve conditioning
51//! - **Manifold operations**: Support for optimization on Lie groups (SE2, SE3, SO2, SO3)
52//! - **Comprehensive diagnostics**: Detailed convergence and performance summaries
53//!
54//! # Mathematical Background
55//!
56//! At each iteration k, the algorithm:
57//!
58//! 1. **Linearizes** the problem around current estimate xₖ: `r(xₖ + h) ≈ r(xₖ) + J(xₖ)·h`
59//! 2. **Solves** the normal equations for step h: `J^T·J·h = -J^T·r`
60//! 3. **Updates** parameters: `xₖ₊₁ = xₖ ⊕ h` (using manifold plus operation)
61//! 4. **Checks** convergence criteria (cost, gradient, parameter change)
62//!
63//! The method terminates when the cost change, gradient norm, or parameter update falls below
64//! its specified tolerance, or when the maximum iteration count or the configured timeout is reached.
65//!
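//! As a concrete illustration of one iteration, the following sketch forms and solves the normal
//! equations for a small dense toy problem using `nalgebra` directly (illustrative values only;
//! the solver itself operates on sparse `faer` matrices):
//!
//! ```
//! use nalgebra::{DMatrix, DVector};
//!
//! // J and r evaluated at the current estimate xₖ (m = 3 residuals, n = 2 parameters).
//! let j = DMatrix::from_row_slice(3, 2, &[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]);
//! let r = DVector::from_row_slice(&[0.5, -0.3, 0.2]);
//!
//! // Normal equations: (J^T·J)·h = -J^T·r
//! let jtj = j.transpose() * &j;
//! let neg_jtr = -(j.transpose() * &r);
//! let h = jtj.cholesky().expect("J^T·J is SPD here").solve(&neg_jtr);
//!
//! // Update: xₖ₊₁ = xₖ ⊕ h (plain addition on Euclidean space)
//! let x_next = DVector::from_row_slice(&[1.0, 2.0]) + h;
//! assert_eq!(x_next.len(), 2);
//! ```
//!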
66//! # Examples
67//!
68//! ## Basic usage
69//!
70//! ```no_run
71//! use apex_solver::optimizer::GaussNewton;
72//! use apex_solver::core::problem::Problem;
73//! use std::collections::HashMap;
74//!
75//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
76//! // Create optimization problem
77//! let mut problem = Problem::new();
78//! // ... add residual blocks (factors) to problem ...
79//!
80//! // Set up initial parameter values
81//! let initial_values = HashMap::new();
82//! // ... initialize parameters ...
83//!
84//! // Create solver with default configuration
85//! let mut solver = GaussNewton::new();
86//!
87//! // Run optimization
88//! let result = solver.optimize(&problem, &initial_values)?;
89//! # Ok(())
90//! # }
91//! ```
92//!
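//! Initial values map each variable name to a `(ManifoldType, DVector)` pair. A minimal sketch for
//! two scalar variables on the Euclidean (`RN`) manifold, mirroring this module's tests:
//!
//! ```
//! use apex_solver::manifold::ManifoldType;
//! use nalgebra::dvector;
//! use std::collections::HashMap;
//!
//! let mut initial_values = HashMap::new();
//! initial_values.insert("x1".to_string(), (ManifoldType::RN, dvector![-1.2]));
//! initial_values.insert("x2".to_string(), (ManifoldType::RN, dvector![1.0]));
//! ```
//!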
93//! ## Advanced configuration
94//!
95//! ```no_run
96//! use apex_solver::optimizer::gauss_newton::{GaussNewtonConfig, GaussNewton};
97//! use apex_solver::linalg::LinearSolverType;
98//!
99//! # fn main() {
100//! let config = GaussNewtonConfig::new()
101//!     .with_max_iterations(100)
102//!     .with_cost_tolerance(1e-8)
103//!     .with_parameter_tolerance(1e-8)
104//!     .with_gradient_tolerance(1e-10)
105//!     .with_linear_solver_type(LinearSolverType::SparseQR)  // More stable
106//!     .with_jacobi_scaling(true);  // Improve conditioning
107//!
108//! let mut solver = GaussNewton::with_config(config);
109//! # }
110//! ```
111//!
112//! # References
113//!
114//! - Nocedal, J. & Wright, S. (2006). *Numerical Optimization* (2nd ed.). Springer. Chapter 10.
115//! - Madsen, K., Nielsen, H. B., & Tingleff, O. (2004). *Methods for Non-Linear Least Squares Problems* (2nd ed.).
116//! - Björck, Å. (1996). *Numerical Methods for Least Squares Problems*. SIAM.
117
118use crate::{core::problem, error, linalg, manifold, optimizer};
119
120use faer::sparse;
121use std::{collections, fmt, time};
122use tracing::debug;
123
124/// Summary statistics for the Gauss-Newton optimization process.
125#[derive(Debug, Clone)]
126pub struct GaussNewtonSummary {
127    /// Initial cost value
128    pub initial_cost: f64,
129    /// Final cost value
130    pub final_cost: f64,
131    /// Total number of iterations performed
132    pub iterations: usize,
133    /// Average cost reduction per iteration
134    pub average_cost_reduction: f64,
135    /// Maximum gradient norm encountered
136    pub max_gradient_norm: f64,
137    /// Final gradient norm
138    pub final_gradient_norm: f64,
139    /// Maximum parameter update norm
140    pub max_parameter_update_norm: f64,
141    /// Final parameter update norm
142    pub final_parameter_update_norm: f64,
143    /// Total time elapsed
144    pub total_time: time::Duration,
145    /// Average time per iteration
146    pub average_time_per_iteration: time::Duration,
147    /// Detailed per-iteration statistics history
148    pub iteration_history: Vec<IterationStats>,
149    /// Convergence status
150    pub convergence_status: optimizer::OptimizationStatus,
151}
152
153/// Per-iteration statistics for detailed logging (Ceres-style output).
154///
155/// Captures all relevant metrics for each optimization iteration, enabling
156/// detailed analysis and debugging of the optimization process.
157#[derive(Debug, Clone)]
158pub struct IterationStats {
159    /// Iteration number (0-indexed)
160    pub iteration: usize,
161    /// Cost function value at this iteration
162    pub cost: f64,
163    /// Change in cost from previous iteration
164    pub cost_change: f64,
165    /// L2 norm of the gradient (||J^T·r||)
166    pub gradient_norm: f64,
167    /// L2 norm of the parameter update step (||Δx||)
168    pub step_norm: f64,
169    /// Trust region ratio (not used in Gauss-Newton, always 0.0)
170    pub tr_ratio: f64,
171    /// Trust region radius (not used in Gauss-Newton, always 0.0)
172    pub tr_radius: f64,
173    /// Linear solver iterations (0 for direct solvers like Cholesky)
174    pub ls_iter: usize,
175    /// Time taken for this iteration in milliseconds
176    pub iter_time_ms: f64,
177    /// Total elapsed time since optimization started in milliseconds
178    pub total_time_ms: f64,
179    /// Whether the step was accepted (always true for Gauss-Newton)
180    pub accepted: bool,
181}
182
183impl IterationStats {
184    /// Print table header in Ceres-style format
185    pub fn print_header() {
186        debug!(
187            "{:>4}  {:>13}  {:>13}  {:>13}  {:>13}  {:>11}  {:>11}  {:>7}  {:>11}  {:>13}  {:>6}",
188            "iter",
189            "cost",
190            "cost_change",
191            "|gradient|",
192            "|step|",
193            "tr_ratio",
194            "tr_radius",
195            "ls_iter",
196            "iter_time",
197            "total_time",
198            "status"
199        );
200    }
201
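    /// Print a single iteration row in Ceres-style format, matching the columns of `print_header`.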
202    pub fn print_line(&self) {
203        let status = if self.iteration == 0 {
204            "-"
205        } else if self.accepted {
206            "✓"
207        } else {
208            "✗"
209        };
210
211        debug!(
212            "{:>4}  {:>13.6e}  {:>13.2e}  {:>13.2e}  {:>13.2e}  {:>11.2e}  {:>11.2e}  {:>7}  {:>9.2}ms  {:>11.2}ms  {:>6}",
213            self.iteration,
214            self.cost,
215            self.cost_change,
216            self.gradient_norm,
217            self.step_norm,
218            self.tr_ratio,
219            self.tr_radius,
220            self.ls_iter,
221            self.iter_time_ms,
222            self.total_time_ms,
223            status
224        );
225    }
226}
227
228impl fmt::Display for GaussNewtonSummary {
229    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
230        // Determine if converged
231        let converged = matches!(
232            self.convergence_status,
233            optimizer::OptimizationStatus::Converged
234                | optimizer::OptimizationStatus::CostToleranceReached
235                | optimizer::OptimizationStatus::GradientToleranceReached
236                | optimizer::OptimizationStatus::ParameterToleranceReached
237        );
238
239        writeln!(f, "Gauss-Newton Final Result")?;
240
241        // Title with convergence status
242        if converged {
243            writeln!(f, "CONVERGED ({:?})", self.convergence_status)?;
244        } else {
245            writeln!(f, "NOT CONVERGED ({:?})", self.convergence_status)?;
246        }
247
248        writeln!(f)?;
249        writeln!(f, "Cost:")?;
250        writeln!(f, "  Initial:   {:.6e}", self.initial_cost)?;
251        writeln!(f, "  Final:     {:.6e}", self.final_cost)?;
252        writeln!(
253            f,
254            "  Reduction: {:.6e} ({:.2}%)",
255            self.initial_cost - self.final_cost,
256            100.0 * (self.initial_cost - self.final_cost) / self.initial_cost.max(1e-12)
257        )?;
258        writeln!(f)?;
259        writeln!(f, "Iterations:")?;
260        writeln!(f, "  Total: {}", self.iterations)?;
261        writeln!(f)?;
262        writeln!(f, "Gradient:")?;
263        writeln!(f, "  Max norm:   {:.2e}", self.max_gradient_norm)?;
264        writeln!(f, "  Final norm: {:.2e}", self.final_gradient_norm)?;
265        writeln!(f)?;
266        writeln!(f, "Parameter Update:")?;
267        writeln!(f, "  Max norm:   {:.2e}", self.max_parameter_update_norm)?;
268        writeln!(f, "  Final norm: {:.2e}", self.final_parameter_update_norm)?;
269        writeln!(f)?;
270        writeln!(f, "Performance:")?;
271        writeln!(
272            f,
273            "  Total time:             {:.2}ms",
274            self.total_time.as_secs_f64() * 1000.0
275        )?;
276        writeln!(
277            f,
278            "  Average per iteration:  {:.2}ms",
279            self.average_time_per_iteration.as_secs_f64() * 1000.0
280        )?;
281
282        Ok(())
283    }
284}
285
286/// Configuration parameters for the Gauss-Newton optimizer.
287///
288/// Controls the behavior of the Gauss-Newton algorithm including convergence criteria,
289/// linear solver selection, and numerical stability enhancements.
290///
291/// # Builder Pattern
292///
293/// All configuration options can be set using the builder pattern:
294///
295/// ```
296/// use apex_solver::optimizer::gauss_newton::GaussNewtonConfig;
297/// use apex_solver::linalg::LinearSolverType;
298///
299/// let config = GaussNewtonConfig::new()
300///     .with_max_iterations(50)
301///     .with_cost_tolerance(1e-6)
302///     .with_linear_solver_type(LinearSolverType::SparseQR);
303/// ```
304///
305/// # Convergence Criteria
306///
307/// The optimizer terminates when ANY of the following conditions is met:
308///
309/// - **Cost tolerance**: `|cost_k - cost_{k-1}| < cost_tolerance · cost_{k-1}` (relative change)
310/// - **Parameter tolerance**: `||step|| ≤ parameter_tolerance · (||x|| + parameter_tolerance)`
311/// - **Gradient tolerance**: `||J^T·r|| < gradient_tolerance`
312/// - **Maximum iterations**: `iteration >= max_iterations`
313/// - **Timeout**: `elapsed_time >= timeout`
314///
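/// For example, a configuration tuned for early stopping (a sketch using only the builder
/// methods defined below):
///
/// ```
/// use apex_solver::optimizer::gauss_newton::GaussNewtonConfig;
/// use std::time::Duration;
///
/// // Stop once the cost is "good enough", or after at most 200 ms of wall-clock time.
/// let config = GaussNewtonConfig::new()
///     .with_min_cost_threshold(1e-3)
///     .with_timeout(Duration::from_millis(200));
/// ```
///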
315/// # See Also
316///
317/// - [`GaussNewton`] - The solver that uses this configuration
318/// - [`LevenbergMarquardtConfig`](crate::optimizer::LevenbergMarquardtConfig) - For adaptive damping
319/// - [`DogLegConfig`](crate::optimizer::DogLegConfig) - For trust region methods
320#[derive(Clone)]
321pub struct GaussNewtonConfig {
322    /// Type of linear solver for the linear systems
323    pub linear_solver_type: linalg::LinearSolverType,
324    /// Maximum number of iterations
325    pub max_iterations: usize,
326    /// Convergence tolerance for cost function
327    pub cost_tolerance: f64,
328    /// Convergence tolerance for parameter updates
329    pub parameter_tolerance: f64,
330    /// Convergence tolerance for gradient norm
331    pub gradient_tolerance: f64,
332    /// Timeout duration
333    pub timeout: Option<time::Duration>,
334    /// Use Jacobi column scaling (preconditioning)
335    ///
336    /// When enabled, normalizes Jacobian columns by their L2 norm before solving.
337    /// This can improve convergence for problems with mixed parameter scales
338    /// (e.g., positions in meters + angles in radians) but adds ~5-10% overhead.
339    ///
340    /// Default: false (Gauss-Newton is typically used on well-conditioned problems)
341    pub use_jacobi_scaling: bool,
342    /// Small regularization to ensure J^T·J is positive definite
343    ///
344    /// Pure Gauss-Newton (λ=0) can fail when J^T·J is singular or near-singular.
345    /// Adding a tiny diagonal regularization (e.g., 1e-10) ensures numerical stability
346    /// while maintaining the fast convergence of Gauss-Newton.
347    ///
348    /// Default: 1e-10 (very small, practically identical to pure Gauss-Newton)
349    pub min_diagonal: f64,
350
351    /// Minimum objective function cutoff (optional early termination)
352    ///
353    /// If set, optimization terminates when cost falls below this threshold.
354    /// Useful for early stopping when a "good enough" solution is acceptable.
355    ///
356    /// Default: None (disabled)
357    pub min_cost_threshold: Option<f64>,
358
359    /// Maximum condition number for Jacobian matrix (optional check)
360    ///
361    /// If set, the optimizer checks if condition_number(J^T*J) exceeds this
362    /// threshold and terminates with IllConditionedJacobian status.
363    /// Note: Computing condition number is expensive, so this is disabled by default.
364    ///
365    /// Default: None (disabled)
366    pub max_condition_number: Option<f64>,
367
368    /// Compute per-variable covariance matrices (uncertainty estimation)
369    ///
370    /// When enabled, computes covariance by inverting the Hessian matrix after
371    /// convergence. The full covariance matrix is extracted into per-variable
372    /// blocks stored in both Variable structs and optimizer::SolverResult.
373    ///
374    /// Default: false (to avoid performance overhead)
375    pub compute_covariances: bool,
376
377    /// Enable real-time visualization (graphical debugging).
378    ///
379    /// When enabled, optimization progress is logged to a Rerun viewer.
380    /// **Note:** Requires the `visualization` feature to be enabled in `Cargo.toml`.
381    ///
382    /// Default: false
383    #[cfg(feature = "visualization")]
384    pub enable_visualization: bool,
385}
386
387impl Default for GaussNewtonConfig {
388    fn default() -> Self {
389        Self {
390            linear_solver_type: linalg::LinearSolverType::default(),
391            // Ceres Solver default: 50 (changed from 100 for compatibility)
392            max_iterations: 50,
393            // Ceres Solver default: 1e-6 (changed from 1e-8 for compatibility)
394            cost_tolerance: 1e-6,
395            // Ceres Solver default: 1e-8 (unchanged)
396            parameter_tolerance: 1e-8,
397            // Ceres Solver default: 1e-10 (changed from 1e-8 for compatibility)
398            gradient_tolerance: 1e-10,
399            timeout: None,
400            use_jacobi_scaling: false,
401            min_diagonal: 1e-10,
402            // New Ceres-compatible termination parameters
403            min_cost_threshold: None,
404            max_condition_number: None,
405            compute_covariances: false,
406            #[cfg(feature = "visualization")]
407            enable_visualization: false,
408        }
409    }
410}
411
412impl GaussNewtonConfig {
413    /// Create a new Gauss-Newton configuration with default values.
414    pub fn new() -> Self {
415        Self::default()
416    }
417
418    /// Set the linear solver type
419    pub fn with_linear_solver_type(mut self, linear_solver_type: linalg::LinearSolverType) -> Self {
420        self.linear_solver_type = linear_solver_type;
421        self
422    }
423
424    /// Set the maximum number of iterations
425    pub fn with_max_iterations(mut self, max_iterations: usize) -> Self {
426        self.max_iterations = max_iterations;
427        self
428    }
429
430    /// Set the cost tolerance
431    pub fn with_cost_tolerance(mut self, cost_tolerance: f64) -> Self {
432        self.cost_tolerance = cost_tolerance;
433        self
434    }
435
436    /// Set the parameter tolerance
437    pub fn with_parameter_tolerance(mut self, parameter_tolerance: f64) -> Self {
438        self.parameter_tolerance = parameter_tolerance;
439        self
440    }
441
442    /// Set the gradient tolerance
443    pub fn with_gradient_tolerance(mut self, gradient_tolerance: f64) -> Self {
444        self.gradient_tolerance = gradient_tolerance;
445        self
446    }
447
448    /// Set the timeout duration
449    pub fn with_timeout(mut self, timeout: time::Duration) -> Self {
450        self.timeout = Some(timeout);
451        self
452    }
453
454    /// Enable or disable Jacobi column scaling (preconditioning).
455    ///
456    /// When enabled, normalizes Jacobian columns by their L2 norm before solving.
457    /// Can improve convergence for mixed-scale problems but adds ~5-10% overhead.
458    pub fn with_jacobi_scaling(mut self, use_jacobi_scaling: bool) -> Self {
459        self.use_jacobi_scaling = use_jacobi_scaling;
460        self
461    }
462
463    /// Set the minimum diagonal regularization for numerical stability.
464    ///
465    /// A small value (e.g., 1e-10) ensures J^T·J is positive definite while
466    /// maintaining the fast convergence of pure Gauss-Newton.
467    pub fn with_min_diagonal(mut self, min_diagonal: f64) -> Self {
468        self.min_diagonal = min_diagonal;
469        self
470    }
471
472    /// Set minimum objective function cutoff for early termination.
473    ///
474    /// When set, optimization terminates with MinCostThresholdReached status
475    /// if the cost falls below this threshold. Useful for early stopping when
476    /// a "good enough" solution is acceptable.
477    pub fn with_min_cost_threshold(mut self, min_cost: f64) -> Self {
478        self.min_cost_threshold = Some(min_cost);
479        self
480    }
481
482    /// Set maximum condition number for Jacobian matrix.
483    ///
484    /// If set, the optimizer checks if condition_number(J^T*J) exceeds this
485    /// threshold and terminates with IllConditionedJacobian status.
486    /// Note: Computing condition number is expensive, disabled by default.
487    pub fn with_max_condition_number(mut self, max_cond: f64) -> Self {
488        self.max_condition_number = Some(max_cond);
489        self
490    }
491
492    /// Enable or disable covariance computation (uncertainty estimation).
493    ///
494    /// When enabled, computes the full covariance matrix by inverting the Hessian
495    /// after convergence, then extracts per-variable covariance blocks.
496    pub fn with_compute_covariances(mut self, compute_covariances: bool) -> Self {
497        self.compute_covariances = compute_covariances;
498        self
499    }
500
501    /// Enable real-time visualization.
502    ///
503    /// **Note:** Requires the `visualization` feature to be enabled in `Cargo.toml`.
504    ///
505    /// # Arguments
506    ///
507    /// * `enable` - Whether to enable visualization
508    #[cfg(feature = "visualization")]
509    pub fn with_visualization(mut self, enable: bool) -> Self {
510        self.enable_visualization = enable;
511        self
512    }
513
514    /// Print configuration parameters (info level logging)
515    pub fn print_configuration(&self) {
516        debug!(
517            "\nConfiguration:\n  Solver:        Gauss-Newton\n  Linear solver: {:?}\n  Convergence Criteria:\n  Max iterations:      {}\n  Cost tolerance:      {:.2e}\n  Parameter tolerance: {:.2e}\n  Gradient tolerance:  {:.2e}\n  Timeout:             {:?}\n  Numerical Settings:\n  Jacobi scaling:      {}\n  Compute covariances: {}",
518            self.linear_solver_type,
519            self.max_iterations,
520            self.cost_tolerance,
521            self.parameter_tolerance,
522            self.gradient_tolerance,
523            self.timeout,
524            if self.use_jacobi_scaling {
525                "enabled"
526            } else {
527                "disabled"
528            },
529            if self.compute_covariances {
530                "enabled"
531            } else {
532                "disabled"
533            }
534        );
535    }
536}
537
538/// State for optimization iteration
539struct LinearizerResult {
540    variables: collections::HashMap<String, problem::VariableEnum>,
541    variable_index_map: collections::HashMap<String, usize>,
542    sorted_vars: Vec<String>,
543    symbolic_structure: problem::SymbolicStructure,
544    current_cost: f64,
545    initial_cost: f64,
546}
547
548/// Result from step computation
549struct StepResult {
550    step: faer::Mat<f64>,
551    gradient_norm: f64,
552}
553
554/// Result from cost evaluation
555struct CostEvaluation {
556    new_cost: f64,
557    cost_reduction: f64,
558}
559
560/// Gauss-Newton solver for nonlinear least squares optimization.
561///
562/// Implements the classical Gauss-Newton algorithm which solves `J^T·J·h = -J^T·r` at each
563/// iteration to find the step `h`. This provides fast (up to quadratic) convergence near the
564/// solution when residuals are small, but may diverge for poor initial guesses or ill-conditioned problems.
565///
566/// # Algorithm
567///
568/// At each iteration k:
569/// 1. Compute residual `r(xₖ)` and Jacobian `J(xₖ)`
570/// 2. Form normal equations: `(J^T·J)·h = -J^T·r`
571/// 3. Solve for step `h` using Cholesky or QR factorization
572/// 4. Update parameters: `xₖ₊₁ = xₖ ⊕ h` (manifold plus operation)
573/// 5. Check convergence criteria
574///
575/// # Examples
576///
577/// ```no_run
578/// use apex_solver::optimizer::GaussNewton;
579/// use apex_solver::core::problem::Problem;
580/// use std::collections::HashMap;
581///
582/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
583/// let mut problem = Problem::new();
584/// // ... add factors to problem ...
585///
586/// let initial_values = HashMap::new();
587/// // ... initialize parameters ...
588///
589/// let mut solver = GaussNewton::new();
590/// let result = solver.optimize(&problem, &initial_values)?;
591/// # Ok(())
592/// # }
593/// ```
594///
595/// # See Also
596///
597/// - [`GaussNewtonConfig`] - Configuration options
598/// - [`LevenbergMarquardt`](crate::optimizer::LevenbergMarquardt) - For adaptive damping
599/// - [`DogLeg`](crate::optimizer::DogLeg) - For trust region control
600pub struct GaussNewton {
601    config: GaussNewtonConfig,
602    jacobi_scaling: Option<sparse::SparseColMat<usize, f64>>,
603    observers: optimizer::OptObserverVec,
604}
605
606impl Default for GaussNewton {
607    fn default() -> Self {
608        Self::new()
609    }
610}
611
612impl GaussNewton {
613    /// Create a new Gauss-Newton solver with default configuration.
614    pub fn new() -> Self {
615        Self::with_config(GaussNewtonConfig::default())
616    }
617
618    /// Create a new Gauss-Newton solver with the given configuration.
619    pub fn with_config(config: GaussNewtonConfig) -> Self {
620        Self {
621            config,
622            jacobi_scaling: None,
623            observers: optimizer::OptObserverVec::new(),
624        }
625    }
626
627    /// Add an observer to the solver.
628    ///
629    /// Observers are notified at each iteration with the current variable values.
630    /// This enables real-time visualization, logging, metrics collection, etc.
631    ///
632    /// # Examples
633    ///
634    /// ```no_run
635    /// use apex_solver::optimizer::GaussNewton;
636    /// # use apex_solver::optimizer::OptObserver;
637    /// # use std::collections::HashMap;
638    /// # use apex_solver::core::problem::VariableEnum;
639    ///
640    /// # struct MyObserver;
641    /// # impl OptObserver for MyObserver {
642    /// #     fn on_step(&self, _: &HashMap<String, VariableEnum>, _: usize) {}
643    /// # }
644    /// let mut solver = GaussNewton::new();
645    /// solver.add_observer(MyObserver);
646    /// ```
647    pub fn add_observer(&mut self, observer: impl optimizer::OptObserver + 'static) {
648        self.observers.add(observer);
649    }
650
651    /// Create the appropriate linear solver based on configuration
652    fn create_linear_solver(&self) -> Box<dyn linalg::SparseLinearSolver> {
653        match self.config.linear_solver_type {
654            linalg::LinearSolverType::SparseCholesky => {
655                Box::new(linalg::SparseCholeskySolver::new())
656            }
657            linalg::LinearSolverType::SparseQR => Box::new(linalg::SparseQRSolver::new()),
658            linalg::LinearSolverType::SparseSchurComplement => {
659                // Schur complement solver requires special handling - fallback to Cholesky
660                Box::new(linalg::SparseCholeskySolver::new())
661            }
662        }
663    }
664
665    /// Check convergence using comprehensive termination criteria.
667    ///
668    /// Implements 8 termination criteria following Ceres Solver standards for Gauss-Newton:
669    ///
670    /// 1. **Gradient Norm (First-Order Optimality)**: ||g|| < gradient_tolerance (L2 norm of J^T·r)
671    /// 2. **Parameter Change Tolerance**: ||h|| ≤ parameter_tolerance * (||x|| + parameter_tolerance)
672    /// 3. **Function Value Change Tolerance**: |ΔF| < cost_tolerance * F
673    /// 4. **Objective Function Cutoff**: F_new < min_cost_threshold (optional)
674    /// 5. **Singular/Ill-Conditioned Jacobian**: Detected during linear solve
675    /// 6. **Invalid Numerical Values**: NaN or Inf in cost or parameters
676    /// 7. **Maximum Iterations**: iteration >= max_iterations
677    /// 8. **Timeout**: elapsed >= timeout
678    ///
679    /// **Note**: Trust region radius termination is NOT applicable to Gauss-Newton,
680    /// which takes the full step at every iteration rather than using a trust region.
681    ///
682    /// # Arguments
683    ///
684    /// * `iteration` - Current iteration number
685    /// * `current_cost` - Cost before applying the step
686    /// * `new_cost` - Cost after applying the step
687    /// * `parameter_norm` - L2 norm of current parameter vector ||x||
688    /// * `parameter_update_norm` - L2 norm of parameter update step ||h||
689    /// * `gradient_norm` - L2 norm of the gradient ||J^T·r||
690    /// * `elapsed` - Elapsed time since optimization start
691    ///
692    /// # Returns
693    ///
694    /// `Some(OptimizationStatus)` if any termination criterion is satisfied, `None` otherwise.
695    ///
696    /// # Design Note
697    /// This internal convergence checker requires multiple scalar metrics for comprehensive
698    /// termination criteria. Grouping into a struct would reduce clarity without performance benefit.
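    ///
    /// A small numeric sketch of the relative tolerance checks (criteria 2 and 3), using
    /// hypothetical values chosen purely for illustration:
    ///
    /// ```
    /// let (parameter_tolerance, cost_tolerance) = (1e-8_f64, 1e-6_f64);
    /// let (parameter_norm, step_norm) = (10.0_f64, 5e-8_f64);
    /// let (current_cost, new_cost) = (2.0_f64, 1.9999985_f64);
    ///
    /// // Criterion 2 (xtol): ||h|| ≤ ε_param · (||x|| + ε_param)
    /// let xtol_hit = step_norm <= parameter_tolerance * (parameter_norm + parameter_tolerance);
    ///
    /// // Criterion 3 (ftol): |F_old - F_new| < ε_cost · F_old
    /// let ftol_hit = (current_cost - new_cost).abs() / current_cost.max(1e-10) < cost_tolerance;
    /// assert!(xtol_hit && ftol_hit);
    /// ```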
699    #[allow(clippy::too_many_arguments)]
700    fn check_convergence(
701        &self,
702        iteration: usize,
703        current_cost: f64,
704        new_cost: f64,
705        parameter_norm: f64,
706        parameter_update_norm: f64,
707        gradient_norm: f64,
708        elapsed: time::Duration,
709    ) -> Option<optimizer::OptimizationStatus> {
710        // ========================================================================
711        // CRITICAL SAFETY CHECKS (perform first, before convergence checks)
712        // ========================================================================
713
714        // CRITERION 6: Invalid Numerical Values (NaN/Inf)
715        // Always check for numerical instability first
716        if !new_cost.is_finite() || !parameter_update_norm.is_finite() || !gradient_norm.is_finite()
717        {
718            return Some(optimizer::OptimizationStatus::InvalidNumericalValues);
719        }
720
721        // CRITERION 8: Timeout
722        // Check wall-clock time limit
723        if let Some(timeout) = self.config.timeout
724            && elapsed >= timeout
725        {
726            return Some(optimizer::OptimizationStatus::Timeout);
727        }
728
729        // CRITERION 7: Maximum Iterations
730        // Check iteration count limit
731        if iteration >= self.config.max_iterations {
732            return Some(optimizer::OptimizationStatus::MaxIterationsReached);
733        }
734
735        // ========================================================================
736        // CONVERGENCE CRITERIA
737        // ========================================================================
738        // Note: Gauss-Newton always accepts the computed step (no step acceptance check)
739
740        // CRITERION 1: Gradient Norm (First-Order Optimality)
741        // Check if gradient infinity norm is below threshold
742        // This indicates we're at a critical point (local minimum, saddle, or maximum)
743        if gradient_norm < self.config.gradient_tolerance {
744            return Some(optimizer::OptimizationStatus::GradientToleranceReached);
745        }
746
747        // Only check parameter and cost criteria after first iteration
748        if iteration > 0 {
749            // CRITERION 2: Parameter Change Tolerance (xtol)
750            // Ceres formula: ||h|| ≤ ε_param * (||x|| + ε_param)
751            // This is a relative measure that scales with parameter magnitude
752            let relative_step_tolerance = self.config.parameter_tolerance
753                * (parameter_norm + self.config.parameter_tolerance);
754
755            if parameter_update_norm <= relative_step_tolerance {
756                return Some(optimizer::OptimizationStatus::ParameterToleranceReached);
757            }
758
759            // CRITERION 3: Function Value Change Tolerance (ftol)
760            // Ceres formula: |ΔF| < ε_cost * F
761            // Check relative cost change (not absolute)
762            let cost_change = (current_cost - new_cost).abs();
763            let relative_cost_change = cost_change / current_cost.max(1e-10); // Avoid division by zero
764
765            if relative_cost_change < self.config.cost_tolerance {
766                return Some(optimizer::OptimizationStatus::CostToleranceReached);
767            }
768        }
769
770        // CRITERION 4: Objective Function Cutoff (optional early stopping)
771        // Useful for "good enough" solutions
772        if let Some(min_cost) = self.config.min_cost_threshold
773            && new_cost < min_cost
774        {
775            return Some(optimizer::OptimizationStatus::MinCostThresholdReached);
776        }
777
778        // CRITERION 5: Singular/Ill-Conditioned Jacobian
779        // Note: This is typically detected during the linear solve and handled there
780        // The max_condition_number check would be expensive to compute here
781        // If linear solve fails, it returns an error that's converted to NumericalFailure
782
783        // No termination criterion satisfied
784        None
785    }
786
787    /// Compute total parameter vector norm ||x||.
788    ///
789    /// Computes the L2 norm of all parameter vectors concatenated together.
790    /// This is used in the relative parameter tolerance check.
791    ///
792    /// # Arguments
793    ///
794    /// * `variables` - Map of variable names to their current values
795    ///
796    /// # Returns
797    ///
798    /// The L2 norm of the concatenated parameter vector
799    fn compute_parameter_norm(
800        variables: &collections::HashMap<String, problem::VariableEnum>,
801    ) -> f64 {
802        variables
803            .values()
804            .map(|v| {
805                let vec = v.to_vector();
806                vec.norm_squared()
807            })
808            .sum::<f64>()
809            .sqrt()
810    }
811
812    /// Create Jacobi scaling matrix from Jacobian
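    ///
    /// Each diagonal entry is `1 / (1 + ||J_col||)`, matching the computation below, so columns
    /// with large norms are damped while already well-scaled columns are left nearly unchanged.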
813    fn create_jacobi_scaling(
814        &self,
815        jacobian: &sparse::SparseColMat<usize, f64>,
816    ) -> Result<sparse::SparseColMat<usize, f64>, optimizer::OptimizerError> {
817        use faer::sparse::Triplet;
818
819        let cols = jacobian.ncols();
820        let jacobi_scaling_vec: Vec<Triplet<usize, usize, f64>> = (0..cols)
821            .map(|c| {
822                // Compute column norm: sqrt(sum(J_col^2))
823                let col_norm_squared: f64 = jacobian
824                    .triplet_iter()
825                    .filter(|t| t.col == c)
826                    .map(|t| t.val * t.val)
827                    .sum();
828                let col_norm = col_norm_squared.sqrt();
829                // Scaling factor: 1.0 / (1.0 + col_norm)
830                let scaling = 1.0 / (1.0 + col_norm);
831                Triplet::new(c, c, scaling)
832            })
833            .collect();
834
835        sparse::SparseColMat::try_new_from_triplets(cols, cols, &jacobi_scaling_vec).map_err(|e| {
836            optimizer::OptimizerError::JacobiScalingCreation(e.to_string()).log_with_source(e)
837        })
838    }
839
840    /// Initialize optimization state from problem and initial parameters
841    fn initialize_optimization_state(
842        &self,
843        problem: &problem::Problem,
844        initial_params: &collections::HashMap<
845            String,
846            (manifold::ManifoldType, nalgebra::DVector<f64>),
847        >,
848    ) -> Result<LinearizerResult, error::ApexSolverError> {
849        // Initialize variables from initial values
850        let variables = problem.initialize_variables(initial_params);
851
852        // Create column mapping for variables
853        let mut variable_index_map = collections::HashMap::new();
854        let mut col_offset = 0;
855        let mut sorted_vars: Vec<String> = variables.keys().cloned().collect();
856        sorted_vars.sort();
857
858        for var_name in &sorted_vars {
859            variable_index_map.insert(var_name.clone(), col_offset);
860            col_offset += variables[var_name].get_size();
861        }
862
863        // Build symbolic structure for sparse operations
864        let symbolic_structure =
865            problem.build_symbolic_structure(&variables, &variable_index_map, col_offset)?;
866
867        // Initial cost evaluation (residual only, no Jacobian needed)
868        let residual = problem.compute_residual_sparse(&variables)?;
869        let current_cost = optimizer::compute_cost(&residual);
870        let initial_cost = current_cost;
871
872        Ok(LinearizerResult {
873            variables,
874            variable_index_map,
875            sorted_vars,
876            symbolic_structure,
877            current_cost,
878            initial_cost,
879        })
880    }
881
882    /// Process Jacobian by creating and applying Jacobi scaling if enabled
883    fn process_jacobian(
884        &mut self,
885        jacobian: &sparse::SparseColMat<usize, f64>,
886        iteration: usize,
887    ) -> Result<sparse::SparseColMat<usize, f64>, optimizer::OptimizerError> {
888        // Create Jacobi scaling on first iteration if enabled
889        if iteration == 0 {
890            let scaling = self.create_jacobi_scaling(jacobian)?;
891            self.jacobi_scaling = Some(scaling);
892        }
893        let scaling = self
894            .jacobi_scaling
895            .as_ref()
896            .ok_or_else(|| optimizer::OptimizerError::JacobiScalingNotInitialized.log())?;
897        Ok(jacobian * scaling)
898    }
899
900    /// Compute Gauss-Newton step by solving the normal equations
901    fn compute_gauss_newton_step(
902        &self,
903        residuals: &faer::Mat<f64>,
904        scaled_jacobian: &sparse::SparseColMat<usize, f64>,
905        linear_solver: &mut Box<dyn linalg::SparseLinearSolver>,
906    ) -> Option<StepResult> {
907        // Solve the Gauss-Newton normal equations: J^T·J·Δx = -J^T·r
908        // using the (possibly Jacobi-scaled) Jacobian
909        let residuals_owned = residuals.as_ref().to_owned();
910        let scaled_step = linear_solver
911            .solve_normal_equation(&residuals_owned, scaled_jacobian)
912            .ok()?;
913
914        // Get gradient from the solver (J^T * r)
915        let gradient = linear_solver.get_gradient()?;
916        // Compute gradient norm for convergence check
917        let gradient_norm = gradient.norm_l2();
918
919        // Apply inverse Jacobi scaling to get final step (if enabled)
920        let step = if self.config.use_jacobi_scaling {
921            let scaling = self.jacobi_scaling.as_ref()?;
922            &scaled_step * scaling
923        } else {
924            scaled_step
925        };
926
927        Some(StepResult {
928            step,
929            gradient_norm,
930        })
931    }
932
933    /// Apply step to parameters and evaluate new cost
934    fn apply_step_and_evaluate_cost(
935        &self,
936        step_result: &StepResult,
937        state: &mut LinearizerResult,
938        problem: &problem::Problem,
939    ) -> error::ApexSolverResult<CostEvaluation> {
940        // Apply parameter updates using manifold operations
941        let _step_norm = optimizer::apply_parameter_step(
942            &mut state.variables,
943            step_result.step.as_ref(),
944            &state.sorted_vars,
945        );
946
947        // Compute new cost (residual only, no Jacobian needed for step evaluation)
948        let new_residual = problem.compute_residual_sparse(&state.variables)?;
949        let new_cost = optimizer::compute_cost(&new_residual);
950
951        // Compute cost reduction
952        let cost_reduction = state.current_cost - new_cost;
953
954        // Update current cost
955        state.current_cost = new_cost;
956
957        Ok(CostEvaluation {
958            new_cost,
959            cost_reduction,
960        })
961    }
962
963    /// Create optimization summary
964    ///
965    /// # Design Note
966    /// This internal summary builder accepts individual scalar results from the optimization loop.
967    /// A struct parameter would not improve readability for this private method.
968    #[allow(clippy::too_many_arguments)]
969    fn create_summary(
970        &self,
971        initial_cost: f64,
972        final_cost: f64,
973        iterations: usize,
974        max_gradient_norm: f64,
975        final_gradient_norm: f64,
976        max_parameter_update_norm: f64,
977        final_parameter_update_norm: f64,
978        total_cost_reduction: f64,
979        total_time: time::Duration,
980        iteration_history: Vec<IterationStats>,
981        convergence_status: optimizer::OptimizationStatus,
982    ) -> GaussNewtonSummary {
983        GaussNewtonSummary {
984            initial_cost,
985            final_cost,
986            iterations,
987            average_cost_reduction: if iterations > 0 {
988                total_cost_reduction / iterations as f64
989            } else {
990                0.0
991            },
992            max_gradient_norm,
993            final_gradient_norm,
994            max_parameter_update_norm,
995            final_parameter_update_norm,
996            total_time,
997            average_time_per_iteration: if iterations > 0 {
998                total_time / iterations as u32
999            } else {
1000                time::Duration::from_secs(0)
1001            },
1002            iteration_history,
1003            convergence_status,
1004        }
1005    }
1006
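    /// Run Gauss-Newton optimization on `problem`, starting from the values in `initial_params`.
    ///
    /// Iterates until one of the termination criteria described in [`GaussNewtonConfig`] is met,
    /// returning an [`optimizer::SolverResult`] with the optimized variables, cost statistics, and
    /// convergence information, or an error if linearization or the linear solve fails.
    ///
    /// This is the inherent method; the same entry point is also available through the
    /// [`optimizer::Solver`] trait implementation below.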
1007    pub fn optimize(
1008        &mut self,
1009        problem: &problem::Problem,
1010        initial_params: &collections::HashMap<
1011            String,
1012            (manifold::ManifoldType, nalgebra::DVector<f64>),
1013        >,
1014    ) -> Result<
1015        optimizer::SolverResult<collections::HashMap<String, problem::VariableEnum>>,
1016        error::ApexSolverError,
1017    > {
1018        let start_time = time::Instant::now();
1019        let mut iteration = 0;
1020        let mut cost_evaluations = 1; // Initial cost evaluation
1021        let mut jacobian_evaluations = 0;
1022
1023        // Initialize optimization state
1024        let mut state = self.initialize_optimization_state(problem, initial_params)?;
1025
1026        // Create linear solver
1027        let mut linear_solver = self.create_linear_solver();
1028
1029        // Initialize summary tracking variables
1030        let mut max_gradient_norm: f64 = 0.0;
1031        let mut max_parameter_update_norm: f64 = 0.0;
1032        let mut total_cost_reduction = 0.0;
1033        let mut final_gradient_norm;
1034        let mut final_parameter_update_norm;
1035
1036        // Initialize iteration statistics tracking
1037        let mut iteration_stats = Vec::with_capacity(self.config.max_iterations);
1038        let mut previous_cost = state.current_cost;
1039
1040        // Print configuration and header if debug level is enabled
1041        if tracing::enabled!(tracing::Level::DEBUG) {
1042            self.config.print_configuration();
1043            IterationStats::print_header();
1044        }
1045
1046        // Main optimization loop
1047        loop {
1048            let iter_start = time::Instant::now();
1049            // Evaluate residuals and Jacobian
1050            let (residuals, jacobian) = problem.compute_residual_and_jacobian_sparse(
1051                &state.variables,
1052                &state.variable_index_map,
1053                &state.symbolic_structure,
1054            )?;
1055            jacobian_evaluations += 1;
1056
1057            // Process Jacobian (apply scaling if enabled)
1058            let scaled_jacobian = if self.config.use_jacobi_scaling {
1059                self.process_jacobian(&jacobian, iteration)?
1060            } else {
1061                jacobian
1062            };
1063
1064            // Compute Gauss-Newton step
1065            let step_result = match self.compute_gauss_newton_step(
1066                &residuals,
1067                &scaled_jacobian,
1068                &mut linear_solver,
1069            ) {
1070                Some(result) => result,
1071                None => {
1072                    return Err(optimizer::OptimizerError::LinearSolveFailed(
1073                        "Linear solver failed to solve Gauss-Newton system".to_string(),
1074                    )
1075                    .into());
1076                }
1077            };
1078
1079            // Update tracking variables
1080            max_gradient_norm = max_gradient_norm.max(step_result.gradient_norm);
1081            final_gradient_norm = step_result.gradient_norm;
1082            let step_norm = step_result.step.norm_l2();
1083            max_parameter_update_norm = max_parameter_update_norm.max(step_norm);
1084            final_parameter_update_norm = step_norm;
1085
1086            // Capture cost before applying step (for convergence check)
1087            let cost_before_step = state.current_cost;
1088
1089            // Apply step and evaluate new cost
1090            let cost_eval = self.apply_step_and_evaluate_cost(&step_result, &mut state, problem)?;
1091            cost_evaluations += 1;
1092            total_cost_reduction += cost_eval.cost_reduction;
1093
1094            // OPTIMIZATION: Only collect iteration statistics if debug level is enabled
1095            // This eliminates ~2-5ms overhead per iteration for non-debug optimization
1096            if tracing::enabled!(tracing::Level::DEBUG) {
1097                let iter_elapsed_ms = iter_start.elapsed().as_secs_f64() * 1000.0;
1098                let total_elapsed_ms = start_time.elapsed().as_secs_f64() * 1000.0;
1099
1100                let stats = IterationStats {
1101                    iteration,
1102                    cost: state.current_cost,
1103                    cost_change: previous_cost - state.current_cost,
1104                    gradient_norm: step_result.gradient_norm,
1105                    step_norm,
1106                    tr_ratio: 0.0,  // Not used in Gauss-Newton
1107                    tr_radius: 0.0, // Not used in Gauss-Newton
1108                    ls_iter: 0,     // Direct solver (Cholesky) has no iterations
1109                    iter_time_ms: iter_elapsed_ms,
1110                    total_time_ms: total_elapsed_ms,
1111                    accepted: true, // Gauss-Newton always accepts steps
1112                };
1113
1114                iteration_stats.push(stats.clone());
1115                stats.print_line();
1116            }
1117
1118            previous_cost = state.current_cost;
1119
1120            // Notify all observers with current state
1121            // First set metrics data, then notify observers
1122            self.observers.set_iteration_metrics(
1123                state.current_cost,
1124                step_result.gradient_norm,
1125                None, // Gauss-Newton doesn't use damping
1126                step_norm,
1127                None, // Gauss-Newton doesn't use step quality
1128            );
1129
1130            // Set matrix data if available and there are observers
1131            if !self.observers.is_empty()
1132                && let (Some(hessian), Some(gradient)) =
1133                    (linear_solver.get_hessian(), linear_solver.get_gradient())
1134            {
1135                self.observers
1136                    .set_matrix_data(Some(hessian.clone()), Some(gradient.clone()));
1137            }
1138
1139            // Notify observers with current variable values and iteration number
1140            self.observers.notify(&state.variables, iteration);
1141
1142            // Compute parameter norm for convergence check
1143            let parameter_norm = Self::compute_parameter_norm(&state.variables);
1144
1145            // Check convergence using comprehensive termination criteria
1146            let elapsed = start_time.elapsed();
1147            if let Some(status) = self.check_convergence(
1148                iteration,
1149                cost_before_step,
1150                cost_eval.new_cost,
1151                parameter_norm,
1152                step_norm,
1153                step_result.gradient_norm,
1154                elapsed,
1155            ) {
1156                let summary = self.create_summary(
1157                    state.initial_cost,
1158                    state.current_cost,
1159                    iteration + 1,
1160                    max_gradient_norm,
1161                    final_gradient_norm,
1162                    max_parameter_update_norm,
1163                    final_parameter_update_norm,
1164                    total_cost_reduction,
1165                    elapsed,
1166                    iteration_stats.clone(),
1167                    status.clone(),
1168                );
1169
1170                // Print summary only if debug level is enabled
1171                if tracing::enabled!(tracing::Level::DEBUG) {
1172                    debug!("{}", summary);
1173                }
1174
1175                // Compute covariances if enabled
1176                let covariances = if self.config.compute_covariances {
1177                    problem.compute_and_set_covariances(
1178                        &mut linear_solver,
1179                        &mut state.variables,
1180                        &state.variable_index_map,
1181                    )
1182                } else {
1183                    None
1184                };
1185
1186                return Ok(optimizer::SolverResult {
1187                    status,
1188                    iterations: iteration + 1,
1189                    initial_cost: state.initial_cost,
1190                    final_cost: state.current_cost,
1191                    parameters: state.variables.into_iter().collect(),
1192                    elapsed_time: elapsed,
1193                    convergence_info: Some(optimizer::ConvergenceInfo {
1194                        final_gradient_norm,
1195                        final_parameter_update_norm,
1196                        cost_evaluations,
1197                        jacobian_evaluations,
1198                    }),
1199                    covariances,
1200                });
1201            }
1202
1203            iteration += 1;
1204        }
1205    }
1206}
1207
1208impl optimizer::Solver for GaussNewton {
1209    type Config = GaussNewtonConfig;
1210    type Error = error::ApexSolverError;
1211
1212    fn new() -> Self {
1213        Self::default()
1214    }
1215
1216    fn optimize(
1217        &mut self,
1218        problem: &problem::Problem,
1219        initial_params: &std::collections::HashMap<
1220            String,
1221            (manifold::ManifoldType, nalgebra::DVector<f64>),
1222        >,
1223    ) -> Result<
1224        optimizer::SolverResult<std::collections::HashMap<String, problem::VariableEnum>>,
1225        Self::Error,
1226    > {
1227        self.optimize(problem, initial_params)
1228    }
1229}
1230
1231#[cfg(test)]
1232mod tests {
1233    use crate::{core::problem, factors, manifold, optimizer};
1234    use nalgebra::dvector;
1235    use std::collections;
1236
1237    /// Custom Rosenbrock Factor 1: r1 = 10(x2 - x1²)
1238    /// Demonstrates extensibility - custom factors can be defined outside of factors.rs
1239    #[derive(Debug, Clone)]
1240    struct RosenbrockFactor1;
1241
1242    impl factors::Factor for RosenbrockFactor1 {
1243        fn linearize(
1244            &self,
1245            params: &[nalgebra::DVector<f64>],
1246            compute_jacobian: bool,
1247        ) -> (nalgebra::DVector<f64>, Option<nalgebra::DMatrix<f64>>) {
1248            let x1 = params[0][0];
1249            let x2 = params[1][0];
1250
1251            // Residual: r1 = 10(x2 - x1²)
1252            let residual = nalgebra::dvector![10.0 * (x2 - x1 * x1)];
1253
1254            // Jacobian: ∂r1/∂x1 = -20*x1, ∂r1/∂x2 = 10
1255            let jacobian = if compute_jacobian {
1256                let mut jac = nalgebra::DMatrix::zeros(1, 2);
1257                jac[(0, 0)] = -20.0 * x1;
1258                jac[(0, 1)] = 10.0;
1259                Some(jac)
1260            } else {
1261                None
1262            };
1263
1264            (residual, jacobian)
1265        }
1266
1267        fn get_dimension(&self) -> usize {
1268            1
1269        }
1270    }
1271
1272    /// Custom Rosenbrock Factor 2: r2 = 1 - x1
1273    /// Demonstrates extensibility - custom factors can be defined outside of factors.rs
1274    #[derive(Debug, Clone)]
1275    struct RosenbrockFactor2;
1276
1277    impl factors::Factor for RosenbrockFactor2 {
1278        fn linearize(
1279            &self,
1280            params: &[nalgebra::DVector<f64>],
1281            compute_jacobian: bool,
1282        ) -> (nalgebra::DVector<f64>, Option<nalgebra::DMatrix<f64>>) {
1283            let x1 = params[0][0];
1284
1285            // Residual: r2 = 1 - x1
1286            let residual = nalgebra::dvector![1.0 - x1];
1287
1288            // Jacobian: ∂r2/∂x1 = -1
1289            let jacobian = if compute_jacobian {
1290                Some(nalgebra::DMatrix::from_element(1, 1, -1.0))
1291            } else {
1292                None
1293            };
1294
1295            (residual, jacobian)
1296        }
1297
1298        fn get_dimension(&self) -> usize {
1299            1
1300        }
1301    }
1302
1303    #[test]
1304    fn test_rosenbrock_optimization() -> Result<(), Box<dyn std::error::Error>> {
1305        // Rosenbrock function test:
1306        // Minimize: r1² + r2² where
1307        //   r1 = 10(x2 - x1²)
1308        //   r2 = 1 - x1
1309        // Starting point: [-1.2, 1.0]
1310        // Expected minimum: [1.0, 1.0]
1311
1312        let mut problem = problem::Problem::new();
1313        let mut initial_values = collections::HashMap::new();
1314
1315        // Add variables using Rn manifold (Euclidean space)
1316        initial_values.insert(
1317            "x1".to_string(),
1318            (manifold::ManifoldType::RN, dvector![-1.2]),
1319        );
1320        initial_values.insert(
1321            "x2".to_string(),
1322            (manifold::ManifoldType::RN, dvector![1.0]),
1323        );
1324
1325        // Add custom factors (demonstrates extensibility!)
1326        problem.add_residual_block(&["x1", "x2"], Box::new(RosenbrockFactor1), None);
1327        problem.add_residual_block(&["x1"], Box::new(RosenbrockFactor2), None);
1328
1329        // Configure Gauss-Newton optimizer
1330        let config = optimizer::gauss_newton::GaussNewtonConfig::new()
1331            .with_max_iterations(100)
1332            .with_cost_tolerance(1e-8)
1333            .with_parameter_tolerance(1e-8)
1334            .with_gradient_tolerance(1e-10);
1335
1336        let mut solver = optimizer::GaussNewton::with_config(config);
1337        let result = solver.optimize(&problem, &initial_values)?;
1338
1339        // Extract final values
1340        let x1_final = result
1341            .parameters
1342            .get("x1")
1343            .ok_or("x1 not found")?
1344            .to_vector()[0];
1345        let x2_final = result
1346            .parameters
1347            .get("x2")
1348            .ok_or("x2 not found")?
1349            .to_vector()[0];
1350
1351        // Verify convergence to [1.0, 1.0]
1352        assert!(
1353            matches!(
1354                result.status,
1355                optimizer::OptimizationStatus::Converged
1356                    | optimizer::OptimizationStatus::CostToleranceReached
1357                    | optimizer::OptimizationStatus::ParameterToleranceReached
1358                    | optimizer::OptimizationStatus::GradientToleranceReached
1359            ),
1360            "Optimization should converge"
1361        );
1362        assert!(
1363            (x1_final - 1.0).abs() < 1e-4,
1364            "x1 should converge to 1.0, got {}",
1365            x1_final
1366        );
1367        assert!(
1368            (x2_final - 1.0).abs() < 1e-4,
1369            "x2 should converge to 1.0, got {}",
1370            x2_final
1371        );
1372        assert!(
1373            result.final_cost < 1e-6,
1374            "Final cost should be near zero, got {}",
1375            result.final_cost
1376        );
1377        Ok(())
1378    }
1379}