// scirs2_integrate/ode/utils/jacobian/newton.rs

//! Newton solver for nonlinear equations
//!
//! This module provides Newton-type solvers for nonlinear equations
//! that arise in implicit ODE solvers. It includes standard Newton's method,
//! modified Newton's method, and inexact Newton variants.

use crate::common::IntegrateFloat;
use crate::error::{IntegrateError, IntegrateResult};
use crate::ode::utils::jacobian::JacobianManager;
use crate::ode::utils::linear_solvers::{auto_solve_linear_system, LinearSolverType};
use scirs2_core::ndarray::Array1;
12
/// Tunable parameters controlling a Newton (or modified Newton) solve.
#[derive(Debug, Clone)]
pub struct NewtonParameters<F: IntegrateFloat> {
    /// Maximum number of Newton iterations before reporting a convergence failure
    pub max_iterations: usize,
    /// Absolute tolerance on the residual norm for convergence
    pub abs_tolerance: F,
    /// Relative tolerance (scaled by the norm of the iterate) for convergence
    pub rel_tolerance: F,
    /// How often the Jacobian is recomputed, in iterations (1 = every iteration)
    pub jacobian_update_freq: usize,
    /// Initial step-size damping factor (1.0 = full Newton steps)
    pub damping_factor: F,
    /// Smallest damping factor the backtracking line search may reach
    pub min_damping: F,
    /// Reuse a Jacobian cached from a previous solve when available
    pub reuse_jacobian: bool,
    /// Force a Jacobian (re)computation on the first iteration
    pub force_jacobian_init: bool,
}
34impl<F: IntegrateFloat> Default for NewtonParameters<F> {
35    fn default() -> Self {
36        NewtonParameters {
37            max_iterations: 10,
38            abs_tolerance: F::from_f64(1e-10).unwrap(),
39            rel_tolerance: F::from_f64(1e-8).unwrap(),
40            jacobian_update_freq: 1,
41            damping_factor: F::one(),
42            min_damping: F::from_f64(0.1).unwrap(),
43            reuse_jacobian: true,
44            force_jacobian_init: false,
45        }
46    }
47}
48
/// Outcome of a Newton solve: the solution plus diagnostic work counters.
#[derive(Debug, Clone)]
pub struct NewtonResult<F: IntegrateFloat> {
    /// Final iterate (the approximate root)
    pub solution: Array1<F>,
    /// Residual F(x) evaluated at the final iterate
    pub residual: Array1<F>,
    /// Number of Newton iterations performed
    pub iterations: usize,
    /// Whether the convergence criteria were satisfied
    pub converged: bool,
    /// Norm of the final residual (L-infinity)
    pub error_estimate: F,
    /// Number of residual-function evaluations
    pub func_evals: usize,
    /// Number of Jacobian evaluations
    pub jac_evals: usize,
    /// Number of linear-system solves
    pub linear_solves: usize,
}
70/// Solve a nonlinear system F(x) = 0 using Newton's method
71///
72/// This version uses the improved Jacobian handling for performance
73#[allow(clippy::explicit_counter_loop)]
74#[allow(dead_code)]
75pub fn newton_solve<F, Func>(
76    f: Func,
77    x0: Array1<F>,
78    jac_manager: &mut JacobianManager<F>,
79    params: NewtonParameters<F>,
80) -> IntegrateResult<NewtonResult<F>>
81where
82    F: IntegrateFloat + std::default::Default,
83    Func: Fn(&Array1<F>) -> Array1<F>,
84{
85    let _n = x0.len();
86    let mut x = x0.clone();
87    let mut residual = f(&x);
88    let mut func_evals = 1;
89    let mut jac_evals = 0;
90    let mut linear_solves = 0;
91
92    // Calculate initial error
93    let mut error = calculate_error(&residual, &params);
94
95    // Check if already converged
96    if error <= params.abs_tolerance {
97        return Ok(NewtonResult {
98            solution: x,
99            residual,
100            iterations: 0,
101            converged: true,
102            error_estimate: error,
103            func_evals,
104            jac_evals,
105            linear_solves,
106        });
107    }
108
109    // Create dummy timepoint - for ODE Jacobians we typically need time,
110    // but for generic Newton solve we can use a dummy value
111    let dummy_t = F::zero();
112
113    // Iteration loop
114    for iter in 0..params.max_iterations {
115        // Update Jacobian if needed
116        if iter == 0 && params.force_jacobian_init {
117            // Force Jacobian update on first iteration if requested
118            jac_manager.update_jacobian(dummy_t, &x, &|_t, y| f(&y.to_owned()), None)?;
119            jac_evals += 1;
120        } else if iter > 0 && iter % params.jacobian_update_freq == 0 {
121            // Update Jacobian on specified frequency
122            jac_manager.update_jacobian(dummy_t, &x, &|_t, y| f(&y.to_owned()), None)?;
123            jac_evals += 1;
124        } else if jac_manager.jacobian().is_none() {
125            // Always update if no Jacobian exists
126            jac_manager.update_jacobian(dummy_t, &x, &|_t, y| f(&y.to_owned()), None)?;
127            jac_evals += 1;
128        }
129
130        // Get the Jacobian
131        let jacobian = jac_manager.jacobian().unwrap();
132
133        // Solve the linear system J * dx = -F(x) with optimized solver
134        let neg_residual = residual.clone() * F::from_f64(-1.0).unwrap();
135
136        // Use auto solver selection for the best performance
137        let dx = auto_solve_linear_system(
138            &jacobian.view(),
139            &neg_residual.view(),
140            LinearSolverType::Direct,
141        )?;
142        linear_solves += 1;
143
144        // Apply damping if needed
145        let mut damping = params.damping_factor;
146        let mut x_new = x.clone() + &dx * damping;
147        let mut residual_new = f(&x_new);
148        func_evals += 1;
149
150        // Calculate new error
151        let mut error_new = calculate_error(&residual_new, &params);
152
153        // Try to reduce step size if error increased
154        // Simple backtracking line search
155        if error_new > error && damping > params.min_damping {
156            let mut backtrack_count = 0;
157
158            while error_new > error && damping > params.min_damping && backtrack_count < 5 {
159                damping *= F::from_f64(0.5).unwrap();
160                x_new = x.clone() + &dx * damping;
161                residual_new = f(&x_new);
162                func_evals += 1;
163                error_new = calculate_error(&residual_new, &params);
164                backtrack_count += 1;
165            }
166        }
167
168        // Update current point
169        x = x_new;
170        residual = residual_new;
171        error = error_new;
172
173        // Check convergence
174        if error <= params.abs_tolerance || (error <= params.rel_tolerance * calculate_norm(&x)) {
175            return Ok(NewtonResult {
176                solution: x,
177                residual,
178                iterations: iter + 1,
179                converged: true,
180                error_estimate: error,
181                func_evals,
182                jac_evals,
183                linear_solves,
184            });
185        }
186    }
187
188    // Failed to converge
189    Err(IntegrateError::ConvergenceError(format!(
190        "Newton's method failed to converge in {} iterations (error = {:.2e})",
191        params.max_iterations, error,
192    )))
193}
194
195/// Modified Newton solve that reuses the same Jacobian for multiple iterations
196#[allow(dead_code)]
197pub fn modified_newton_solve<F, Func>(
198    f: Func,
199    x0: Array1<F>,
200    jac_manager: &mut JacobianManager<F>,
201    params: NewtonParameters<F>,
202) -> IntegrateResult<NewtonResult<F>>
203where
204    F: IntegrateFloat + std::default::Default,
205    Func: Fn(&Array1<F>) -> Array1<F>,
206{
207    // Set large Jacobian update frequency
208    let modified_params = NewtonParameters {
209        jacobian_update_freq: params.max_iterations + 1, // Only update at beginning
210        ..params
211    };
212
213    // Use standard Newton with modified parameters
214    newton_solve(f, x0, jac_manager, modified_params)
215}
216
217/// Calculate error norm of residual vector
218#[allow(dead_code)]
219fn calculate_error<F: IntegrateFloat>(residual: &Array1<F>, params: &NewtonParameters<F>) -> F {
220    // Use L-infinity norm (max absolute value)
221    let mut max_abs = F::zero();
222    for &r in residual.iter() {
223        max_abs = max_abs.max(r.abs());
224    }
225    max_abs
226}
227
228/// Calculate norm of a vector (for relative convergence)
229#[allow(dead_code)]
230fn calculate_norm<F: IntegrateFloat>(x: &Array1<F>) -> F {
231    // Use L-infinity norm
232    let mut max_abs = F::zero();
233    for &val in x.iter() {
234        max_abs = max_abs.max(val.abs());
235    }
236    max_abs
237}