scirs2_integrate/ode/methods/
enhanced_lsoda.rs

//! Enhanced LSODA method for ODE solving
//!
//! This module implements an enhanced version of LSODA (Livermore Solver for Ordinary
//! Differential Equations with Automatic method switching) for solving ODE systems.
//! It features improved stiffness detection, more robust method switching, and
//! better Jacobian handling.
8use crate::error::{IntegrateError, IntegrateResult};
9use crate::ode::types::{ODEMethod, ODEOptions, ODEResult};
10use crate::ode::utils::common::{
11    calculate_error_weights, estimate_initial_step, extrapolate, finite_difference_jacobian,
12    scaled_norm, solve_linear_system,
13};
14use crate::ode::utils::stiffness::integration::{AdaptiveMethodState, AdaptiveMethodType};
15use crate::ode::utils::stiffness::StiffnessDetectionConfig;
16use crate::IntegrateFloat;
17use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
18
/// Enhanced LSODA method state information
///
/// Bundles everything a single integration run needs: the current point,
/// a short history of recent steps (required by the multistep Adams/BDF
/// formulas), the adaptive method-switching state, the cached Jacobian,
/// and solver statistics.
///
/// NOTE(review): the history vectors include the *current* state as their
/// last entry (see `new` and `add_to_history`), i.e. `dy_history.last()`
/// duplicates `dy`.
struct EnhancedLsodaState<F: IntegrateFloat> {
    /// Current time
    t: F,
    /// Current solution
    y: Array1<F>,
    /// Current derivative dy/dt at (t, y)
    dy: Array1<F>,
    /// Current integration step size
    h: F,
    /// History of time points (oldest first, current point last)
    t_history: Vec<F>,
    /// History of solution values (parallel to `t_history`)
    y_history: Vec<Array1<F>>,
    /// History of derivatives (parallel to `t_history`)
    dy_history: Vec<Array1<F>>,
    /// Adaptive method state for method switching (Adams <-> BDF)
    adaptive_state: AdaptiveMethodState<F>,
    /// Cached Jacobian matrix (the I - h*J iteration matrix), if any
    jacobian: Option<Array2<F>>,
    /// Number of accepted steps since the Jacobian was last rebuilt
    jacobian_age: usize,
    /// Function evaluations performed by the step routines
    func_evals: usize,
    /// LU decompositions (linear solves) performed
    n_lu: usize,
    /// Jacobian evaluations performed
    n_jac: usize,
    /// Total steps attempted
    steps: usize,
    /// Accepted steps
    accepted_steps: usize,
    /// Rejected steps
    rejected_steps: usize,
    /// Error weights (atol/rtol scaling) for scaled-norm error control
    tol_scale: Array1<F>,
}
56
57impl<F: IntegrateFloat> EnhancedLsodaState<F> {
58    /// Create a new LSODA state
59    fn new(t: F, y: Array1<F>, dy: Array1<F>, h: F, rtol: F, atol: F) -> Self {
60        let _n_dim = y.len();
61
62        // Calculate tolerance scaling for error control
63        let tol_scale = calculate_error_weights(&y, atol, rtol);
64
65        // Create stiffness detection configuration
66        let stiffness_config = StiffnessDetectionConfig::default();
67
68        EnhancedLsodaState {
69            t,
70            y: y.clone(),
71            dy: dy.clone(),
72            h,
73            t_history: vec![t],
74            y_history: vec![y],
75            dy_history: vec![dy],
76            adaptive_state: AdaptiveMethodState::with_config(stiffness_config),
77            jacobian: None,
78            jacobian_age: 0,
79            func_evals: 0,
80            n_lu: 0,
81            n_jac: 0,
82            steps: 0,
83            accepted_steps: 0,
84            rejected_steps: 0,
85            tol_scale,
86        }
87    }
88
89    /// Update tolerance scaling factors
90    fn update_tol_scale(&mut self, rtol: F, atol: F) {
91        self.tol_scale = calculate_error_weights(&self.y, atol, rtol);
92    }
93
94    /// Add current state to history
95    fn add_to_history(&mut self) {
96        self.t_history.push(self.t);
97        self.y_history.push(self.y.clone());
98        self.dy_history.push(self.dy.clone());
99
100        // Keep history limited to what's needed
101        let max_history = match self.adaptive_state.method_type {
102            AdaptiveMethodType::Explicit => 12, // Adams can use up to order 12
103            AdaptiveMethodType::Implicit => 5,  // BDF can use up to order 5
104            AdaptiveMethodType::Adams => 12,    // Adams can use up to order 12
105            AdaptiveMethodType::BDF => 5,       // BDF can use up to order 5
106            AdaptiveMethodType::RungeKutta => 4, // RK methods typically don't need much history
107        };
108
109        if self.t_history.len() > max_history {
110            self.t_history.remove(0);
111            self.y_history.remove(0);
112            self.dy_history.remove(0);
113        }
114    }
115
116    /// Switch method type (between Adams and BDF)
117    fn switch_method(&mut self, _newmethod: AdaptiveMethodType) -> IntegrateResult<()> {
118        // Let the adaptive state handle the switching logic
119        self.adaptive_state.switch_method(_newmethod, self.steps)?;
120
121        // Additional state adjustments
122        match _newmethod {
123            AdaptiveMethodType::Implicit | AdaptiveMethodType::BDF => {
124                // When switching to BDF, reset Jacobian
125                self.jacobian = None;
126                self.jacobian_age = 0;
127            }
128            AdaptiveMethodType::Explicit | AdaptiveMethodType::Adams => {
129                // When switching to Adams, be more conservative with step size
130                if self.rejected_steps > 2 {
131                    self.h *= F::from_f64(0.5).unwrap();
132                }
133            }
134            AdaptiveMethodType::RungeKutta => {
135                // RK methods - reset step size to be conservative
136                self.h *= F::from_f64(0.8).unwrap();
137            }
138        }
139
140        Ok(())
141    }
142}
143
144/// Solve ODE using enhanced LSODA method with improved stiffness detection
145///
146/// This enhanced LSODA method features:
147/// - More sophisticated stiffness detection algorithms
148/// - Improved method switching logic
149/// - Better Jacobian handling and reuse
150/// - More efficient linear system solving
151/// - Comprehensive diagnostics and statistics
152///
153/// The method automatically switches between Adams methods (explicit, non-stiff)
154/// and BDF methods (implicit, stiff) based on detected stiffness characteristics.
155#[allow(dead_code)]
156pub fn enhanced_lsoda_method<F, Func>(
157    f: Func,
158    t_span: [F; 2],
159    y0: Array1<F>,
160    opts: ODEOptions<F>,
161) -> IntegrateResult<ODEResult<F>>
162where
163    F: IntegrateFloat,
164    Func: Fn(F, ArrayView1<F>) -> Array1<F>,
165{
166    // Initialize
167    let [t_start, t_end] = t_span;
168    let _n_dim = y0.len();
169
170    // Initial evaluation
171    let dy0 = f(t_start, y0.view());
172    let mut func_evals = 1;
173
174    // Estimate initial step size if not provided
175    let h0 = opts.h0.unwrap_or_else(|| {
176        // Use more sophisticated step size estimation
177        let tol = opts.atol + opts.rtol;
178        estimate_initial_step(&f, t_start, &y0, &dy0, tol, t_end)
179    });
180
181    // Determine minimum and maximum step sizes
182    let min_step = opts.min_step.unwrap_or_else(|| {
183        let _span = t_end - t_start;
184        _span * F::from_f64(1e-10).unwrap() // Minimal step size
185    });
186
187    let max_step = opts.max_step.unwrap_or_else(|| {
188        t_end - t_start // Maximum step can be the whole interval
189    });
190
191    // Initialize LSODA state
192    let mut state = EnhancedLsodaState::new(t_start, y0.clone(), dy0, h0, opts.rtol, opts.atol);
193
194    // Result storage
195    let mut t_values = vec![t_start];
196    let mut y_values = vec![y0.clone()];
197
198    // Main integration loop
199    while state.t < t_end && state.steps < opts.max_steps {
200        // Adjust step size for the last step if needed
201        if state.t + state.h > t_end {
202            state.h = t_end - state.t;
203        }
204
205        // Limit step size to bounds
206        state.h = state.h.min(max_step).max(min_step);
207
208        // Step with the current method
209        let step_result = match state.adaptive_state.method_type {
210            AdaptiveMethodType::Explicit | AdaptiveMethodType::Adams => {
211                enhanced_adams_step(&mut state, &f, &opts, &mut func_evals)
212            }
213            AdaptiveMethodType::Implicit | AdaptiveMethodType::BDF => {
214                enhanced_bdf_step(&mut state, &f, &opts, &mut func_evals)
215            }
216            AdaptiveMethodType::RungeKutta => {
217                // For RK methods, use Adams for now
218                enhanced_adams_step(&mut state, &f, &opts, &mut func_evals)
219            }
220        };
221
222        state.steps += 1;
223        state.adaptive_state.steps_since_switch += 1;
224
225        match step_result {
226            Ok(accepted) => {
227                if accepted {
228                    // Step accepted
229
230                    // Add to history and results
231                    state.add_to_history();
232                    t_values.push(state.t);
233                    y_values.push(state.y.clone());
234
235                    state.accepted_steps += 1;
236
237                    // Record step data for stiffness analysis
238                    let error = F::zero(); // We don't have direct error estimate for reporting
239                    let _newton_iterations = 0; // Would need to be passed from step methods
240                    state.adaptive_state.record_step(error);
241
242                    // Check for method switching
243                    if let Some(_new_method) = state.adaptive_state.check_method_switch() {
244                        // Method switching already happened in the check_method_switch call
245                    }
246
247                    // Update tolerance scaling for next step
248                    state.update_tol_scale(opts.rtol, opts.atol);
249
250                    // Increment Jacobian age if we're using BDF
251                    if state.adaptive_state.method_type == AdaptiveMethodType::Implicit
252                        && state.jacobian.is_some()
253                    {
254                        state.jacobian_age += 1;
255                    }
256                } else {
257                    // Step rejected
258                    state.rejected_steps += 1;
259
260                    // Record step data for stiffness analysis
261                    let error = F::one(); // Placeholder for rejected step
262                    let _newton_iterations = 0; // Would need to be passed from step methods
263                    state.adaptive_state.record_step(error);
264                }
265            }
266            Err(e) => {
267                // Handle specific errors that might indicate stiffness changes
268                match &e {
269                    IntegrateError::ConvergenceError(msg) if msg.contains("stiff") => {
270                        if state.adaptive_state.method_type == AdaptiveMethodType::Explicit {
271                            // Problem appears to be stiff - switch to BDF
272                            state.switch_method(AdaptiveMethodType::Implicit)?;
273
274                            // Reduce step size
275                            state.h *= F::from_f64(0.5).unwrap();
276                            if state.h < min_step {
277                                return Err(IntegrateError::ConvergenceError(
278                                    "Step size too small after method switch".to_string(),
279                                ));
280                            }
281                        } else {
282                            // Already using BDF and still failing
283                            return Err(e);
284                        }
285                    }
286                    IntegrateError::ConvergenceError(msg) if msg.contains("non-stiff") => {
287                        if state.adaptive_state.method_type == AdaptiveMethodType::Implicit {
288                            // Problem appears to be non-stiff - switch to Adams
289                            state.switch_method(AdaptiveMethodType::Explicit)?;
290
291                            // Reduce step size for stability
292                            state.h *= F::from_f64(0.5).unwrap();
293                            if state.h < min_step {
294                                return Err(IntegrateError::ConvergenceError(
295                                    "Step size too small after method switch".to_string(),
296                                ));
297                            }
298                        } else {
299                            // Already using Adams and still failing
300                            return Err(e);
301                        }
302                    }
303                    _ => return Err(e), // Other errors are passed through
304                }
305            }
306        }
307    }
308
309    let success = state.t >= t_end;
310    let message = if !success {
311        Some(format!(
312            "Maximum number of steps ({}) reached",
313            opts.max_steps
314        ))
315    } else {
316        // Include method switching diagnostic information
317        Some(state.adaptive_state.generate_diagnostic_message())
318    };
319
320    // Return the solution
321    Ok(ODEResult {
322        t: t_values,
323        y: y_values,
324        success,
325        message,
326        n_eval: func_evals,
327        n_steps: state.steps,
328        n_accepted: state.accepted_steps,
329        n_rejected: state.rejected_steps,
330        n_lu: state.n_lu,
331        n_jac: state.n_jac,
332        method: ODEMethod::LSODA,
333    })
334}
335
336/// Enhanced Adams method (predictor-corrector) for non-stiff regions
337#[allow(dead_code)]
338fn enhanced_adams_step<F, Func>(
339    state: &mut EnhancedLsodaState<F>,
340    f: &Func,
341    opts: &ODEOptions<F>,
342    func_evals: &mut usize,
343) -> IntegrateResult<bool>
344where
345    F: IntegrateFloat,
346    Func: Fn(F, ArrayView1<F>) -> Array1<F>,
347{
348    // Coefficients for Adams-Bashforth (predictor)
349    // These are the coefficients for different orders (1-12)
350    let ab_coeffs: [Vec<F>; 12] = [
351        // Order 1 (Euler)
352        vec![F::one()],
353        // Order 2
354        vec![
355            F::from_f64(3.0 / 2.0).unwrap(),
356            F::from_f64(-1.0 / 2.0).unwrap(),
357        ],
358        // Order 3
359        vec![
360            F::from_f64(23.0 / 12.0).unwrap(),
361            F::from_f64(-16.0 / 12.0).unwrap(),
362            F::from_f64(5.0 / 12.0).unwrap(),
363        ],
364        // Order 4
365        vec![
366            F::from_f64(55.0 / 24.0).unwrap(),
367            F::from_f64(-59.0 / 24.0).unwrap(),
368            F::from_f64(37.0 / 24.0).unwrap(),
369            F::from_f64(-9.0 / 24.0).unwrap(),
370        ],
371        // Order 5
372        vec![
373            F::from_f64(1901.0 / 720.0).unwrap(),
374            F::from_f64(-2774.0 / 720.0).unwrap(),
375            F::from_f64(2616.0 / 720.0).unwrap(),
376            F::from_f64(-1274.0 / 720.0).unwrap(),
377            F::from_f64(251.0 / 720.0).unwrap(),
378        ],
379        // Order 6
380        vec![
381            F::from_f64(4277.0 / 1440.0).unwrap(),
382            F::from_f64(-7923.0 / 1440.0).unwrap(),
383            F::from_f64(9982.0 / 1440.0).unwrap(),
384            F::from_f64(-7298.0 / 1440.0).unwrap(),
385            F::from_f64(2877.0 / 1440.0).unwrap(),
386            F::from_f64(-475.0 / 1440.0).unwrap(),
387        ],
388        // Order 7
389        vec![
390            F::from_f64(198721.0 / 60480.0).unwrap(),
391            F::from_f64(-447288.0 / 60480.0).unwrap(),
392            F::from_f64(705549.0 / 60480.0).unwrap(),
393            F::from_f64(-688256.0 / 60480.0).unwrap(),
394            F::from_f64(407139.0 / 60480.0).unwrap(),
395            F::from_f64(-134472.0 / 60480.0).unwrap(),
396            F::from_f64(19087.0 / 60480.0).unwrap(),
397        ],
398        // Order 8+
399        vec![
400            F::from_f64(434241.0 / 120960.0).unwrap(),
401            F::from_f64(-1152169.0 / 120960.0).unwrap(),
402            F::from_f64(2183877.0 / 120960.0).unwrap(),
403            F::from_f64(-2664477.0 / 120960.0).unwrap(),
404            F::from_f64(2102243.0 / 120960.0).unwrap(),
405            F::from_f64(-1041723.0 / 120960.0).unwrap(),
406            F::from_f64(295767.0 / 120960.0).unwrap(),
407            F::from_f64(-36799.0 / 120960.0).unwrap(),
408        ],
409        // Order 9
410        vec![
411            F::from_f64(14097247.0 / 3628800.0).unwrap(),
412            F::from_f64(-43125206.0 / 3628800.0).unwrap(),
413            F::from_f64(95476786.0 / 3628800.0).unwrap(),
414            F::from_f64(-139855262.0 / 3628800.0).unwrap(),
415            F::from_f64(137968480.0 / 3628800.0).unwrap(),
416            F::from_f64(-91172642.0 / 3628800.0).unwrap(),
417            F::from_f64(38833486.0 / 3628800.0).unwrap(),
418            F::from_f64(-9664106.0 / 3628800.0).unwrap(),
419            F::from_f64(1070017.0 / 3628800.0).unwrap(),
420        ],
421        // Order 10
422        vec![
423            F::from_f64(30277247.0 / 7257600.0).unwrap(),
424            F::from_f64(-104995189.0 / 7257600.0).unwrap(),
425            F::from_f64(265932680.0 / 7257600.0).unwrap(),
426            F::from_f64(-454661776.0 / 7257600.0).unwrap(),
427            F::from_f64(538363838.0 / 7257600.0).unwrap(),
428            F::from_f64(-444772162.0 / 7257600.0).unwrap(),
429            F::from_f64(252618224.0 / 7257600.0).unwrap(),
430            F::from_f64(-94307320.0 / 7257600.0).unwrap(),
431            F::from_f64(20884811.0 / 7257600.0).unwrap(),
432            F::from_f64(-2082753.0 / 7257600.0).unwrap(),
433        ],
434        // Order 11
435        vec![
436            F::from_f64(35256204767.0 / 7983360000.0).unwrap(),
437            F::from_f64(-134336876800.0 / 7983360000.0).unwrap(),
438            F::from_f64(385146025457.0 / 7983360000.0).unwrap(),
439            F::from_f64(-754734083733.0 / 7983360000.0).unwrap(),
440            F::from_f64(1045594573504.0 / 7983360000.0).unwrap(),
441            F::from_f64(-1029725952608.0 / 7983360000.0).unwrap(),
442            F::from_f64(717313887930.0 / 7983360000.0).unwrap(),
443            F::from_f64(-344156361067.0 / 7983360000.0).unwrap(),
444            F::from_f64(109301088672.0 / 7983360000.0).unwrap(),
445            F::from_f64(-21157613775.0 / 7983360000.0).unwrap(),
446            F::from_f64(1832380165.0 / 7983360000.0).unwrap(),
447        ],
448        // Order 12
449        vec![
450            F::from_f64(77737505967.0 / 16876492800.0).unwrap(),
451            F::from_f64(-328202700680.0 / 16876492800.0).unwrap(),
452            F::from_f64(1074851727475.0 / 16876492800.0).unwrap(),
453            F::from_f64(-2459572352768.0 / 16876492800.0).unwrap(),
454            F::from_f64(4013465151807.0 / 16876492800.0).unwrap(),
455            F::from_f64(-4774671405984.0 / 16876492800.0).unwrap(),
456            F::from_f64(4127030565077.0 / 16876492800.0).unwrap(),
457            F::from_f64(-2538584431976.0 / 16876492800.0).unwrap(),
458            F::from_f64(1077984741336.0 / 16876492800.0).unwrap(),
459            F::from_f64(-295501032385.0 / 16876492800.0).unwrap(),
460            F::from_f64(48902348238.0 / 16876492800.0).unwrap(),
461            F::from_f64(-3525779602.0 / 16876492800.0).unwrap(),
462        ],
463    ];
464
465    // Coefficients for Adams-Moulton (corrector)
466    // These are the coefficients for different orders (1-12)
467    let am_coeffs: [Vec<F>; 12] = [
468        // Order 1 (Backward Euler)
469        vec![F::one()],
470        // Order 2 (Trapezoidal)
471        vec![
472            F::from_f64(1.0 / 2.0).unwrap(),
473            F::from_f64(1.0 / 2.0).unwrap(),
474        ],
475        // Order 3
476        vec![
477            F::from_f64(5.0 / 12.0).unwrap(),
478            F::from_f64(8.0 / 12.0).unwrap(),
479            F::from_f64(-1.0 / 12.0).unwrap(),
480        ],
481        // Order 4
482        vec![
483            F::from_f64(9.0 / 24.0).unwrap(),
484            F::from_f64(19.0 / 24.0).unwrap(),
485            F::from_f64(-5.0 / 24.0).unwrap(),
486            F::from_f64(1.0 / 24.0).unwrap(),
487        ],
488        // Orders 5-12 (truncated for brevity - would include full coefficients)
489        // First few orders are the most commonly used
490        vec![F::zero()],
491        vec![F::zero()],
492        vec![F::zero()],
493        vec![F::zero()],
494        vec![F::zero()],
495        vec![F::zero()],
496        vec![F::zero()],
497        vec![F::zero()],
498    ];
499
500    // Get the current order from the adaptive state
501    let order = state
502        .adaptive_state
503        .order
504        .min(state.dy_history.len() + 1)
505        .min(12);
506
507    // If we don't have enough history, use lower order
508    if order == 1 || state.dy_history.is_empty() {
509        // Explicit Euler method (1st order Adams-Bashforth)
510        let next_t = state.t + state.h;
511        let next_y = &state.y + &(state.dy.clone() * state.h);
512
513        // Evaluate at the new point
514        let next_dy = f(next_t, next_y.view());
515        *func_evals += 1;
516        state.func_evals += 1;
517
518        // Update state
519        state.t = next_t;
520        state.y = next_y;
521        state.dy = next_dy;
522
523        // Order can now be increased next step
524        if state.adaptive_state.order < 2 {
525            state.adaptive_state.order += 1;
526        }
527
528        return Ok(true);
529    }
530
531    // Adams-Bashforth predictor (explicit step)
532    let next_t = state.t + state.h;
533    let ab_coefs = &ab_coeffs[order - 1];
534
535    // Apply Adams-Bashforth formula to predict next value
536    // y_{n+1} = y_n + h * sum(b_i * f_{n-i+1})
537    let mut ab_sum = state.dy.clone() * ab_coefs[0];
538
539    for (i, &coeff) in ab_coefs.iter().enumerate().take(order).skip(1) {
540        if i <= state.dy_history.len() {
541            let idx = state.dy_history.len() - i;
542            ab_sum += &(state.dy_history[idx].clone() * coeff);
543        }
544    }
545
546    let y_pred = &state.y + &(ab_sum * state.h);
547
548    // Evaluate function at the predicted point
549    let dy_pred = f(next_t, y_pred.view());
550    *func_evals += 1;
551    state.func_evals += 1;
552
553    // Adams-Moulton corrector (implicit step)
554    // For simplicity, we'll use lower order corrector
555    let am_order = order.min(4); // Only using up to 4th order corrector for simplicity
556    let am_coefs = &am_coeffs[am_order - 1];
557
558    // Apply Adams-Moulton formula to correct the prediction
559    // y_{n+1} = y_n + h * (b_0 * f_{n+1} + sum(b_i * f_{n-i+1}))
560    let mut am_sum = dy_pred.clone() * am_coefs[0]; // f_{n+1} term
561
562    for (i, &coeff) in am_coefs.iter().enumerate().take(am_order).skip(1) {
563        if i == 1 {
564            // Current derivative (f_n)
565            am_sum += &(state.dy.clone() * coeff);
566        } else if i - 1 < state.dy_history.len() {
567            // Historical derivatives (f_{n-1}, f_{n-2}, ...)
568            let idx = state.dy_history.len() - (i - 1);
569            am_sum += &(state.dy_history[idx].clone() * coeff);
570        }
571    }
572
573    let y_corr = &state.y + &(am_sum * state.h);
574
575    // Evaluate function at the corrected point
576    let dy_corr = f(next_t, y_corr.view());
577    *func_evals += 1;
578    state.func_evals += 1;
579
580    // Error estimation based on predictor-corrector difference
581    let error = scaled_norm(&(&y_corr - &y_pred), &state.tol_scale);
582
583    // Step size adjustment factor based on error
584    let err_order = F::from_usize(order + 1).unwrap(); // Error order is one higher than method order
585    let err_factor = if error > F::zero() {
586        F::from_f64(0.9).unwrap() * (F::one() / error).powf(F::one() / err_order)
587    } else {
588        F::from_f64(5.0).unwrap() // Max increase if error is zero
589    };
590
591    // Safety factor and limits for step size adjustment
592    let safety = F::from_f64(0.9).unwrap();
593    let factor_max = F::from_f64(5.0).unwrap();
594    let factor_min = F::from_f64(0.2).unwrap();
595    let factor = safety * err_factor.min(factor_max).max(factor_min);
596
597    // Check if step is acceptable
598    if error <= F::one() {
599        // Step accepted
600
601        // Update state
602        state.t = next_t;
603        state.y = y_corr;
604        state.dy = dy_corr;
605
606        // Update step size for next step
607        state.h *= factor;
608
609        // Order adaptation
610        if order < 12 && error < opts.rtol && state.dy_history.len() >= order {
611            state.adaptive_state.order = (state.adaptive_state.order + 1).min(12);
612        } else if order > 1 && error > F::from_f64(0.5).unwrap() {
613            state.adaptive_state.order = (state.adaptive_state.order - 1).max(1);
614        }
615
616        // Trigger stiffness detector to record this step
617        state.adaptive_state.record_step(error);
618
619        Ok(true)
620    } else {
621        // Step rejected
622
623        // Adjust step size for retry
624        state.h *= factor;
625
626        // Trigger stiffness detector to record this rejected step
627        state.adaptive_state.record_step(error);
628
629        // If error is very large, this might indicate stiffness
630        if error > F::from_f64(10.0).unwrap() {
631            return Err(IntegrateError::ConvergenceError(
632                "Problem appears stiff - consider using BDF method".to_string(),
633            ));
634        }
635
636        Ok(false)
637    }
638}
639
640/// Enhanced BDF method for stiff regions
641#[allow(dead_code)]
642fn enhanced_bdf_step<F, Func>(
643    state: &mut EnhancedLsodaState<F>,
644    f: &Func,
645    opts: &ODEOptions<F>,
646    func_evals: &mut usize,
647) -> IntegrateResult<bool>
648where
649    F: IntegrateFloat,
650    Func: Fn(F, ArrayView1<F>) -> Array1<F>,
651{
652    // Coefficients for BDF methods of different orders
653    let bdf_coefs: [Vec<F>; 5] = [
654        // BDF1 (Implicit Euler): y_{n+1} - y_n = h * f(t_{n+1}, y_{n+1})
655        vec![F::one(), F::from_f64(-1.0).unwrap()],
656        // BDF2: 3/2 * y_{n+1} - 2 * y_n + 1/2 * y_{n-1} = h * f(t_{n+1}, y_{n+1})
657        vec![
658            F::from_f64(3.0 / 2.0).unwrap(),
659            F::from_f64(-2.0).unwrap(),
660            F::from_f64(1.0 / 2.0).unwrap(),
661        ],
662        // BDF3
663        vec![
664            F::from_f64(11.0 / 6.0).unwrap(),
665            F::from_f64(-3.0).unwrap(),
666            F::from_f64(3.0 / 2.0).unwrap(),
667            F::from_f64(-1.0 / 3.0).unwrap(),
668        ],
669        // BDF4
670        vec![
671            F::from_f64(25.0 / 12.0).unwrap(),
672            F::from_f64(-4.0).unwrap(),
673            F::from_f64(3.0).unwrap(),
674            F::from_f64(-4.0 / 3.0).unwrap(),
675            F::from_f64(1.0 / 4.0).unwrap(),
676        ],
677        // BDF5
678        vec![
679            F::from_f64(137.0 / 60.0).unwrap(),
680            F::from_f64(-5.0).unwrap(),
681            F::from_f64(5.0).unwrap(),
682            F::from_f64(-10.0 / 3.0).unwrap(),
683            F::from_f64(5.0 / 4.0).unwrap(),
684            F::from_f64(-1.0 / 5.0).unwrap(),
685        ],
686    ];
687
688    // Use the appropriate order based on history availability
689    let order = state.adaptive_state.order.min(state.y_history.len()).min(5);
690
691    // If we don't have enough history for the requested order, use lower order
692    if order == 1 || state.y_history.is_empty() {
693        // Implicit Euler method (1st order BDF)
694        let next_t = state.t + state.h;
695
696        // Predict the next value (simple extrapolation)
697        let y_pred = state.y.clone();
698
699        // Newton's method for solving the implicit equation
700        let max_newton_iters = 10;
701        let newton_tol = F::from_f64(1e-8).unwrap();
702        let mut y_next = y_pred.clone();
703        let mut converged = false;
704        let mut iter_count = 0;
705
706        // Store initial function eval for potential Jacobian computation
707        let mut f_eval = f(next_t, y_next.view());
708        *func_evals += 1;
709        state.func_evals += 1;
710
711        while iter_count < max_newton_iters {
712            // Compute residual for BDF1: y_{n+1} - y_n - h * f(t_{n+1}, y_{n+1}) = 0
713            let residual = &y_next - &state.y - &(f_eval.clone() * state.h);
714
715            // Check convergence
716            let error = scaled_norm(&residual, &state.tol_scale);
717
718            if error <= newton_tol {
719                converged = true;
720                break;
721            }
722
723            // Compute or reuse Jacobian
724            let eps = F::from_f64(1e-8).unwrap();
725            let n_dim = y_next.len();
726
727            // Create approximate Jacobian using finite differences if needed
728            let compute_new_jacobian =
729                state.jacobian.is_none() || state.jacobian_age > 20 || iter_count == 0;
730            let jacobian = if compute_new_jacobian {
731                state.n_jac += 1;
732
733                // Create finite difference Jacobian
734                let new_jacobian = finite_difference_jacobian(f, next_t, &y_next, &f_eval, eps);
735
736                // Modify for solving BDF: I - h*J
737                let mut jac = Array2::<F>::eye(n_dim);
738                for i in 0..n_dim {
739                    for j in 0..n_dim {
740                        jac[[i, j]] = if i == j { F::one() } else { F::zero() };
741                        jac[[i, j]] -= state.h * new_jacobian[[i, j]];
742                    }
743                }
744
745                // Store the Jacobian for potential reuse
746                state.jacobian = Some(jac.clone());
747                state.jacobian_age = 0;
748                jac
749            } else {
750                // Reuse previous Jacobian
751                state.jacobian.clone().unwrap()
752            };
753
754            // Solve the linear system J*delta_y = residual
755            state.n_lu += 1;
756
757            // Use our more robust linear solver
758            let delta_y = match solve_linear_system(&jacobian, &residual) {
759                Ok(delta) => delta,
760                Err(_) => {
761                    // Nearly singular, reduce step size and try again
762                    state.h *= F::from_f64(0.5).unwrap();
763                    return Ok(false);
764                }
765            };
766
767            // Update solution
768            y_next = &y_next - &delta_y;
769
770            // Evaluate function at new point
771            f_eval = f(next_t, y_next.view());
772            *func_evals += 1;
773            state.func_evals += 1;
774
775            iter_count += 1;
776
777            // Record Newton iteration count for stiffness detection
778            state.adaptive_state.record_step(error);
779        }
780
781        if !converged {
782            // Newton iteration failed, reduce step size
783            state.h *= F::from_f64(0.5).unwrap();
784
785            // If we've reduced step size too much, the problem might be non-stiff
786            if state.h < opts.min_step.unwrap_or(F::from_f64(1e-10).unwrap()) {
787                return Err(IntegrateError::ConvergenceError(
788                    "BDF1 failed to converge - problem might be non-stiff".to_string(),
789                ));
790            }
791
792            return Ok(false);
793        }
794
795        // Step accepted
796
797        // Update state
798        state.t = next_t;
799        state.y = y_next;
800        state.dy = f_eval;
801
802        // Order can now be increased next step
803        if state.adaptive_state.order < 2 {
804            state.adaptive_state.order += 1;
805        }
806
807        return Ok(true);
808    }
809
    // Higher-order BDF methods (2-5)

    // Get BDF coefficients for the current order
    let coeffs = &bdf_coefs[order - 1];

    // Target time for this step
    let next_t = state.t + state.h;

    // Predictor: start from the current solution value
    let mut y_pred = state.y.clone();

    // For higher orders, improve the predictor by extrapolating the
    // stored solution history to the new time point
    if order > 1 && !state.y_history.is_empty() {
        // NOTE(review): `_y_prev` is unused dead code — candidate for removal
        let _y_prev = &state.y_history[state.y_history.len() - 1];

        // Use more sophisticated extrapolation
        y_pred = extrapolate(&state.t_history[..], &state.y_history[..], next_t)?;
    }

    // Newton's method for solving the implicit BDF equation
    let max_newton_iters = 10;
    let newton_tol = F::from_f64(1e-8).unwrap();
    let mut y_next = y_pred.clone();
    let mut converged = false;
    let mut iter_count = 0;

    // Initial function evaluation at the predicted point
    let mut f_eval = f(next_t, y_next.view());
    *func_evals += 1;
    state.func_evals += 1;

    while iter_count < max_newton_iters {
        // Compute residual for BDF: c_0 * y_{n+1} - sum(c_j * y_{n+1-j}) - h * f(t_{n+1}, y_{n+1}) = 0
        let mut residual = y_next.clone() * coeffs[0];

        // Subtract the contribution of the current solution (j = 1 term)
        residual -= &(state.y.clone() * coeffs[1]);

        // Subtract contributions of older history points (j >= 2 terms).
        // NOTE(review): idx = len - (j - 1) never reaches 0, so the oldest
        // history entry y_history[0] is never used, and the j - 1 == len term
        // is skipped entirely — verify this indexing is intended.
        for (j, &coeff) in coeffs.iter().enumerate().skip(2) {
            if j - 1 < state.y_history.len() {
                let idx = state.y_history.len() - (j - 1);
                residual -= &(state.y_history[idx].clone() * coeff);
            }
        }

        // Subtract the h * f(t_{n+1}, y_{n+1}) term
        residual -= &(f_eval.clone() * state.h);

        // Convergence test on the tolerance-weighted residual norm
        let error = scaled_norm(&residual, &state.tol_scale);

        if error <= newton_tol {
            converged = true;
            break;
        }

        // Finite-difference perturbation size for the Jacobian
        let eps = F::from_f64(1e-8).unwrap();
        let n_dim = y_next.len();

        // Recompute the Jacobian when missing, stale (age > 20), or at the
        // first Newton iteration of a step; otherwise reuse the cached one
        let compute_new_jacobian =
            state.jacobian.is_none() || state.jacobian_age > 20 || iter_count == 0;
        let jacobian = if compute_new_jacobian {
            state.n_jac += 1;

            // Approximate df/dy by finite differences
            let new_jacobian = finite_difference_jacobian(f, next_t, &y_next, &f_eval, eps);

            // Build the Newton iteration matrix for BDF: c_0*I - h*J
            let mut jac = Array2::<F>::zeros((n_dim, n_dim));
            for i in 0..n_dim {
                for j in 0..n_dim {
                    jac[[i, j]] = if i == j { coeffs[0] } else { F::zero() };
                    jac[[i, j]] -= state.h * new_jacobian[[i, j]];
                }
            }

            // Cache the iteration matrix for potential reuse
            state.jacobian = Some(jac.clone());
            state.jacobian_age = 0;
            jac
        } else {
            // Reuse previous Jacobian
            state.jacobian.clone().unwrap()
        };

        // Solve the Newton linear system J*delta_y = residual
        // (n_lu counts linear-system solves for the solver statistics)
        state.n_lu += 1;

        // Use our more robust linear solver
        let delta_y = match solve_linear_system(&jacobian, &residual) {
            Ok(delta) => delta,
            Err(_) => {
                // Nearly singular iteration matrix: halve the step size and
                // report a rejected step so the caller retries
                state.h *= F::from_f64(0.5).unwrap();
                return Ok(false);
            }
        };

        // Newton update: y_next <- y_next - J^{-1} * residual
        y_next = &y_next - &delta_y;

        // Evaluate the right-hand side at the updated iterate
        f_eval = f(next_t, y_next.view());
        *func_evals += 1;
        state.func_evals += 1;

        iter_count += 1;

        // Feed the scaled residual norm to the stiffness detector.
        // NOTE(review): the old comment said "iteration count", but the
        // value recorded is `error` (the residual norm) — confirm intended.
        state.adaptive_state.record_step(error);
    }

    if !converged {
        // Newton iteration failed, reduce step size
        state.h *= F::from_f64(0.5).unwrap();

        // If the problem is consistently difficult to solve, it might not be stiff
        if iter_count >= max_newton_iters - 1 {
            // Record as potential non-stiffness indicator.
            // NOTE(review): unlike the loop residual above, this expression
            // joins the terms with `+` and omits the h*f term, so it is not
            // the true BDF residual; it also always reads coeffs[2] — confirm
            // this heuristic is what the stiffness detector expects.
            let final_residual = &(y_next.clone() * coeffs[0])
                + &(state.y.clone() * coeffs[1])
                + &(state.y_history.last().unwrap_or(&state.y).clone() * coeffs[2]);
            let final_error = scaled_norm(&final_residual, &state.tol_scale);

            state.adaptive_state.record_step(final_error);
        }

        // If we've reduced step size too much, the problem might not be stiff
        if state.h < opts.min_step.unwrap_or(F::from_f64(1e-10).unwrap()) {
            return Err(IntegrateError::ConvergenceError(
                "BDF failed to converge - problem might be non-stiff".to_string(),
            ));
        }

        // Rejected step: the caller retries with the reduced step size
        return Ok(false);
    }

    // Step accepted

    // Advance the solver state to the new time point
    state.t = next_t;
    state.y = y_next;
    state.dy = f_eval;

    // Error estimation is based on Newton convergence for now
    // A more sophisticated error estimator could be implemented later

    // Step-size and order adaptation driven by the Newton convergence rate
    if iter_count <= 2 {
        // Converged quickly - can increase step size
        state.h *= F::from_f64(1.1).unwrap();

        // Raise the order (up to 5) when enough history points are available
        if state.adaptive_state.order < 5 && state.y_history.len() >= state.adaptive_state.order {
            state.adaptive_state.order += 1;
        }
    } else if iter_count >= 8 {
        // Converged slowly - reduce step size
        state.h *= F::from_f64(0.8).unwrap();

        // Decrease order if we're struggling
        if state.adaptive_state.order > 1 {
            state.adaptive_state.order -= 1;
        }
    }

    // Age the cached Jacobian so it is eventually refreshed
    state.jacobian_age += 1;

    Ok(true)
}