//! Enhanced LSODA method with automatic switching between a non-stiff
//! Adams predictor-corrector and a stiff BDF integrator.

use crate::error::{IntegrateError, IntegrateResult};
use crate::ode::types::{ODEMethod, ODEOptions, ODEResult};
use crate::ode::utils::common::{
    calculate_error_weights, estimate_initial_step, extrapolate, finite_difference_jacobian,
    scaled_norm, solve_linear_system,
};
use crate::ode::utils::stiffness::integration::{AdaptiveMethodState, AdaptiveMethodType};
use crate::ode::utils::stiffness::StiffnessDetectionConfig;
use crate::IntegrateFloat;
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};

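/// Solver state for the enhanced LSODA integrator: the current solution,
/// step size, multistep history, the adaptive stiff/non-stiff method state,
/// and the work counters reported in the final `ODEResult`.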
struct EnhancedLsodaState<F: IntegrateFloat> {
    /// Current time
    t: F,
    /// Current solution
    y: Array1<F>,
    /// Current derivative f(t, y)
    dy: Array1<F>,
    /// Current step size
    h: F,
    /// Time history for the multistep formulas
    t_history: Vec<F>,
    /// Solution history
    y_history: Vec<Array1<F>>,
    /// Derivative history
    dy_history: Vec<Array1<F>>,
    /// Adaptive method-switching (stiffness detection) state
    adaptive_state: AdaptiveMethodState<F>,
    /// Cached Jacobian for the implicit methods
    jacobian: Option<Array2<F>>,
    /// Steps since the Jacobian was last recomputed
    jacobian_age: usize,
    /// Function evaluation count
    func_evals: usize,
    /// Linear solve (LU) count
    n_lu: usize,
    /// Jacobian evaluation count
    n_jac: usize,
    /// Total step count
    steps: usize,
    /// Accepted step count
    accepted_steps: usize,
    /// Rejected step count
    rejected_steps: usize,
    /// Error weight vector derived from rtol and atol
    tol_scale: Array1<F>,
}

impl<F: IntegrateFloat> EnhancedLsodaState<F> {
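    /// Create the initial solver state from the initial condition `y`, its
    /// derivative `dy`, and the initial step size `h`.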
    fn new(t: F, y: Array1<F>, dy: Array1<F>, h: F, rtol: F, atol: F) -> Self {
        let _n_dim = y.len();

        let tol_scale = calculate_error_weights(&y, atol, rtol);

        let stiffness_config = StiffnessDetectionConfig::default();

        EnhancedLsodaState {
            t,
            y: y.clone(),
            dy: dy.clone(),
            h,
            t_history: vec![t],
            y_history: vec![y],
            dy_history: vec![dy],
            adaptive_state: AdaptiveMethodState::with_config(stiffness_config),
            jacobian: None,
            jacobian_age: 0,
            func_evals: 0,
            n_lu: 0,
            n_jac: 0,
            steps: 0,
            accepted_steps: 0,
            rejected_steps: 0,
            tol_scale,
        }
    }

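    /// Recompute the error weight vector from the current solution and the
    /// supplied tolerances.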
    fn update_tol_scale(&mut self, rtol: F, atol: F) {
        self.tol_scale = calculate_error_weights(&self.y, atol, rtol);
    }

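    /// Append the current `(t, y, dy)` to the history buffers, dropping the
    /// oldest entries so the history never exceeds what the active method uses.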
    fn add_to_history(&mut self) {
        self.t_history.push(self.t);
        self.y_history.push(self.y.clone());
        self.dy_history.push(self.dy.clone());

        // Maximum history length depends on the maximum order of the active method.
        let max_history = match self.adaptive_state.method_type {
            AdaptiveMethodType::Explicit => 12,
            AdaptiveMethodType::Implicit => 5,
            AdaptiveMethodType::Adams => 12,
            AdaptiveMethodType::BDF => 5,
            AdaptiveMethodType::RungeKutta => 4,
        };

        if self.t_history.len() > max_history {
            self.t_history.remove(0);
            self.y_history.remove(0);
            self.dy_history.remove(0);
        }
    }

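    /// Switch to a new method family, clearing the cached Jacobian when
    /// entering a stiff method and shrinking the step size when appropriate.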
    fn switch_method(&mut self, new_method: AdaptiveMethodType) -> IntegrateResult<()> {
        self.adaptive_state.switch_method(new_method, self.steps)?;

        match new_method {
            AdaptiveMethodType::Implicit | AdaptiveMethodType::BDF => {
                // Force a fresh Jacobian when entering the stiff family.
                self.jacobian = None;
                self.jacobian_age = 0;
            }
            AdaptiveMethodType::Explicit | AdaptiveMethodType::Adams => {
                if self.rejected_steps > 2 {
                    self.h *= F::from_f64(0.5).unwrap();
                }
            }
            AdaptiveMethodType::RungeKutta => {
                self.h *= F::from_f64(0.8).unwrap();
            }
        }

        Ok(())
    }
}

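/// Enhanced LSODA solver: integrates `dy/dt = f(t, y)` over `t_span`,
/// automatically switching between a non-stiff Adams predictor-corrector and
/// a stiff BDF method based on the adaptive stiffness-detection state.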
#[allow(dead_code)]
pub fn enhanced_lsoda_method<F, Func>(
    f: Func,
    t_span: [F; 2],
    y0: Array1<F>,
    opts: ODEOptions<F>,
) -> IntegrateResult<ODEResult<F>>
where
    F: IntegrateFloat,
    Func: Fn(F, ArrayView1<F>) -> Array1<F>,
{
    let [t_start, t_end] = t_span;
    let _n_dim = y0.len();

    let dy0 = f(t_start, y0.view());
    let mut func_evals = 1;

    let h0 = opts.h0.unwrap_or_else(|| {
        let tol = opts.atol + opts.rtol;
        estimate_initial_step(&f, t_start, &y0, &dy0, tol, t_end)
    });

    let min_step = opts.min_step.unwrap_or_else(|| {
        let span = t_end - t_start;
        span * F::from_f64(1e-10).unwrap()
    });

    let max_step = opts.max_step.unwrap_or_else(|| t_end - t_start);

    let mut state = EnhancedLsodaState::new(t_start, y0.clone(), dy0, h0, opts.rtol, opts.atol);

    let mut t_values = vec![t_start];
    let mut y_values = vec![y0.clone()];

    while state.t < t_end && state.steps < opts.max_steps {
        // Do not step past the end of the interval.
        if state.t + state.h > t_end {
            state.h = t_end - state.t;
        }

        state.h = state.h.min(max_step).max(min_step);

        // Take a step with whichever method family is currently active.
        let step_result = match state.adaptive_state.method_type {
            AdaptiveMethodType::Explicit | AdaptiveMethodType::Adams => {
                enhanced_adams_step(&mut state, &f, &opts, &mut func_evals)
            }
            AdaptiveMethodType::Implicit | AdaptiveMethodType::BDF => {
                enhanced_bdf_step(&mut state, &f, &opts, &mut func_evals)
            }
            AdaptiveMethodType::RungeKutta => {
                enhanced_adams_step(&mut state, &f, &opts, &mut func_evals)
            }
        };

        state.steps += 1;
        state.adaptive_state.steps_since_switch += 1;

        match step_result {
            Ok(accepted) => {
                if accepted {
                    state.add_to_history();

                    t_values.push(state.t);
                    y_values.push(state.y.clone());

                    state.accepted_steps += 1;

                    let error = F::zero();
                    let _newton_iterations = 0;
                    state.adaptive_state.record_step(error);

                    // The returned switch suggestion is not used here; switching
                    // happens through the stiff/non-stiff error paths below.
                    if let Some(_new_method) = state.adaptive_state.check_method_switch() {}

                    state.update_tol_scale(opts.rtol, opts.atol);

                    if state.adaptive_state.method_type == AdaptiveMethodType::Implicit
                        && state.jacobian.is_some()
                    {
                        state.jacobian_age += 1;
                    }
                } else {
                    state.rejected_steps += 1;

                    let error = F::one();
                    let _newton_iterations = 0;
                    state.adaptive_state.record_step(error);
                }
            }
            Err(e) => {
                match &e {
                    // Exclude "non-stiff" here so those messages fall through to
                    // the dedicated arm below instead of being matched as "stiff".
                    IntegrateError::ConvergenceError(msg)
                        if msg.contains("stiff") && !msg.contains("non-stiff") =>
                    {
                        if state.adaptive_state.method_type == AdaptiveMethodType::Explicit {
                            state.switch_method(AdaptiveMethodType::Implicit)?;

                            state.h *= F::from_f64(0.5).unwrap();
                            if state.h < min_step {
                                return Err(IntegrateError::ConvergenceError(
                                    "Step size too small after method switch".to_string(),
                                ));
                            }
                        } else {
                            return Err(e);
                        }
                    }
                    IntegrateError::ConvergenceError(msg) if msg.contains("non-stiff") => {
                        if state.adaptive_state.method_type == AdaptiveMethodType::Implicit {
                            state.switch_method(AdaptiveMethodType::Explicit)?;

                            state.h *= F::from_f64(0.5).unwrap();
                            if state.h < min_step {
                                return Err(IntegrateError::ConvergenceError(
                                    "Step size too small after method switch".to_string(),
                                ));
                            }
                        } else {
                            return Err(e);
                        }
                    }
                    _ => return Err(e),
                }
            }
        }
    }

    let success = state.t >= t_end;
    let message = if !success {
        Some(format!(
            "Maximum number of steps ({}) reached",
            opts.max_steps
        ))
    } else {
        Some(state.adaptive_state.generate_diagnostic_message())
    };

    Ok(ODEResult {
        t: t_values,
        y: y_values,
        success,
        message,
        n_eval: func_evals,
        n_steps: state.steps,
        n_accepted: state.accepted_steps,
        n_rejected: state.rejected_steps,
        n_lu: state.n_lu,
        n_jac: state.n_jac,
        method: ODEMethod::LSODA,
    })
}

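/// Take one step with the non-stiff Adams predictor-corrector: an
/// Adams-Bashforth predictor of the current order followed by an
/// Adams-Moulton correction, with the predictor-corrector difference used
/// for error, step-size, and order control. Returns `Ok(true)` on acceptance.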
#[allow(dead_code)]
fn enhanced_adams_step<F, Func>(
    state: &mut EnhancedLsodaState<F>,
    f: &Func,
    opts: &ODEOptions<F>,
    func_evals: &mut usize,
) -> IntegrateResult<bool>
where
    F: IntegrateFloat,
    Func: Fn(F, ArrayView1<F>) -> Array1<F>,
{
    // Adams-Bashforth (explicit predictor) coefficients for orders 1 through 12.
    let ab_coeffs: [Vec<F>; 12] = [
        // Order 1 (forward Euler)
        vec![F::one()],
        // Order 2
        vec![
            F::from_f64(3.0 / 2.0).unwrap(),
            F::from_f64(-1.0 / 2.0).unwrap(),
        ],
        // Order 3
        vec![
            F::from_f64(23.0 / 12.0).unwrap(),
            F::from_f64(-16.0 / 12.0).unwrap(),
            F::from_f64(5.0 / 12.0).unwrap(),
        ],
        // Order 4
        vec![
            F::from_f64(55.0 / 24.0).unwrap(),
            F::from_f64(-59.0 / 24.0).unwrap(),
            F::from_f64(37.0 / 24.0).unwrap(),
            F::from_f64(-9.0 / 24.0).unwrap(),
        ],
        // Order 5
        vec![
            F::from_f64(1901.0 / 720.0).unwrap(),
            F::from_f64(-2774.0 / 720.0).unwrap(),
            F::from_f64(2616.0 / 720.0).unwrap(),
            F::from_f64(-1274.0 / 720.0).unwrap(),
            F::from_f64(251.0 / 720.0).unwrap(),
        ],
        // Order 6
        vec![
            F::from_f64(4277.0 / 1440.0).unwrap(),
            F::from_f64(-7923.0 / 1440.0).unwrap(),
            F::from_f64(9982.0 / 1440.0).unwrap(),
            F::from_f64(-7298.0 / 1440.0).unwrap(),
            F::from_f64(2877.0 / 1440.0).unwrap(),
            F::from_f64(-475.0 / 1440.0).unwrap(),
        ],
        // Order 7
        vec![
            F::from_f64(198721.0 / 60480.0).unwrap(),
            F::from_f64(-447288.0 / 60480.0).unwrap(),
            F::from_f64(705549.0 / 60480.0).unwrap(),
            F::from_f64(-688256.0 / 60480.0).unwrap(),
            F::from_f64(407139.0 / 60480.0).unwrap(),
            F::from_f64(-134472.0 / 60480.0).unwrap(),
            F::from_f64(19087.0 / 60480.0).unwrap(),
        ],
        // Order 8
        vec![
            F::from_f64(434241.0 / 120960.0).unwrap(),
            F::from_f64(-1152169.0 / 120960.0).unwrap(),
            F::from_f64(2183877.0 / 120960.0).unwrap(),
            F::from_f64(-2664477.0 / 120960.0).unwrap(),
            F::from_f64(2102243.0 / 120960.0).unwrap(),
            F::from_f64(-1041723.0 / 120960.0).unwrap(),
            F::from_f64(295767.0 / 120960.0).unwrap(),
            F::from_f64(-36799.0 / 120960.0).unwrap(),
        ],
        // Order 9
        vec![
            F::from_f64(14097247.0 / 3628800.0).unwrap(),
            F::from_f64(-43125206.0 / 3628800.0).unwrap(),
            F::from_f64(95476786.0 / 3628800.0).unwrap(),
            F::from_f64(-139855262.0 / 3628800.0).unwrap(),
            F::from_f64(137968480.0 / 3628800.0).unwrap(),
            F::from_f64(-91172642.0 / 3628800.0).unwrap(),
            F::from_f64(38833486.0 / 3628800.0).unwrap(),
            F::from_f64(-9664106.0 / 3628800.0).unwrap(),
            F::from_f64(1070017.0 / 3628800.0).unwrap(),
        ],
        // Order 10
        vec![
            F::from_f64(30277247.0 / 7257600.0).unwrap(),
            F::from_f64(-104995189.0 / 7257600.0).unwrap(),
            F::from_f64(265932680.0 / 7257600.0).unwrap(),
            F::from_f64(-454661776.0 / 7257600.0).unwrap(),
            F::from_f64(538363838.0 / 7257600.0).unwrap(),
            F::from_f64(-444772162.0 / 7257600.0).unwrap(),
            F::from_f64(252618224.0 / 7257600.0).unwrap(),
            F::from_f64(-94307320.0 / 7257600.0).unwrap(),
            F::from_f64(20884811.0 / 7257600.0).unwrap(),
            F::from_f64(-2082753.0 / 7257600.0).unwrap(),
        ],
        // Order 11
        vec![
            F::from_f64(35256204767.0 / 7983360000.0).unwrap(),
            F::from_f64(-134336876800.0 / 7983360000.0).unwrap(),
            F::from_f64(385146025457.0 / 7983360000.0).unwrap(),
            F::from_f64(-754734083733.0 / 7983360000.0).unwrap(),
            F::from_f64(1045594573504.0 / 7983360000.0).unwrap(),
            F::from_f64(-1029725952608.0 / 7983360000.0).unwrap(),
            F::from_f64(717313887930.0 / 7983360000.0).unwrap(),
            F::from_f64(-344156361067.0 / 7983360000.0).unwrap(),
            F::from_f64(109301088672.0 / 7983360000.0).unwrap(),
            F::from_f64(-21157613775.0 / 7983360000.0).unwrap(),
            F::from_f64(1832380165.0 / 7983360000.0).unwrap(),
        ],
        // Order 12
        vec![
            F::from_f64(77737505967.0 / 16876492800.0).unwrap(),
            F::from_f64(-328202700680.0 / 16876492800.0).unwrap(),
            F::from_f64(1074851727475.0 / 16876492800.0).unwrap(),
            F::from_f64(-2459572352768.0 / 16876492800.0).unwrap(),
            F::from_f64(4013465151807.0 / 16876492800.0).unwrap(),
            F::from_f64(-4774671405984.0 / 16876492800.0).unwrap(),
            F::from_f64(4127030565077.0 / 16876492800.0).unwrap(),
            F::from_f64(-2538584431976.0 / 16876492800.0).unwrap(),
            F::from_f64(1077984741336.0 / 16876492800.0).unwrap(),
            F::from_f64(-295501032385.0 / 16876492800.0).unwrap(),
            F::from_f64(48902348238.0 / 16876492800.0).unwrap(),
            F::from_f64(-3525779602.0 / 16876492800.0).unwrap(),
        ],
    ];

    // Adams-Moulton (implicit corrector) coefficients; only orders 1 through 4
    // are populated, the remaining entries are placeholders.
    let am_coeffs: [Vec<F>; 12] = [
        vec![F::one()],
        vec![
            F::from_f64(1.0 / 2.0).unwrap(),
            F::from_f64(1.0 / 2.0).unwrap(),
        ],
        vec![
            F::from_f64(5.0 / 12.0).unwrap(),
            F::from_f64(8.0 / 12.0).unwrap(),
            F::from_f64(-1.0 / 12.0).unwrap(),
        ],
        vec![
            F::from_f64(9.0 / 24.0).unwrap(),
            F::from_f64(19.0 / 24.0).unwrap(),
            F::from_f64(-5.0 / 24.0).unwrap(),
            F::from_f64(1.0 / 24.0).unwrap(),
        ],
        vec![F::zero()],
        vec![F::zero()],
        vec![F::zero()],
        vec![F::zero()],
        vec![F::zero()],
        vec![F::zero()],
        vec![F::zero()],
        vec![F::zero()],
    ];

    // The effective order is limited by the available derivative history.
    let order = state
        .adaptive_state
        .order
        .min(state.dy_history.len() + 1)
        .min(12);

    if order == 1 || state.dy_history.is_empty() {
        // First-order fallback: a single forward Euler step.
        let next_t = state.t + state.h;
        let next_y = &state.y + &(state.dy.clone() * state.h);

        let next_dy = f(next_t, next_y.view());
        *func_evals += 1;
        state.func_evals += 1;

        state.t = next_t;
        state.y = next_y;
        state.dy = next_dy;

        if state.adaptive_state.order < 2 {
            state.adaptive_state.order += 1;
        }

        return Ok(true);
    }

    let next_t = state.t + state.h;
    let ab_coefs = &ab_coeffs[order - 1];

    // Adams-Bashforth predictor: weighted sum of current and past derivatives.
    let mut ab_sum = state.dy.clone() * ab_coefs[0];

    for (i, &coeff) in ab_coefs.iter().enumerate().take(order).skip(1) {
        if i <= state.dy_history.len() {
            let idx = state.dy_history.len() - i;
            ab_sum += &(state.dy_history[idx].clone() * coeff);
        }
    }

    let y_pred = &state.y + &(ab_sum * state.h);

    let dy_pred = f(next_t, y_pred.view());
    *func_evals += 1;
    state.func_evals += 1;

    // Adams-Moulton corrector, limited to order 4 to match the table above.
    let am_order = order.min(4);
    let am_coefs = &am_coeffs[am_order - 1];

    let mut am_sum = dy_pred.clone() * am_coefs[0];
    for (i, &coeff) in am_coefs.iter().enumerate().take(am_order).skip(1) {
        if i == 1 {
            am_sum += &(state.dy.clone() * coeff);
        } else if i - 1 < state.dy_history.len() {
            let idx = state.dy_history.len() - (i - 1);
            am_sum += &(state.dy_history[idx].clone() * coeff);
        }
    }

    let y_corr = &state.y + &(am_sum * state.h);

    let dy_corr = f(next_t, y_corr.view());
    *func_evals += 1;
    state.func_evals += 1;

    // Error estimate from the predictor-corrector difference, scaled by the
    // error weights.
    let error = scaled_norm(&(&y_corr - &y_pred), &state.tol_scale);

    // Step-size factor ~ (1/error)^(1/(order+1)), capped below and above.
    let err_order = F::from_usize(order + 1).unwrap();
    let err_factor = if error > F::zero() {
        F::from_f64(0.9).unwrap() * (F::one() / error).powf(F::one() / err_order)
    } else {
        F::from_f64(5.0).unwrap()
    };

    let safety = F::from_f64(0.9).unwrap();
    let factor_max = F::from_f64(5.0).unwrap();
    let factor_min = F::from_f64(0.2).unwrap();
    let factor = safety * err_factor.min(factor_max).max(factor_min);

    if error <= F::one() {
        // Accept the step.
        state.t = next_t;
        state.y = y_corr;
        state.dy = dy_corr;

        state.h *= factor;

        // Order control: raise the order when the error is comfortably small,
        // lower it when the error is close to the acceptance threshold.
        if order < 12 && error < opts.rtol && state.dy_history.len() >= order {
            state.adaptive_state.order = (state.adaptive_state.order + 1).min(12);
        } else if order > 1 && error > F::from_f64(0.5).unwrap() {
            state.adaptive_state.order = (state.adaptive_state.order - 1).max(1);
        }

        state.adaptive_state.record_step(error);

        Ok(true)
    } else {
        // Reject the step and retry with a smaller step size.
        state.h *= factor;

        state.adaptive_state.record_step(error);

        if error > F::from_f64(10.0).unwrap() {
            return Err(IntegrateError::ConvergenceError(
                "Problem appears stiff - consider using BDF method".to_string(),
            ));
        }

        Ok(false)
    }
}

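/// Take one step with the stiff BDF method (orders 1 through 5): predict by
/// extrapolating the solution history, then solve the implicit BDF equation
/// with a Newton iteration using a finite-difference Jacobian.
/// Returns `Ok(true)` on acceptance.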
#[allow(dead_code)]
fn enhanced_bdf_step<F, Func>(
    state: &mut EnhancedLsodaState<F>,
    f: &Func,
    opts: &ODEOptions<F>,
    func_evals: &mut usize,
) -> IntegrateResult<bool>
where
    F: IntegrateFloat,
    Func: Fn(F, ArrayView1<F>) -> Array1<F>,
{
    // BDF coefficients for orders 1 through 5, leading coefficient first.
    let bdf_coefs: [Vec<F>; 5] = [
        // BDF1 (backward Euler)
        vec![F::one(), F::from_f64(-1.0).unwrap()],
        // BDF2
        vec![
            F::from_f64(3.0 / 2.0).unwrap(),
            F::from_f64(-2.0).unwrap(),
            F::from_f64(1.0 / 2.0).unwrap(),
        ],
        // BDF3
        vec![
            F::from_f64(11.0 / 6.0).unwrap(),
            F::from_f64(-3.0).unwrap(),
            F::from_f64(3.0 / 2.0).unwrap(),
            F::from_f64(-1.0 / 3.0).unwrap(),
        ],
        // BDF4
        vec![
            F::from_f64(25.0 / 12.0).unwrap(),
            F::from_f64(-4.0).unwrap(),
            F::from_f64(3.0).unwrap(),
            F::from_f64(-4.0 / 3.0).unwrap(),
            F::from_f64(1.0 / 4.0).unwrap(),
        ],
        // BDF5
        vec![
            F::from_f64(137.0 / 60.0).unwrap(),
            F::from_f64(-5.0).unwrap(),
            F::from_f64(5.0).unwrap(),
            F::from_f64(-10.0 / 3.0).unwrap(),
            F::from_f64(5.0 / 4.0).unwrap(),
            F::from_f64(-1.0 / 5.0).unwrap(),
        ],
    ];

    // The effective order is limited by the available solution history.
    let order = state.adaptive_state.order.min(state.y_history.len()).min(5);

    if order == 1 || state.y_history.is_empty() {
        // BDF1 (backward Euler): solve y_{n+1} - y_n - h * f(t_{n+1}, y_{n+1}) = 0
        // with a Newton iteration.
        let next_t = state.t + state.h;

        let y_pred = state.y.clone();

        let max_newton_iters = 10;
        let newton_tol = F::from_f64(1e-8).unwrap();
        let mut y_next = y_pred.clone();
        let mut converged = false;
        let mut iter_count = 0;

        let mut f_eval = f(next_t, y_next.view());
        *func_evals += 1;
        state.func_evals += 1;

        while iter_count < max_newton_iters {
            let residual = &y_next - &state.y - &(f_eval.clone() * state.h);

            let error = scaled_norm(&residual, &state.tol_scale);

            if error <= newton_tol {
                converged = true;
                break;
            }

            let eps = F::from_f64(1e-8).unwrap();
            let n_dim = y_next.len();

            // Reuse the cached Jacobian unless it is missing, too old, or this
            // is the first iteration of the step.
            let compute_new_jacobian =
                state.jacobian.is_none() || state.jacobian_age > 20 || iter_count == 0;
            let jacobian = if compute_new_jacobian {
                state.n_jac += 1;

                let new_jacobian = finite_difference_jacobian(f, next_t, &y_next, &f_eval, eps);

                // Newton iteration matrix: I - h * J.
                let mut jac = Array2::<F>::eye(n_dim);
                for i in 0..n_dim {
                    for j in 0..n_dim {
                        jac[[i, j]] = if i == j { F::one() } else { F::zero() };
                        jac[[i, j]] -= state.h * new_jacobian[[i, j]];
                    }
                }

                state.jacobian = Some(jac.clone());
                state.jacobian_age = 0;
                jac
            } else {
                state.jacobian.clone().unwrap()
            };

            state.n_lu += 1;

            let delta_y = match solve_linear_system(&jacobian, &residual) {
                Ok(delta) => delta,
                Err(_) => {
                    // Singular or ill-conditioned iteration matrix: shrink the
                    // step and reject.
                    state.h *= F::from_f64(0.5).unwrap();
                    return Ok(false);
                }
            };

            y_next = &y_next - &delta_y;

            f_eval = f(next_t, y_next.view());
            *func_evals += 1;
            state.func_evals += 1;

            iter_count += 1;

            state.adaptive_state.record_step(error);
        }

        if !converged {
            state.h *= F::from_f64(0.5).unwrap();

            if state.h < opts.min_step.unwrap_or(F::from_f64(1e-10).unwrap()) {
                return Err(IntegrateError::ConvergenceError(
                    "BDF1 failed to converge - problem might be non-stiff".to_string(),
                ));
            }

            return Ok(false);
        }

        state.t = next_t;
        state.y = y_next;
        state.dy = f_eval;

        if state.adaptive_state.order < 2 {
            state.adaptive_state.order += 1;
        }

        return Ok(true);
    }

    let coeffs = &bdf_coefs[order - 1];

    let next_t = state.t + state.h;

    // Predict the new solution by extrapolating the stored history.
    let mut y_pred = state.y.clone();

    if order > 1 && !state.y_history.is_empty() {
        let _y_prev = &state.y_history[state.y_history.len() - 1];

        y_pred = extrapolate(&state.t_history[..], &state.y_history[..], next_t)?;
    }

    let max_newton_iters = 10;
    let newton_tol = F::from_f64(1e-8).unwrap();
    let mut y_next = y_pred.clone();
    let mut converged = false;
    let mut iter_count = 0;

    let mut f_eval = f(next_t, y_next.view());
    *func_evals += 1;
    state.func_evals += 1;

    // Newton iteration on the BDF residual
    //     c0 * y_{n+1} + c1 * y_n + c2 * y_{n-1} + ... - h * f(t_{n+1}, y_{n+1}) = 0.
    // The coefficients already carry their signs, so the history terms are
    // accumulated with addition.
    while iter_count < max_newton_iters {
        let mut residual = y_next.clone() * coeffs[0];

        residual += &(state.y.clone() * coeffs[1]);

        for (j, &coeff) in coeffs.iter().enumerate().skip(2) {
            if j - 1 < state.y_history.len() {
                let idx = state.y_history.len() - (j - 1);
                residual += &(state.y_history[idx].clone() * coeff);
            }
        }

        residual -= &(f_eval.clone() * state.h);

        let error = scaled_norm(&residual, &state.tol_scale);

        if error <= newton_tol {
            converged = true;
            break;
        }

        let eps = F::from_f64(1e-8).unwrap();
        let n_dim = y_next.len();

        let compute_new_jacobian =
            state.jacobian.is_none() || state.jacobian_age > 20 || iter_count == 0;
        let jacobian = if compute_new_jacobian {
            state.n_jac += 1;

            let new_jacobian = finite_difference_jacobian(f, next_t, &y_next, &f_eval, eps);

            // Newton iteration matrix: c0 * I - h * J.
            let mut jac = Array2::<F>::zeros((n_dim, n_dim));
            for i in 0..n_dim {
                for j in 0..n_dim {
                    jac[[i, j]] = if i == j { coeffs[0] } else { F::zero() };
                    jac[[i, j]] -= state.h * new_jacobian[[i, j]];
                }
            }

            state.jacobian = Some(jac.clone());
            state.jacobian_age = 0;
            jac
        } else {
            state.jacobian.clone().unwrap()
        };

        state.n_lu += 1;

        let delta_y = match solve_linear_system(&jacobian, &residual) {
            Ok(delta) => delta,
            Err(_) => {
                state.h *= F::from_f64(0.5).unwrap();
                return Ok(false);
            }
        };

        y_next = &y_next - &delta_y;

        f_eval = f(next_t, y_next.view());
        *func_evals += 1;
        state.func_evals += 1;

        iter_count += 1;

        state.adaptive_state.record_step(error);
    }

    if !converged {
        state.h *= F::from_f64(0.5).unwrap();

        if iter_count >= max_newton_iters - 1 {
            // Record a rough residual norm for the stiffness diagnostics.
            let final_residual = &(y_next.clone() * coeffs[0])
                + &(state.y.clone() * coeffs[1])
                + &(state.y_history.last().unwrap_or(&state.y).clone() * coeffs[2]);
            let final_error = scaled_norm(&final_residual, &state.tol_scale);

            state.adaptive_state.record_step(final_error);
        }

        if state.h < opts.min_step.unwrap_or(F::from_f64(1e-10).unwrap()) {
            return Err(IntegrateError::ConvergenceError(
                "BDF failed to converge - problem might be non-stiff".to_string(),
            ));
        }

        return Ok(false);
    }

    state.t = next_t;
    state.y = y_next;
    state.dy = f_eval;

    // Simple step-size and order control based on how quickly Newton converged.
    if iter_count <= 2 {
        state.h *= F::from_f64(1.1).unwrap();

        if state.adaptive_state.order < 5 && state.y_history.len() >= state.adaptive_state.order {
            state.adaptive_state.order += 1;
        }
    } else if iter_count >= 8 {
        state.h *= F::from_f64(0.8).unwrap();

        if state.adaptive_state.order > 1 {
            state.adaptive_state.order -= 1;
        }
    }

    state.jacobian_age += 1;

    Ok(true)
}
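
// A minimal usage sketch (not part of the original module): it exercises the
// solver on the linear test problem dy/dt = -y. It assumes that `ODEOptions`
// implements `Default` and that the field names used below (rtol, atol,
// max_steps) match the real definition; adjust to the actual `ODEOptions`
// API if they differ.
#[cfg(test)]
mod enhanced_lsoda_sketch_tests {
    use super::enhanced_lsoda_method;
    use crate::ode::types::ODEOptions;
    use scirs2_core::ndarray::{Array1, ArrayView1};

    #[test]
    fn decays_exponentially() {
        // dy/dt = -y, y(0) = 1; exact solution y(t) = exp(-t).
        let f = |_t: f64, y: ArrayView1<f64>| -y.to_owned();
        let opts = ODEOptions {
            rtol: 1e-6,
            atol: 1e-8,
            max_steps: 10_000,
            // Hypothetical: relies on a Default impl for the remaining fields.
            ..Default::default()
        };

        let result = enhanced_lsoda_method(f, [0.0, 1.0], Array1::from_vec(vec![1.0]), opts)
            .expect("integration should succeed");

        let y_end = result.y.last().unwrap()[0];
        assert!((y_end - (-1.0f64).exp()).abs() < 1e-3);
    }
}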