scirs2_integrate/ode/utils/jacobian/mod.rs
//! Jacobian approximation and handling utilities for ODE solvers
//!
//! This module provides tools for computing, updating, and reusing Jacobian matrices
//! in implicit ODE solvers. It implements various approximation strategies, reuse logic,
//! and specialized techniques for different problem types.
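//!
//! # Examples
//!
//! A minimal sketch of the intended workflow (the right-hand side, dimensions, and
//! values below are illustrative placeholders, not part of this module's API):
//!
//! ```ignore
//! use scirs2_core::ndarray::{Array1, ArrayView1};
//!
//! // Hypothetical right-hand side f(t, y) of a 2-dimensional linear system
//! let f = |_t: f64, y: ArrayView1<f64>| -> Array1<f64> {
//!     Array1::from_vec(vec![-y[0], -2.0 * y[1]])
//! };
//!
//! let mut manager = JacobianManager::<f64>::with_auto_strategy(2, false);
//! let y = Array1::from_vec(vec![1.0, 1.0]);
//! if manager.needs_update(0.0, &y, None) {
//!     let jac = manager.update_jacobian(0.0, &y, &f, None)?;
//!     assert_eq!(jac.shape(), &[2, 2]);
//! }
//! ```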

mod autodiff;
mod newton;
mod parallel;
mod specialized;

pub use autodiff::*;
pub use newton::*;
pub use parallel::*;
pub use specialized::*;

use crate::common::IntegrateFloat;
use crate::error::IntegrateResult;
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};

/// Strategy for Jacobian approximation
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum JacobianStrategy {
    /// Standard finite difference approximation
    FiniteDifference,
    /// Sparse finite difference (more efficient for sparse Jacobians)
    SparseFiniteDifference,
    /// Broyden's update (quasi-Newton)
    BroydenUpdate,
    /// Modified Newton (reuse Jacobian for multiple steps)
    ModifiedNewton,
    /// Colored finite difference (for systems with structure)
    ColoredFiniteDifference,
    /// Parallel finite difference (for large systems)
    ParallelFiniteDifference,
    /// Parallel sparse finite difference (for large sparse systems)
    ParallelSparseFiniteDifference,
    /// Automatic differentiation (exact derivatives)
    AutoDiff,
    /// Adaptive selection (uses autodiff if available, falls back to finite difference)
    #[default]
    Adaptive,
}

/// Compute a Jacobian using finite differences (compatibility wrapper for
/// right-hand-side functions that return `IntegrateResult`)
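///
/// A minimal sketch (the closure is an illustrative placeholder for a fallible
/// right-hand side):
///
/// ```ignore
/// let f = |_t: f64, y: &ArrayView1<f64>| -> IntegrateResult<Array1<f64>> {
///     Ok(Array1::from_vec(vec![-y[0]]))
/// };
/// let y = Array1::from_vec(vec![1.0]);
/// let jac = compute_jacobian(&f, 0.0, &y)?;
/// ```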
#[allow(dead_code)]
pub fn compute_jacobian<F, Func>(f: &Func, t: F, y: &Array1<F>) -> IntegrateResult<Array2<F>>
where
    F: IntegrateFloat,
    Func: Fn(F, &ArrayView1<F>) -> IntegrateResult<Array1<F>>,
{
    let f_current = f(t, &y.view())?;
    // Convert the function to the right signature for finite_difference_jacobian
    let f_unwrapped = |t: F, y: ArrayView1<F>| -> Array1<F> {
        match f(t, &y) {
            Ok(val) => val,
            Err(_) => Array1::zeros(y.len()), // Fallback to zeros on error
        }
    };
    Ok(crate::ode::utils::common::finite_difference_jacobian(
        &f_unwrapped,
        t,
        y,
        &f_current,
        F::from_f64(1e-8).unwrap(),
    ))
}

// Re-export finite_difference_jacobian for direct use
pub use crate::ode::utils::common::finite_difference_jacobian;

/// Structure of the Jacobian matrix
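///
/// For banded systems, the variant carries the lower and upper bandwidths
/// (a minimal sketch; the bandwidths are illustrative):
///
/// ```ignore
/// let structure = JacobianStructure::Banded { lower: 1, upper: 2 };
/// ```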
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum JacobianStructure {
    /// Dense matrix (default)
    #[default]
    Dense,
    /// Banded matrix (efficient for certain types of ODEs)
    Banded { lower: usize, upper: usize },
    /// Sparse matrix (large systems with few nonzeros)
    Sparse,
    /// Structured matrix (e.g., Toeplitz, circulant)
    Structured,
}

/// Manages Jacobian computation, updates, and reuse
#[derive(Debug, Clone)]
pub struct JacobianManager<F: IntegrateFloat> {
    /// The current Jacobian approximation
    jacobian: Option<Array2<F>>,
    /// The system state at which the Jacobian was computed
    state_point: Option<(F, Array1<F>)>, // (t, y)
    /// The function evaluation at the state point
    f_eval: Option<Array1<F>>,
    /// Number of steps since the last full Jacobian computation
    age: usize,
    /// Maximum age before recomputation
    max_age: usize,
    /// Approximation strategy
    strategy: JacobianStrategy,
    /// Structure information
    #[allow(dead_code)]
    structure: JacobianStructure,
    /// Condition number estimate (if available)
    #[allow(dead_code)]
    condition_estimate: Option<F>,
    /// Whether the current Jacobian has been factorized
    factorized: bool,
    /// Whether parallel computation is available for this type
    #[allow(dead_code)]
    parallel_available: bool,
}

impl<F: IntegrateFloat> Default for JacobianManager<F> {
    fn default() -> Self {
        Self::new()
    }
}

impl<F: IntegrateFloat> JacobianManager<F> {
    /// Create a new Jacobian manager with default settings
    pub fn new() -> Self {
        JacobianManager {
            jacobian: None,
            state_point: None,
            f_eval: None,
            age: 0,
            max_age: 50, // Recompute after 50 steps by default
            strategy: JacobianStrategy::default(),
            structure: JacobianStructure::default(),
            condition_estimate: None,
            factorized: false,
            parallel_available: false,
        }
    }

    /// Create a new Jacobian manager with a specific strategy and structure
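    ///
    /// A minimal sketch (with `BroydenUpdate`, the Jacobian is kept for up to 20 steps
    /// before a full recomputation):
    ///
    /// ```ignore
    /// let manager = JacobianManager::<f64>::with_strategy(
    ///     JacobianStrategy::BroydenUpdate,
    ///     JacobianStructure::Dense,
    /// );
    /// ```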
    pub fn with_strategy(strategy: JacobianStrategy, structure: JacobianStructure) -> Self {
        JacobianManager {
            jacobian: None,
            state_point: None,
            f_eval: None,
            age: 0,
            max_age: match strategy {
                JacobianStrategy::ModifiedNewton => 50,
                JacobianStrategy::BroydenUpdate => 20,
                JacobianStrategy::ParallelFiniteDifference => 1,
                JacobianStrategy::ParallelSparseFiniteDifference => 1,
                JacobianStrategy::FiniteDifference => 1,
                JacobianStrategy::SparseFiniteDifference => 1,
                JacobianStrategy::ColoredFiniteDifference => 1,
                JacobianStrategy::AutoDiff => 1,
                JacobianStrategy::Adaptive => 1,
            },
            strategy,
            structure,
            condition_estimate: None,
            factorized: false,
            parallel_available: false,
        }
    }

    /// Create a Jacobian manager with an automatically selected strategy
    /// based on system size and structure
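    ///
    /// A minimal sketch (the dimension is illustrative):
    ///
    /// ```ignore
    /// // A 500-dimensional banded system selects a parallel, banded configuration
    /// // when the `parallel` feature is enabled; smaller systems fall back to
    /// // Broyden updates or plain finite differences.
    /// let manager = JacobianManager::<f64>::with_auto_strategy(500, true);
    /// ```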
    pub fn with_auto_strategy(n_dim: usize, is_banded: bool) -> Self {
        let (strategy, structure) = if n_dim > 100 {
            // For very large systems, use parallel computation when available
            let parallel_available = cfg!(feature = "parallel");

            if parallel_available {
                if is_banded {
                    (
                        JacobianStrategy::ParallelSparseFiniteDifference,
                        JacobianStructure::Banded {
                            lower: n_dim / 10,
                            upper: n_dim / 10,
                        },
                    )
                } else {
                    (
                        JacobianStrategy::ParallelFiniteDifference,
                        JacobianStructure::Dense,
                    )
                }
            } else {
                // Fall back to modified Newton for large systems without parallel support
                (JacobianStrategy::ModifiedNewton, JacobianStructure::Dense)
            }
        } else if n_dim > 20 {
            // For medium-sized systems, use Broyden updates
            (JacobianStrategy::BroydenUpdate, JacobianStructure::Dense)
        } else {
            // For small systems, try autodiff if available
            let autodiff_available = is_autodiff_available();

            if autodiff_available {
                (JacobianStrategy::AutoDiff, JacobianStructure::Dense)
            } else {
                (JacobianStrategy::FiniteDifference, JacobianStructure::Dense)
            }
        };

        Self::with_strategy(strategy, structure)
    }

    /// Check if the Jacobian needs to be recomputed
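    ///
    /// `force_age` optionally overrides the manager's `max_age` threshold for this check.
    /// A minimal sketch, with `manager`, `t`, `y`, and `f` as in the module-level example:
    ///
    /// ```ignore
    /// // Recompute once the stored Jacobian is more than 10 steps old or the state
    /// // has drifted too far from where it was last evaluated.
    /// if manager.needs_update(t, &y, Some(10)) {
    ///     manager.update_jacobian(t, &y, &f, None)?;
    /// }
    /// ```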
    pub fn needs_update(&self, t: F, y: &Array1<F>, force_age: Option<usize>) -> bool {
        // Always recompute if we don't have a Jacobian yet
        if self.jacobian.is_none() {
            return true;
        }

        // Check age against the threshold (possibly overridden)
        let max_age = force_age.unwrap_or(self.max_age);
        if self.age >= max_age {
            return true;
        }

        // For certain strategies, we always recompute
        match self.strategy {
            JacobianStrategy::FiniteDifference => self.age > 0,
            _ => {
                // For other strategies, check if the state has changed significantly
                if let Some((old_t, old_y)) = &self.state_point {
                    // Calculate relative distance between current and previous state
                    let t_diff = (t - *old_t).abs();
                    let mut y_diff = F::zero();
                    for i in 0..y.len() {
                        let rel_diff = if old_y[i].abs() > F::from_f64(1e-10).unwrap() {
                            (y[i] - old_y[i]).abs() / old_y[i].abs()
                        } else {
                            (y[i] - old_y[i]).abs()
                        };
                        y_diff = y_diff.max(rel_diff);
                    }

                    // Determine thresholds based on strategy
                    let (t_threshold, y_threshold) = match self.strategy {
                        JacobianStrategy::ModifiedNewton => (
                            F::from_f64(0.3).unwrap(), // 30% change in time
                            F::from_f64(0.3).unwrap(), // 30% change in y
                        ),
                        JacobianStrategy::BroydenUpdate => (
                            F::from_f64(0.1).unwrap(), // 10% change in time
                            F::from_f64(0.1).unwrap(), // 10% change in y
                        ),
                        _ => (
                            F::from_f64(0.01).unwrap(), // 1% change in time
                            F::from_f64(0.01).unwrap(), // 1% change in y
                        ),
                    };

                    // Check if the state has changed enough to warrant recomputation
                    t_diff > t_threshold || y_diff > y_threshold
                } else {
                    // No previous state, so recompute
                    true
                }
            }
        }
    }

    /// Compute or update the Jacobian matrix
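    ///
    /// When `scale` is provided, the stored matrix is the scaled iteration matrix
    /// `I - scale * J` (the form used by implicit integration methods) rather than the
    /// raw Jacobian `J`. A minimal sketch, with `manager`, `t`, `y`, and `f` as in the
    /// module-level example and `h` and `gamma` as illustrative step-size parameters:
    ///
    /// ```ignore
    /// // Raw Jacobian approximation
    /// let jac = manager.update_jacobian(t, &y, &f, None)?;
    /// // Scaled iteration matrix I - h * gamma * J for an implicit step
    /// let iteration_matrix = manager.update_jacobian(t, &y, &f, Some(h * gamma))?;
    /// ```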
    pub fn update_jacobian<Func>(
        &mut self,
        t: F,
        y: &Array1<F>,
        f: &Func,
        scale: Option<F>,
    ) -> IntegrateResult<&Array2<F>>
    where
        Func: Fn(F, ArrayView1<F>) -> Array1<F> + Clone,
    {
        let scale_val = scale.unwrap_or_else(|| F::from_f64(1.0).unwrap());
        let n = y.len();

        match self.strategy {
            JacobianStrategy::FiniteDifference | JacobianStrategy::SparseFiniteDifference => {
                // Compute function at current point (if not available)
                let f_current = if let Some(f_val) = &self.f_eval {
                    f_val.clone()
                } else {
                    f(t, y.view())
                };

                // Create or resize Jacobian if needed
                let mut jac = if let Some(j) = &self.jacobian {
                    if j.shape() == [n, n] {
                        j.clone()
                    } else {
                        Array2::zeros((n, n))
                    }
                } else {
                    Array2::zeros((n, n))
                };

                // Perturbation size, scaled by variable magnitude
                let base_eps = F::from_f64(1e-8).unwrap();

                if self.strategy == JacobianStrategy::SparseFiniteDifference {
                    // For sparse problems, use specialized algorithm (future work)
                    // This would involve using graph coloring to reduce evaluations
                    // For now, fall back to standard finite difference
                    self.compute_dense_finite_difference(t, y, &f_current, f, &mut jac, base_eps);
                } else {
                    // Standard finite difference for each column
                    self.compute_dense_finite_difference(t, y, &f_current, f, &mut jac, base_eps);
                }

                // Apply scaling if needed (e.g., for implicit integration methods)
                if scale_val != F::one() {
                    for i in 0..n {
                        for j in 0..n {
                            if i == j {
                                jac[[i, j]] = F::one() - scale_val * jac[[i, j]];
                            } else {
                                jac[[i, j]] = -scale_val * jac[[i, j]];
                            }
                        }
                    }
                }

                // Update state information
                self.jacobian = Some(jac);
                self.state_point = Some((t, y.clone()));
                self.f_eval = Some(f_current);
                self.age = 0;
                self.factorized = false;

                // Return reference to the Jacobian
                Ok(self.jacobian.as_ref().unwrap())
            }
            JacobianStrategy::ParallelFiniteDifference
            | JacobianStrategy::ParallelSparseFiniteDifference => {
                // Compute function at current point (if not available)
                let f_current = if let Some(f_val) = &self.f_eval {
                    f_val.clone()
                } else {
                    f(t, y.view())
                };

                // For parallel strategies, we need additional trait bounds (F: Send + Sync, Func: Sync).
                // Since we can't guarantee them at compile time in this generic method, we'll use
                // the serial implementation as a fallback. For parallel computation, use the
                // update_jacobian_parallel method which has the proper trait bounds.
                let jac =
                    finite_difference_jacobian(&f, t, y, &f_current, F::from_f64(1e-8).unwrap());

                // Apply scaling if needed
                let scaled_jac = if scale_val != F::one() {
                    let mut scaled = Array2::<F>::zeros((n, n));
                    for i in 0..n {
                        for j in 0..n {
                            if i == j {
                                scaled[[i, j]] = F::one() - scale_val * jac[[i, j]];
                            } else {
                                scaled[[i, j]] = -scale_val * jac[[i, j]];
                            }
                        }
                    }
                    scaled
                } else {
                    jac
                };

                // Update state information
                self.jacobian = Some(scaled_jac);
                self.state_point = Some((t, y.clone()));
                self.f_eval = Some(f_current);
                self.age = 0;
                self.factorized = false;

                // Return reference to the Jacobian
                Ok(self.jacobian.as_ref().unwrap())
            }
            JacobianStrategy::BroydenUpdate => {
                // Check if we need to do a full recomputation
                if self.jacobian.is_none() || self.age >= self.max_age || self.state_point.is_none()
                {
                    // Do a full computation using finite differences
                    return self.update_jacobian_with_strategy(
                        t,
                        y,
                        f,
                        scale,
                        JacobianStrategy::FiniteDifference,
                    );
                }

                // Perform a Broyden update to the existing Jacobian
                let (_old_t, old_y) = self.state_point.as_ref().unwrap();
                let old_f = self.f_eval.as_ref().unwrap();

                // Calculate new function value
                let new_f = f(t, y.view());

                // Calculate differences
                let delta_y = y - old_y;
                let delta_f = &new_f - old_f;

                // Get existing Jacobian
                let mut jac = self.jacobian.as_ref().unwrap().clone();

                // Broyden's update formula: J_new = J_old + (df - J_old * dy) * dy^T / (dy^T * dy)
                let mut jac_dy = Array1::zeros(n);
                for i in 0..n {
                    for j in 0..n {
                        jac_dy[i] += jac[[i, j]] * delta_y[j];
                    }
                }

                let dy_norm_squared: F = delta_y.iter().map(|&x| x * x).sum();
                if dy_norm_squared > F::from_f64(1e-14).unwrap() {
                    for i in 0..n {
                        for j in 0..n {
                            jac[[i, j]] += (delta_f[i] - jac_dy[i]) * delta_y[j] / dy_norm_squared;
                        }
                    }
                }

                // Apply scaling if needed
                if scale_val != F::one() {
                    for i in 0..n {
                        for j in 0..n {
                            if i == j {
                                jac[[i, j]] = F::one() - scale_val * jac[[i, j]];
                            } else {
                                jac[[i, j]] = -scale_val * jac[[i, j]];
                            }
                        }
                    }
                }

                // Update state information
                self.jacobian = Some(jac);
                self.state_point = Some((t, y.clone()));
                self.f_eval = Some(new_f);
                self.age += 1;
                self.factorized = false;

                Ok(self.jacobian.as_ref().unwrap())
            }
            JacobianStrategy::ModifiedNewton => {
                // Check if we need to do a full recomputation
                if self.jacobian.is_none() || self.age >= self.max_age {
                    // Compute a new Jacobian using finite differences
                    return self.update_jacobian_with_strategy(
                        t,
                        y,
                        f,
                        scale,
                        JacobianStrategy::FiniteDifference,
                    );
                }

                // Otherwise just reuse the existing Jacobian
                self.age += 1;

                // Update function evaluation at current point
                let f_current = f(t, y.view());
                self.f_eval = Some(f_current);

                Ok(self.jacobian.as_ref().unwrap())
            }
            JacobianStrategy::ColoredFiniteDifference => {
                // This is for future implementation - coloring would reduce the number of
                // function evaluations needed for systems with structure
                // For now, fall back to standard finite difference
                self.update_jacobian_with_strategy(
                    t,
                    y,
                    f,
                    scale,
                    JacobianStrategy::FiniteDifference,
                )
            }
            JacobianStrategy::AutoDiff => {
                // Compute function at current point (if not available)
                let f_current = if let Some(f_val) = &self.f_eval {
                    f_val.clone()
                } else {
                    f(t, y.view())
                };

                // Use autodiff to compute exact Jacobian
                let jac = autodiff_jacobian(f, t, y, &f_current, F::one())?;

                // Apply scaling if needed
                let scaled_jac = if scale_val != F::one() {
                    let mut scaled = Array2::<F>::zeros((n, n));
                    for i in 0..n {
                        for j in 0..n {
                            if i == j {
                                scaled[[i, j]] = F::one() - scale_val * jac[[i, j]];
                            } else {
                                scaled[[i, j]] = -scale_val * jac[[i, j]];
                            }
                        }
                    }
                    scaled
                } else {
                    jac
                };

                // Update state information
                self.jacobian = Some(scaled_jac);
                self.state_point = Some((t, y.clone()));
                self.f_eval = Some(f_current);
                self.age = 0;
                self.factorized = false;

                Ok(self.jacobian.as_ref().unwrap())
            }
            JacobianStrategy::Adaptive => {
                // For the adaptive strategy, try autodiff first if it is available
                let f_current = f(t, y.view());

                #[cfg(feature = "autodiff")]
                {
                    // adaptive_jacobian handles autodiff with the proper trait bounds
                    self.jacobian = Some(adaptive_jacobian(
                        f,
                        t,
                        y,
                        &f_current,
                        scale.unwrap_or(F::one()),
                    )?);
                }
                #[cfg(not(feature = "autodiff"))]
                {
                    // Use finite differences directly if autodiff is not available
                    self.jacobian = Some(finite_difference_jacobian(
                        f,
                        t,
                        y,
                        &f_current,
                        F::from_f64(1e-8).unwrap(),
                    ));
                }

                // Update state information
                self.state_point = Some((t, y.clone()));
                self.f_eval = Some(f_current);
                self.age = 0;
                self.factorized = false;

                Ok(self.jacobian.as_ref().unwrap())
            }
        }
    }

    /// Helper function to switch strategy temporarily
    fn update_jacobian_with_strategy<Func>(
        &mut self,
        t: F,
        y: &Array1<F>,
        f: &Func,
        scale: Option<F>,
        strategy: JacobianStrategy,
    ) -> IntegrateResult<&Array2<F>>
    where
        Func: Fn(F, ArrayView1<F>) -> Array1<F> + Clone,
    {
        let original_strategy = self.strategy;
        self.strategy = strategy;
        self.update_jacobian(t, y, f, scale)?;
        self.strategy = original_strategy;
        Ok(self.jacobian.as_ref().unwrap())
    }

    /// Helper function to compute a dense finite difference Jacobian
    fn compute_dense_finite_difference<Func>(
        &self,
        t: F,
        y: &Array1<F>,
        f_current: &Array1<F>,
        f: &Func,
        jac: &mut Array2<F>,
        base_eps: F,
    ) where
        Func: Fn(F, ArrayView1<F>) -> Array1<F>,
    {
        let n = y.len();

        for j in 0..n {
            // Compute perturbation size scaled by variable magnitude
            let eps = base_eps * (F::one() + y[j].abs()).max(F::one());

            // Perturb the j-th component
            let mut y_perturbed = y.clone();
            y_perturbed[j] += eps;

            // Evaluate function at perturbed point
            let f_perturbed = f(t, y_perturbed.view());

            // Compute j-th column of Jacobian
            for i in 0..n {
                jac[[i, j]] = (f_perturbed[i] - f_current[i]) / eps;
            }
        }
    }

    /// Get the current Jacobian (returns None if not computed)
    pub fn jacobian(&self) -> Option<&Array2<F>> {
        self.jacobian.as_ref()
    }

    /// Get the age of the current Jacobian
    pub fn age(&self) -> usize {
        self.age
    }

    /// Update the age threshold for recomputation
    pub fn set_max_age(&mut self, max_age: usize) {
        self.max_age = max_age;
    }

    /// Mark the Jacobian as factorized
    pub fn mark_factorized(&mut self, factorized: bool) {
        self.factorized = factorized;
    }

    /// Check if the Jacobian is factorized
    pub fn is_factorized(&self) -> bool {
        self.factorized
    }

    /// Compute or update the Jacobian matrix with parallel support
    ///
    /// This version requires additional trait bounds for parallel computation
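    ///
    /// A minimal sketch, assuming the `parallel` feature is enabled and `f` is `Sync`
    /// (with `t`, `y`, and `f` as in the module-level example):
    ///
    /// ```ignore
    /// let mut manager = JacobianManager::<f64>::with_strategy(
    ///     JacobianStrategy::ParallelFiniteDifference,
    ///     JacobianStructure::Dense,
    /// );
    /// let jac = manager.update_jacobian_parallel(t, &y, &f, None)?;
    /// ```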
    pub fn update_jacobian_parallel<Func>(
        &mut self,
        t: F,
        y: &Array1<F>,
        f: &Func,
        scale: Option<F>,
    ) -> IntegrateResult<&Array2<F>>
    where
        F: Send + Sync,
        Func: Fn(F, ArrayView1<F>) -> Array1<F> + Clone + Sync,
    {
        let scale_val = scale.unwrap_or_else(|| F::from_f64(1.0).unwrap());
        let n = y.len();

        // Handle parallel strategies with proper trait bounds
        match self.strategy {
            JacobianStrategy::ParallelFiniteDifference => {
                let f_current = if let Some(f_val) = &self.f_eval {
                    f_val.clone()
                } else {
                    f(t, y.view())
                };

                let jac = parallel_finite_difference_jacobian(
                    &f,
                    t,
                    y,
                    &f_current,
                    F::from_f64(1e-8).unwrap(),
                )?;

                // Apply scaling if needed
                let scaled_jac = if scale_val != F::one() {
                    let mut scaled = Array2::<F>::zeros((n, n));
                    for i in 0..n {
                        for j in 0..n {
                            if i == j {
                                scaled[[i, j]] = F::one() - scale_val * jac[[i, j]];
                            } else {
                                scaled[[i, j]] = -scale_val * jac[[i, j]];
                            }
                        }
                    }
                    scaled
                } else {
                    jac
                };

                self.jacobian = Some(scaled_jac);
                self.state_point = Some((t, y.clone()));
                self.f_eval = Some(f_current);
                self.age = 0;
                self.factorized = false;

                Ok(self.jacobian.as_ref().unwrap())
            }
            JacobianStrategy::ParallelSparseFiniteDifference => {
                let f_current = if let Some(f_val) = &self.f_eval {
                    f_val.clone()
                } else {
                    f(t, y.view())
                };

                let jac = parallel_sparse_jacobian(
                    &f,
                    t,
                    y,
                    &f_current,
                    None,
                    F::from_f64(1e-8).unwrap(),
                )?;

                // Apply scaling if needed
                let scaled_jac = if scale_val != F::one() {
                    let mut scaled = Array2::<F>::zeros((n, n));
                    for i in 0..n {
                        for j in 0..n {
                            if i == j {
                                scaled[[i, j]] = F::one() - scale_val * jac[[i, j]];
                            } else {
                                scaled[[i, j]] = -scale_val * jac[[i, j]];
                            }
                        }
                    }
                    scaled
                } else {
                    jac
                };

                self.jacobian = Some(scaled_jac);
                self.state_point = Some((t, y.clone()));
                self.f_eval = Some(f_current);
                self.age = 0;
                self.factorized = false;

                Ok(self.jacobian.as_ref().unwrap())
            }
            _ => {
                // For non-parallel strategies, use the regular method
                self.update_jacobian(t, y, f, scale)
            }
        }
    }
}