differential_equations/methods/irk/adaptive/
ordinary.rs

//! Adaptive implicit Runge-Kutta methods for ODEs
2
3use super::{ImplicitRungeKutta, Ordinary, Adaptive};
4use crate::{
5    Error, Status,
6    alias::Evals,
7    methods::h_init::InitialStepSize,
8    interpolate::{Interpolation, cubic_hermite_interpolate},
9    ode::{OrdinaryNumericalMethod, ODE},
10    traits::{CallBackData, Real, State},
11    utils::{constrain_step_size, validate_step_size_parameters},
12};
13
14impl<T: Real, V: State<T>, D: CallBackData, const O: usize, const S: usize, const I: usize> OrdinaryNumericalMethod<T, V, D> for ImplicitRungeKutta<Ordinary, Adaptive, T, V, D, O, S, I> {
15    fn init<F>(&mut self, ode: &F, t0: T, tf: T, y0: &V) -> Result<Evals, Error<T, V>>
16    where
17        F: ODE<T, V, D>,
18    {
19        let mut evals = Evals::new();        
20        
21        // If h0 is zero, calculate initial step size
22        if self.h0 == T::zero() {
23            // Use adaptive step size calculation for implicit methods
24            self.h0 = InitialStepSize::<Ordinary>::compute(ode, t0, tf, y0, self.order, self.rtol, self.atol, self.h_min, self.h_max, &mut evals);
25        }
26
27        // Check bounds
28        match validate_step_size_parameters::<T, V, D>(self.h0, self.h_min, self.h_max, t0, tf) {
29            Ok(h0) => self.h = h0,
30            Err(status) => return Err(status),
31        }
32        
33        // Initialize Statistics
34        self.stiffness_counter = 0;
35        self.newton_iterations = 0;
36        self.jacobian_evaluations = 0;
37        self.lu_decompositions = 0;
38
39        // Initialize State
40        self.t = t0;
41        self.y = *y0;
42        ode.diff(self.t, &self.y, &mut self.dydt);
43        evals.fcn += 1;
44
45        // Initialize previous state
46        self.t_prev = self.t;
47        self.y_prev = self.y;
48        self.dydt_prev = self.dydt;
49
50        // Initialize linear algebra workspace with proper dimensions
51        let dim = y0.len();
52        let newton_system_size = self.stages * dim;
53        self.jacobian_matrix = nalgebra::DMatrix::zeros(dim, dim);
54        self.newton_matrix = nalgebra::DMatrix::zeros(newton_system_size, newton_system_size);
55        self.rhs_newton = nalgebra::DVector::zeros(newton_system_size);
56        self.delta_k_vec = nalgebra::DVector::zeros(newton_system_size);
57        self.jacobian_age = 0;
58
59        // Initialize Status
60        self.status = Status::Initialized;
61
62        Ok(evals)
63    }
64
65    fn step<F>(&mut self, ode: &F) -> Result<Evals, Error<T, V>>
66    where
67        F: ODE<T, V, D>,
68    {
69        let mut evals = Evals::new();
70
71        // Check step size
72        if self.h.abs() < self.h_prev.abs() * T::from_f64(1e-14).unwrap() {
73            self.status = Status::Error(Error::StepSize {
74                t: self.t, y: self.y
75            });
76            return Err(Error::StepSize {
77                t: self.t, y: self.y
78            });
79        }
80
81        // Check max steps
82        if self.steps >= self.max_steps {
83            self.status = Status::Error(Error::MaxSteps {
84                t: self.t, y: self.y
85            });
86            return Err(Error::MaxSteps {
87                t: self.t, y: self.y
88            });
89        }
90        self.steps += 1;
91
92        // Initial guess for stage values (predictor step)
93        for i in 0..self.stages {
94            self.y_stages[i] = self.y;
95        }        
96        
97        // Newton iteration to solve the implicit system
98        let mut newton_converged = false;
99        let mut newton_iter = 0;
100        let dim = self.y.len();        
101        
102        // Compute Jacobian using the ODE trait's jacobian method
103        ode.jacobian(self.t, &self.y, &mut self.jacobian_matrix);
104        evals.jac += 1;
105
106        while !newton_converged && newton_iter < self.max_newton_iter {
107            newton_iter += 1;
108            self.newton_iterations += 1;
109
110            // Compute stage derivatives with current stage values
111            for i in 0..self.stages {
112                ode.diff(self.t + self.c[i] * self.h, &self.y_stages[i], &mut self.k[i]);
113            }
114            evals.fcn += self.stages;
115
116            // Compute residual phi(k) = k_i - f(t_n + c_i*h, Y_i)
117            // where Y_i = y_n + h * sum(a_ij * k_j)
118            for i in 0..self.stages {
119                // First, compute Y_i from current k values
120                self.y_stages[i] = self.y;
121                for j in 0..self.stages {
122                    self.y_stages[i] += self.k[j] * (self.a[i][j] * self.h);
123                }
124                
125                // Evaluate f at Y_i
126                let mut f_at_stage = V::zeros();
127                ode.diff(self.t + self.c[i] * self.h, &self.y_stages[i], &mut f_at_stage);
128                evals.fcn += 1;
129                  
130                // Compute residual: phi_i = k_i - f(t_n + c_i*h, Y_i)
131                // Store -phi_i in rhs_newton for the Newton system
132                for row_idx in 0..dim {
133                    self.rhs_newton[i * dim + row_idx] = f_at_stage.get(row_idx) - self.k[i].get(row_idx);
134                }
135            }            
136            
137            // Form Newton matrix M = I - h * (A ⊗ J)
138            // where A is the Runge-Kutta matrix and J is the Jacobian
139            for i in 0..self.stages {
140                for j in 0..self.stages {
141                    let scale_factor = -self.h * self.a[i][j];
142                    for r in 0..dim {
143                        for c_col in 0..dim {
144                            self.newton_matrix[(i * dim + r, j * dim + c_col)] = 
145                                self.jacobian_matrix[(r, c_col)] * scale_factor;
146                        }
147                    }
148                    
149                    // Add identity for diagonal blocks
150                    if i == j {
151                        for d_idx in 0..dim {
152                            self.newton_matrix[(i * dim + d_idx, j * dim + d_idx)] += T::one();
153                        }
154                    }
155                }
156            }            
157            
158            // Solve M * delta_k = rhs_newton
159            let lu_decomp = nalgebra::LU::new(self.newton_matrix.clone());
160            if let Some(solution) = lu_decomp.solve(&self.rhs_newton) {
161                self.delta_k_vec.copy_from(&solution);
162            } else {
163                // LU decomposition failed - matrix is singular
164                newton_converged = false;
165                break;
166            }            
167            
168            // Update k values: k_i += delta_k_i
169            let mut norm_delta_k_sq = T::zero();
170            for i in 0..self.stages {
171                for row_idx in 0..dim {
172                    let delta_val = self.delta_k_vec[i * dim + row_idx];
173                    let current_val = self.k[i].get(row_idx);
174                    self.k[i].set(row_idx, current_val + delta_val);
175                    norm_delta_k_sq += delta_val * delta_val;
176                }
177            }
178
179            // Check convergence: ||delta_k|| < tolerance
180            if norm_delta_k_sq < self.newton_tol * self.newton_tol {
181                newton_converged = true;
182            }
183        }        
184        
185        // Check if Newton iteration failed to converge
186        if !newton_converged {
187            // Reduce step size and try again
188            self.h *= T::from_f64(0.25).unwrap();
189            self.h = constrain_step_size(self.h, self.h_min, self.h_max);
190            self.status = Status::RejectedStep;
191            self.stiffness_counter += 1;
192
193            if self.stiffness_counter >= self.max_rejects {
194                self.status = Status::Error(Error::Stiffness {
195                    t: self.t,
196                    y: self.y,
197                });
198                return Err(Error::Stiffness {
199                    t: self.t,
200                    y: self.y,
201                });
202            }
203            return Ok(evals);
204        }
205
206        // Final evaluation of stage derivatives with converged values
207        for i in 0..self.stages {
208            // Recompute Y_i with final k values
209            self.y_stages[i] = self.y;
210            for j in 0..self.stages {
211                self.y_stages[i] += self.k[j] * (self.a[i][j] * self.h);
212            }
213            ode.diff(self.t + self.c[i] * self.h, &self.y_stages[i], &mut self.k[i]);
214        }
215        evals.fcn += self.stages;        
216        
217        // For adaptive methods with error estimation
218        // Compute higher order solution
219        let mut y_high = self.y;
220        for i in 0..self.stages {
221            y_high += self.k[i] * (self.b[i] * self.h);
222        }
223
224        // Compute lower order solution for error estimation
225        let mut y_low = self.y;
226        if let Some(bh) = &self.bh {
227            for i in 0..self.stages {
228                y_low += self.k[i] * (bh[i] * self.h);
229            }
230        }
231
232        // Compute error estimate
233        let err = y_high - y_low;
234
235        // Calculate error norm with proper scaling
236        let mut err_norm: T = T::zero();
237        for n in 0..self.y.len() {
238            let scale = self.atol + self.rtol * self.y.get(n).abs().max(y_high.get(n).abs());
239            if scale > T::zero() {
240                err_norm = err_norm.max((err.get(n) / scale).abs());
241            }
242        }
243        
244        // Ensure error norm is not too small (add a small minimum)
245        err_norm = err_norm.max(T::default_epsilon() * T::from_f64(100.0).unwrap());
246
247        // Step size scale factor
248        let order = T::from_usize(self.order).unwrap();
249        let error_exponent = T::one() / order;
250        let mut scale = self.safety_factor * err_norm.powf(-error_exponent);
251        
252        // Clamp scale factor to prevent extreme step size changes
253        scale = scale.max(self.min_scale).min(self.max_scale);
254        
255        // Determine if step is accepted
256        if err_norm <= T::one() {
257            // Step accepted
258            self.status = Status::Solving;
259
260            // Log previous state
261            self.t_prev = self.t;
262            self.y_prev = self.y;
263            self.dydt_prev = self.dydt;
264            self.h_prev = self.h;
265
266            // Update state with the higher-order solution
267            self.t += self.h;
268            self.y = y_high;
269
270            // Compute the derivative for the next step
271            ode.diff(self.t, &self.y, &mut self.dydt);
272            evals.fcn += 1;
273
274            // Reset stiffness counter if we had rejections before
275            if let Status::RejectedStep = self.status {
276                self.stiffness_counter = 0;
277
278                // Limit step size growth to avoid oscillations between accepted and rejected steps
279                scale = scale.min(T::one());
280            }
281        } else {
282            // Step rejected
283            self.status = Status::RejectedStep;
284            self.stiffness_counter += 1;
285
286            // Check for excessive rejections
287            if self.stiffness_counter >= self.max_rejects {
288                self.status = Status::Error(Error::Stiffness {
289                    t: self.t, y: self.y
290                });
291                return Err(Error::Stiffness {
292                    t: self.t, y: self.y
293                });
294            }
295        }        
296
297        // Update step size
298        self.h *= scale;
299        
300        // Apply the new step size
301        self.h = constrain_step_size(self.h, self.h_min, self.h_max);
302
303        Ok(evals)
304    }
305
306    fn t(&self) -> T { self.t }
307    fn y(&self) -> &V { &self.y }
308    fn t_prev(&self) -> T { self.t_prev }
309    fn y_prev(&self) -> &V { &self.y_prev }
310    fn h(&self) -> T { self.h }
311    fn set_h(&mut self, h: T) { self.h = h; }
312    fn status(&self) -> &Status<T, V, D> { &self.status }
313    fn set_status(&mut self, status: Status<T, V, D>) { self.status = status; }
314}
315
316impl<T: Real, V: State<T>, D: CallBackData, const O: usize, const S: usize, const I: usize> Interpolation<T, V> for ImplicitRungeKutta<Ordinary, Adaptive, T, V, D, O, S, I> {
317    fn interpolate(&mut self, t_interp: T) -> Result<V, Error<T, V>> {
318        // Check if t is within bounds
319        if t_interp < self.t_prev || t_interp > self.t {
320            return Err(Error::OutOfBounds {
321                t_interp,
322                t_prev: self.t_prev,
323                t_curr: self.t
324            });
325        }
326
327        // Otherwise use cubic Hermite interpolation
328        let y_interp = cubic_hermite_interpolate(
329            self.t_prev, 
330            self.t, 
331            &self.y_prev, 
332            &self.y, 
333            &self.dydt_prev, 
334            &self.dydt, 
335            t_interp
336        );
337
338        Ok(y_interp)
339    }
340}