scirs2_core/numeric/stable_algorithms.rs

//! # Stable Numerical Algorithms
//!
//! This module provides numerically stable implementations of common algorithms
//! used in scientific computing, with a focus on matrix operations, iterative methods,
//! and numerical differentiation/integration.

use crate::{
    error::{CoreError, CoreResult, ErrorContext},
    numeric::stability::{stable_norm_2, StableComputation},
    validation::check_finite,
};
use ndarray::{s, Array1, Array2, ArrayView1, ArrayView2};
use num_traits::{cast, Float};
use std::fmt::Debug;

/// Configuration for iterative algorithms
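///
/// # Example
///
/// A minimal sketch of overriding the defaults (illustrative only, not compiled as a doctest):
///
/// ```ignore
/// let config = IterativeConfig::<f64> {
///     max_iterations: 500,
///     abs_tolerance: 1e-12,
///     ..Default::default()
/// };
/// ```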
#[derive(Debug, Clone)]
pub struct IterativeConfig<T: Float> {
    /// Maximum number of iterations
    pub max_iterations: usize,
    /// Absolute tolerance for convergence
    pub abs_tolerance: T,
    /// Relative tolerance for convergence
    pub reltolerance: T,
    /// Whether to use adaptive tolerance
    pub adaptive_tolerance: bool,
}

impl<T: Float> Default for IterativeConfig<T> {
    fn default() -> Self {
        Self {
            max_iterations: 1000,
            abs_tolerance: cast::<f64, T>(1e-10).unwrap_or(T::epsilon()),
            reltolerance: cast::<f64, T>(1e-8).unwrap_or(T::epsilon()),
            adaptive_tolerance: true,
        }
    }
}

/// Result of an iterative algorithm
#[derive(Debug, Clone)]
pub struct IterativeResult<T: Float> {
    /// The computed solution
    pub solution: Array1<T>,
    /// Number of iterations performed
    pub iterations: usize,
    /// Final residual norm
    pub residual: T,
    /// Whether the algorithm converged
    pub converged: bool,
    /// Convergence history (optional)
    pub history: Option<Vec<T>>,
}

/// Stable Gaussian elimination with partial pivoting
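///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled as a doctest; assumes `f64` inputs):
///
/// ```ignore
/// use ndarray::array;
///
/// // Solve the 2x2 system 2x + y = 3, x + 3y = 5  =>  x = 0.8, y = 1.4
/// let a = array![[2.0, 1.0], [1.0, 3.0]];
/// let b = array![3.0, 5.0];
/// let x = gaussian_elimination_stable(&a.view(), &b.view()).expect("non-singular system");
/// ```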
#[allow(dead_code)]
pub fn gaussian_elimination_stable<T: Float + StableComputation>(
    a: &ArrayView2<T>,
    b: &ArrayView1<T>,
) -> CoreResult<Array1<T>> {
    let n = a.nrows();

    if a.ncols() != n {
        return Err(CoreError::ValidationError(ErrorContext::new(
            "Matrix must be square",
        )));
    }

    if b.len() != n {
        return Err(CoreError::DimensionError(ErrorContext::new(format!(
            "Matrix and vector dimensions must match: matrix is {}x{}, vector is {}x1",
            n,
            n,
            b.len()
        ))));
    }

    // Create augmented matrix
    let mut aug = Array2::zeros((n, n + 1));
    aug.slice_mut(s![.., ..n]).assign(a);
    aug.slice_mut(s![.., n]).assign(b);

    // Forward elimination with partial pivoting
    for k in 0..n {
        // Find pivot
        let mut max_idx = k;
        let mut max_val = aug[[k, k]].abs();

        for i in (k + 1)..n {
            if aug[[i, k]].abs() > max_val {
                max_val = aug[[i, k]].abs();
                max_idx = i;
            }
        }

        // Check for singular matrix
        if max_val.is_effectively_zero() {
            return Err(CoreError::ComputationError(ErrorContext::new(format!(
                "Singular matrix detected in gaussian elimination at pivot {k}"
            ))));
        }

        // Swap rows if needed
        if max_idx != k {
            for j in k..=n {
                let temp = aug[[k, j]];
                aug[[k, j]] = aug[[max_idx, j]];
                aug[[max_idx, j]] = temp;
            }
        }

        // Eliminate column
        for i in (k + 1)..n {
            let factor = aug[[i, k]] / aug[[k, k]];
            for j in k..=n {
                aug[[i, j]] = aug[[i, j]] - factor * aug[[k, j]];
            }
        }
    }

    // Back substitution
    let mut x = Array1::zeros(n);

    for i in (0..n).rev() {
        let mut sum = aug[[i, n]];
        for j in (i + 1)..n {
            sum = sum - aug[[i, j]] * x[j];
        }

        if aug[[i, i]].is_effectively_zero() {
            return Err(CoreError::ComputationError(ErrorContext::new(format!(
                "Singular matrix detected in back substitution at row {i}"
            ))));
        }

        x[i] = sum / aug[[i, i]];
    }

    Ok(x)
}

/// Stable QR decomposition using Householder reflections
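///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled as a doctest):
///
/// ```ignore
/// use ndarray::array;
///
/// let a = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]];
/// let (q, r) = qr_decomposition_stable(&a.view()).expect("decomposition succeeds");
/// // q is 3x3 orthogonal, r is 3x2 upper triangular, and q.dot(&r) reconstructs a
/// ```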
#[allow(dead_code)]
pub fn qr_decomposition_stable<T: Float + StableComputation>(
    a: &ArrayView2<T>,
) -> CoreResult<(Array2<T>, Array2<T>)> {
    let (m, n) = a.dim();
    let k = m.min(n);

    let mut q = Array2::eye(m);
    let mut r = a.to_owned();

    for j in 0..k {
        // Compute Householder vector for column j
        let mut x = r.slice(s![j.., j]).to_owned();

        // Compute stable norm
        let norm_x = stable_norm_2(&x.to_vec());

        if norm_x.is_effectively_zero() {
            continue;
        }

        // Choose sign to avoid cancellation
        let sign = if x[0] >= T::zero() {
            T::one()
        } else {
            -T::one()
        };
        let alpha = sign * norm_x;

        // Update first component
        x[0] = x[0] + alpha;

        // Normalize Householder vector
        let norm_v = stable_norm_2(&x.to_vec());
        if norm_v > T::zero() {
            for i in 0..x.len() {
                x[i] = x[i] / norm_v;
            }
        }

        // Apply Householder transformation to R
        for col in j..n {
            let mut dot = T::zero();
            for row in j..m {
                dot = dot + x[row - j] * r[[row, col]];
            }

            let scale = cast::<f64, T>(2.0).unwrap_or(T::one()) * dot;
            for row in j..m {
                r[[row, col]] = r[[row, col]] - scale * x[row - j];
            }
        }

        // Apply Householder transformation to Q
        for col in 0..m {
            let mut dot = T::zero();
            for row in j..m {
                dot = dot + x[row - j] * q[[row, col]];
            }

            let scale = cast::<f64, T>(2.0).unwrap_or(T::one()) * dot;
            for row in j..m {
                q[[row, col]] = q[[row, col]] - scale * x[row - j];
            }
        }
    }

    // Q is stored transposed, so transpose it back
    q = q.t().to_owned();

    Ok((q, r))
}

/// Stable Cholesky decomposition
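///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled as a doctest; the matrix
/// must be symmetric positive definite):
///
/// ```ignore
/// use ndarray::array;
///
/// let a = array![[4.0, 2.0], [2.0, 3.0]];
/// let l = cholesky_stable(&a.view()).expect("positive definite matrix");
/// // l is lower triangular and l.dot(&l.t()) reconstructs a (up to rounding)
/// ```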
#[allow(dead_code)]
pub fn cholesky_stable<T: Float + StableComputation>(a: &ArrayView2<T>) -> CoreResult<Array2<T>> {
    let n = a.nrows();

    if a.ncols() != n {
        return Err(CoreError::ValidationError(ErrorContext::new(
            "Matrix must be square",
        )));
    }

    let mut l = Array2::zeros((n, n));

    for i in 0..n {
        // Diagonal element
        let mut sum = a[[i, i]];
        for k in 0..i {
            sum = sum - l[[i, k]] * l[[i, k]];
        }

        if sum <= T::zero() {
            return Err(CoreError::ValidationError(ErrorContext::new(format!(
                "Matrix is not positive definite: Failed at row {i}"
            ))));
        }

        l[[i, i]] = sum.sqrt();

        // Off-diagonal elements
        for j in (i + 1)..n {
            let mut sum = a[[j, i]];
            for k in 0..i {
                sum = sum - l[[j, k]] * l[[i, k]];
            }

            l[[j, i]] = sum / l[[i, i]];
        }
    }

    Ok(l)
}

/// Conjugate gradient method for symmetric positive definite systems
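///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled as a doctest; the matrix
/// should be symmetric positive definite):
///
/// ```ignore
/// use ndarray::array;
///
/// let a = array![[4.0, 1.0], [1.0, 3.0]];
/// let b = array![1.0, 2.0];
/// let config = IterativeConfig::default();
/// let result = conjugate_gradient(&a.view(), &b.view(), None, &config)
///     .expect("CG should converge for an SPD system");
/// assert!(result.converged);
/// ```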
#[allow(dead_code)]
pub fn conjugate_gradient<T: Float + StableComputation>(
    a: &ArrayView2<T>,
    b: &ArrayView1<T>,
    x0: Option<&ArrayView1<T>>,
    config: &IterativeConfig<T>,
) -> CoreResult<IterativeResult<T>> {
    let n = a.nrows();

    if a.ncols() != n || b.len() != n {
        return Err(CoreError::DimensionError(ErrorContext::new(format!(
            "Matrix and vector dimensions must match: matrix is {}x{}, vector is {}x1",
            n,
            n,
            b.len()
        ))));
    }

    // Initialize solution
    let mut x = match x0 {
        Some(x0_ref) => x0_ref.to_owned(),
        None => Array1::zeros(n),
    };

    // Initial residual: r = b - A*x
    let mut r = b.to_owned();
    for i in 0..n {
        let mut sum = T::zero();
        for j in 0..n {
            sum = sum + a[[i, j]] * x[j];
        }
        r[i] = r[i] - sum;
    }

    let mut p = r.clone();
    let mut r_norm_sq = T::zero();
    for &val in r.iter() {
        r_norm_sq = r_norm_sq + val * val;
    }

    let b_norm = stable_norm_2(&b.to_vec());
    let _initial_residual = r_norm_sq.sqrt();

    let mut history = if config.adaptive_tolerance {
        Some(Vec::with_capacity(config.max_iterations))
    } else {
        None
    };

    for iter in 0..config.max_iterations {
        // Compute A*p
        let mut ap = Array1::zeros(n);
        for i in 0..n {
            for j in 0..n {
                ap[i] = ap[i] + a[[i, j]] * p[j];
            }
        }

        // Compute step size
        let mut p_ap = T::zero();
        for i in 0..n {
            p_ap = p_ap + p[i] * ap[i];
        }

        if p_ap.is_effectively_zero() {
            return Ok(IterativeResult {
                solution: x,
                iterations: iter,
                residual: r_norm_sq.sqrt(),
                converged: false,
                history,
            });
        }

        let alpha = r_norm_sq / p_ap;

        // Update solution and residual
        for i in 0..n {
            x[i] = x[i] + alpha * p[i];
            r[i] = r[i] - alpha * ap[i];
        }

        // Check convergence
        let r_norm = stable_norm_2(&r.to_vec());

        if let Some(ref mut hist) = history {
            hist.push(r_norm);
        }

        let tol = if config.adaptive_tolerance {
            config.abs_tolerance + config.reltolerance * b_norm
        } else {
            config.abs_tolerance
        };

        if r_norm < tol {
            return Ok(IterativeResult {
                solution: x,
                iterations: iter + 1,
                residual: r_norm,
                converged: true,
                history,
            });
        }

        // Update search direction
        let r_norm_sq_new = r_norm * r_norm;
        let beta = r_norm_sq_new / r_norm_sq;

        for i in 0..n {
            p[i] = r[i] + beta * p[i];
        }

        r_norm_sq = r_norm_sq_new;
    }

    Ok(IterativeResult {
        solution: x,
        iterations: config.max_iterations,
        residual: r_norm_sq.sqrt(),
        converged: false,
        history,
    })
}

/// GMRES (Generalized Minimal Residual) method
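///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled as a doctest; `restart`
/// bounds the Krylov subspace dimension built before the method restarts):
///
/// ```ignore
/// use ndarray::array;
///
/// let a = array![[4.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 2.0]];
/// let b = array![1.0, 2.0, 3.0];
/// let config = IterativeConfig::default();
/// let result = gmres(&a.view(), &b.view(), None, 3, &config)
///     .expect("GMRES should run on this small system");
/// ```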
#[allow(dead_code)]
pub fn gmres<T: Float + StableComputation>(
    a: &ArrayView2<T>,
    b: &ArrayView1<T>,
    x0: Option<&ArrayView1<T>>,
    restart: usize,
    config: &IterativeConfig<T>,
) -> CoreResult<IterativeResult<T>> {
    let n = a.nrows();

    if a.ncols() != n || b.len() != n {
        return Err(CoreError::DimensionError(ErrorContext::new(format!(
            "Matrix and vector dimensions must match: matrix is {}x{}, vector is {}x1",
            n,
            n,
            b.len()
        ))));
    }

    // Clamp the restart length to the system size and keep it at least 1
    // so the Arnoldi loop always makes progress.
    let restart = restart.min(n).max(1);

    // Initialize solution
    let mut x = match x0 {
        Some(x0_ref) => x0_ref.to_owned(),
        None => Array1::zeros(n),
    };

    let b_norm = stable_norm_2(&b.to_vec());
    let mut _outer_iter = 0;
    let mut total_iter = 0;

    let mut history = if config.adaptive_tolerance {
        Some(Vec::with_capacity(config.max_iterations))
    } else {
        None
    };

    while total_iter < config.max_iterations {
        // Compute initial residual
        let mut r = b.to_owned();
        for i in 0..n {
            let mut sum = T::zero();
            for j in 0..n {
                sum = sum + a[[i, j]] * x[j];
            }
            r[i] = r[i] - sum;
        }

        let r_norm = stable_norm_2(&r.to_vec());

        if let Some(ref mut hist) = history {
            hist.push(r_norm);
        }

        // Check convergence
        let tol = if config.adaptive_tolerance {
            config.abs_tolerance + config.reltolerance * b_norm
        } else {
            config.abs_tolerance
        };

        if r_norm < tol {
            return Ok(IterativeResult {
                solution: x,
                iterations: total_iter,
                residual: r_norm,
                converged: true,
                history,
            });
        }

        // Initialize Krylov subspace
        let mut v = vec![Array1::zeros(n); restart + 1];
        let mut h = Array2::zeros((restart + 1, restart));

        // Givens rotation coefficients used to keep H upper triangular
        let mut cs = Array1::<T>::zeros(restart);
        let mut sn = Array1::<T>::zeros(restart);

        // First basis vector
        for i in 0..n {
            v[0][i] = r[i] / r_norm;
        }

        // Right-hand side of the least squares problem in the Krylov basis
        let mut g = Array1::zeros(restart + 1);
        g[0] = r_norm;

        // Arnoldi iteration
        for j in 0..restart {
            total_iter += 1;

            // Compute A*v[j]
            let mut w = Array1::zeros(n);
            for i in 0..n {
                for k in 0..n {
                    w[i] = w[i] + a[[i, k]] * v[j][k];
                }
            }

            // Modified Gram-Schmidt orthogonalization
            for i in 0..=j {
                let mut dot = T::zero();
                for k in 0..n {
                    dot = dot + w[k] * v[i][k];
                }
                h[[i, j]] = dot;

                for k in 0..n {
                    w[k] = w[k] - dot * v[i][k];
                }
            }

            let w_norm = stable_norm_2(&w.to_vec());
            h[[j + 1, j]] = w_norm;

            if w_norm.is_effectively_zero() {
                break;
            }

            for k in 0..n {
                v[j + 1][k] = w[k] / w_norm;
            }

            // Apply previously computed Givens rotations to the new column of H
            for i in 0..j {
                let temp = cs[i] * h[[i, j]] + sn[i] * h[[i + 1, j]];
                h[[i + 1, j]] = -sn[i] * h[[i, j]] + cs[i] * h[[i + 1, j]];
                h[[i, j]] = temp;
            }

            // Compute a new Givens rotation that eliminates h[j+1, j]
            let denom = (h[[j, j]] * h[[j, j]] + h[[j + 1, j]] * h[[j + 1, j]]).sqrt();
            if denom.is_effectively_zero() {
                cs[j] = T::one();
                sn[j] = T::zero();
            } else {
                cs[j] = h[[j, j]] / denom;
                sn[j] = h[[j + 1, j]] / denom;
            }

            // Apply the new rotation to H and to the right-hand side g
            h[[j, j]] = cs[j] * h[[j, j]] + sn[j] * h[[j + 1, j]];
            h[[j + 1, j]] = T::zero();
            g[j + 1] = -sn[j] * g[j];
            g[j] = cs[j] * g[j];

            // Check residual
            let residual = g[j + 1].abs();
            if residual < tol {
                // Solve least squares problem and update solution
                let mut y = Array1::zeros(j + 1);
                for i in (0..=j).rev() {
                    let mut sum = g[i];
                    for k in (i + 1)..=j {
                        sum = sum - h[[i, k]] * y[k];
                    }
                    y[i] = sum / h[[i, i]];
                }

                // Update solution
                for i in 0..=j {
                    for k in 0..n {
                        x[k] = x[k] + y[i] * v[i][k];
                    }
                }

                return Ok(IterativeResult {
                    solution: x,
                    iterations: total_iter,
                    residual,
                    converged: true,
                    history,
                });
            }
        }

        // Solve least squares problem
        let mut y = Array1::zeros(restart);
        for i in (0..restart).rev() {
            let mut sum = g[i];
            for j in (i + 1)..restart {
                sum = sum - h[[i, j]] * y[j];
            }
            if h[[i, i]].is_effectively_zero() {
                break;
            }
            y[i] = sum / h[[i, i]];
        }

        // Update solution
        for i in 0..restart {
            for j in 0..n {
                x[j] = x[j] + y[i] * v[i][j];
            }
        }

        _outer_iter += 1;
    }

    // Final residual computation
    let mut r = b.to_owned();
    for i in 0..n {
        let mut sum = T::zero();
        for j in 0..n {
            sum = sum + a[[i, j]] * x[j];
        }
        r[i] = r[i] - sum;
    }

    let final_residual = stable_norm_2(&r.to_vec());

    Ok(IterativeResult {
        solution: x,
        iterations: total_iter,
        residual: final_residual,
        converged: false,
        history,
    })
}

/// Stable numerical differentiation using Richardson extrapolation
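///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled as a doctest):
///
/// ```ignore
/// // d/dx (x^2) at x = 2 is 4; `order` controls the depth of the extrapolation table
/// let df = richardson_derivative(|x: f64| x * x, 2.0, 0.1, 3).expect("order must be >= 1");
/// assert!((df - 4.0).abs() < 1e-10);
/// ```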
#[allow(dead_code)]
pub fn richardson_derivative<T, F>(f: F, x: T, h: T, order: usize) -> CoreResult<T>
where
    T: Float + StableComputation,
    F: Fn(T) -> T,
{
    if order == 0 {
        return Err(CoreError::ValidationError(ErrorContext::new(
            "Order must be at least 1",
        )));
    }

    let n = order + 1;
    let mut d = Array2::zeros((n, n));

    // Initial step size
    let mut h_curr = h;

    // First column: finite differences with decreasing h
    for i in 0..n {
        // Central difference
        let f_plus = f(x + h_curr);
        let f_minus = f(x - h_curr);
        d[[i, 0]] = (f_plus - f_minus) / (cast::<f64, T>(2.0).unwrap_or(T::one()) * h_curr);

        // Halve the step size
        h_curr = h_curr / cast::<f64, T>(2.0).unwrap_or(T::one());
    }

    // Richardson extrapolation: d[i][j] = (4^j * d[i][j-1] - d[i-1][j-1]) / (4^j - 1)
    for j in 1..n {
        let factor = cast::<f64, T>(4.0_f64.powi(j as i32)).unwrap_or(T::one());
        for i in j..n {
            d[[i, j]] = (factor * d[[i, j - 1]] - d[[i - 1, j - 1]]) / (factor - T::one());
        }
    }

    Ok(d[[n - 1, n - 1]])
}

/// Stable numerical integration using adaptive Simpson's rule
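///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled as a doctest):
///
/// ```ignore
/// // Integral of x^2 over [0, 1] is 1/3
/// let integral = adaptive_simpson(|x: f64| x * x, 0.0, 1.0, 1e-10, 10)
///     .expect("finite limits with a < b");
/// assert!((integral - 1.0 / 3.0).abs() < 1e-9);
/// ```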
#[allow(dead_code)]
pub fn adaptive_simpson<T, F>(f: F, a: T, b: T, tolerance: T, maxdepth: usize) -> CoreResult<T>
where
    T: Float + StableComputation,
    F: Fn(T) -> T,
{
    check_finite(
        a.to_f64().ok_or_else(|| {
            CoreError::TypeError(ErrorContext::new("Failed to convert lower limit to f64"))
        })?,
        "Lower limit",
    )?;
    check_finite(
        b.to_f64().ok_or_else(|| {
            CoreError::TypeError(ErrorContext::new("Failed to convert upper limit to f64"))
        })?,
        "Upper limit",
    )?;

    if a >= b {
        return Err(CoreError::ValidationError(ErrorContext::new(
            "Upper limit must be greater than lower limit",
        )));
    }

    fn simpson_rule<T: Float, F: Fn(T) -> T>(f: &F, a: T, b: T) -> T {
        let six = cast::<f64, T>(6.0).unwrap_or_else(|| {
            // This should work for all Float types
            T::one() + T::one() + T::one() + T::one() + T::one() + T::one()
        });
        let two = cast::<f64, T>(2.0).unwrap_or_else(|| T::one() + T::one());
        let four = cast::<f64, T>(4.0).unwrap_or_else(|| two + two);

        let h = (b - a) / six;
        let mid = (a + b) / two;
        h * (f(a) + four * f(mid) + f(b))
    }

    fn adaptive_simpson_recursive<T: Float + StableComputation, F: Fn(T) -> T>(
        f: &F,
        a: T,
        b: T,
        tolerance: T,
        whole: T,
        depth: usize,
        max_depth: usize,
    ) -> T {
        if depth >= max_depth {
            return whole;
        }

        let two = cast::<f64, T>(2.0).unwrap_or_else(|| T::one() + T::one());
        let mid = (a + b) / two;
        let left = simpson_rule(f, a, mid);
        let right = simpson_rule(f, mid, b);
        let combined = left + right;

        let diff = (combined - whole).abs();

        let fifteen = cast::<f64, T>(15.0).unwrap_or_else(|| {
            // Build 15 from ones
            let five = T::one() + T::one() + T::one() + T::one() + T::one();
            five + five + five
        });
        if diff <= fifteen * tolerance {
            // Richardson correction uses the signed difference, not its magnitude
            combined + (combined - whole) / fifteen
        } else {
            let half_tol = tolerance / cast::<f64, T>(2.0).unwrap_or(T::one());
            adaptive_simpson_recursive(f, a, mid, half_tol, left, depth + 1, max_depth)
                + adaptive_simpson_recursive(f, mid, b, half_tol, right, depth + 1, max_depth)
        }
    }

    let whole = simpson_rule(&f, a, b);
    Ok(adaptive_simpson_recursive(
        &f, a, b, tolerance, whole, 0, maxdepth,
    ))
}

/// Stable computation of matrix exponential using scaling and squaring
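///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled as a doctest):
///
/// ```ignore
/// use ndarray::array;
///
/// // For a diagonal matrix the result has exp of the diagonal entries
/// let a = array![[1.0, 0.0], [0.0, 2.0]];
/// let exp_a = matrix_exp_stable(&a.view(), None).expect("square matrix");
/// ```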
#[allow(dead_code)]
pub fn matrix_exp_stable<T: Float + StableComputation>(
    a: &ArrayView2<T>,
    scaling_threshold: Option<T>,
) -> CoreResult<Array2<T>> {
    let n = a.nrows();

    if a.ncols() != n {
        return Err(CoreError::ValidationError(ErrorContext::new(
            "Matrix must be square",
        )));
    }

    let threshold = scaling_threshold.unwrap_or(cast::<f64, T>(0.5).unwrap_or(T::one()));

    // Compute matrix norm
    let mut norm = T::zero();
    for i in 0..n {
        let mut row_sum = T::zero();
        for j in 0..n {
            row_sum = row_sum + a[[i, j]].abs();
        }
        norm = norm.max(row_sum);
    }

    // Determine scaling factor
    let mut s = 0;
    let mut scaled_norm = norm;
    while scaled_norm > threshold {
        scaled_norm = scaled_norm / cast::<f64, T>(2.0).unwrap_or(T::one());
        s += 1;
    }

    // Scale matrix
    let scale = cast::<f64, T>(2.0_f64.powi(s)).unwrap_or(T::one());
    let mut a_scaled = Array2::zeros((n, n));
    for i in 0..n {
        for j in 0..n {
            a_scaled[[i, j]] = a[[i, j]] / scale;
        }
    }

    // Padé approximation (simplified - using Taylor series for demonstration)
    let mut result = Array2::eye(n);
    let mut term: Array2<T> = Array2::eye(n);
    let mut factorial = T::one();

    for k in 1..10 {
        // Matrix multiplication
        let mut new_term = Array2::zeros((n, n));
        for i in 0..n {
            for j in 0..n {
                for l in 0..n {
                    new_term[[i, j]] = new_term[[i, j]] + term[[i, l]] * a_scaled[[l, j]];
                }
            }
        }
        term = new_term;

        factorial = factorial * cast::<i32, T>(k).unwrap_or(T::one());

        // Add term
        for i in 0..n {
            for j in 0..n {
                result[[i, j]] = result[[i, j]] + term[[i, j]] / factorial;
            }
        }
    }

    // Square s times
    for _ in 0..s {
        let mut squared = Array2::zeros((n, n));
        for i in 0..n {
            for j in 0..n {
                for k in 0..n {
                    squared[[i, j]] = squared[[i, j]] + result[[i, k]] * result[[k, j]];
                }
            }
        }
        result = squared;
    }

    Ok(result)
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;
    use ndarray::array;

    #[test]
    fn test_gaussian_elimination() {
        let a = array![[2.0, 1.0, -1.0], [-3.0, -1.0, 2.0], [-2.0, 1.0, 2.0]];
        let b = array![8.0, -11.0, -3.0];

        let x = gaussian_elimination_stable(&a.view(), &b.view())
            .expect("Gaussian elimination should succeed for this test matrix");

        // Expected solution: [2, 3, -1]
        assert_relative_eq!(x[0], 2.0, epsilon = 1e-10);
        assert_relative_eq!(x[1], 3.0, epsilon = 1e-10);
        assert_relative_eq!(x[2], -1.0, epsilon = 1e-10);
    }

    #[test]
    fn test_qr_decomposition() {
        let a = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]];

        let (q, r) = qr_decomposition_stable(&a.view())
            .expect("QR decomposition should succeed for this test matrix");

        // Verify Q is orthogonal
        let qt_q = q.t().dot(&q);
        for i in 0..2 {
            for j in 0..2 {
                let expected = if i == j { 1.0 } else { 0.0 };
                assert_relative_eq!(qt_q[[i, j]], expected, epsilon = 1e-10);
            }
        }

        // Verify A = QR
        let reconstructed = q.dot(&r);
        for i in 0..3 {
            for j in 0..2 {
                assert_relative_eq!(reconstructed[[i, j]], a[[i, j]], epsilon = 1e-10);
            }
        }
    }

    #[test]
    fn test_cholesky() {
        // Positive definite matrix
        let a = array![
            [4.0, 12.0, -16.0],
            [12.0, 37.0, -43.0],
            [-16.0, -43.0, 98.0]
        ];

        let l = cholesky_stable(&a.view())
            .expect("Cholesky decomposition should succeed for this positive definite matrix");

        // Verify A = L * L^T
        let reconstructed = l.dot(&l.t());
        for i in 0..3 {
            for j in 0..3 {
                assert_relative_eq!(reconstructed[[i, j]], a[[i, j]], epsilon = 1e-10);
            }
        }
    }

    #[test]
    fn test_conjugate_gradient() {
        // Symmetric positive definite matrix
        let a = array![[4.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 2.0]];
        let b = array![1.0, 2.0, 3.0];

        let config = IterativeConfig::default();
        let result = conjugate_gradient(&a.view(), &b.view(), None, &config)
            .expect("Conjugate gradient should converge for this system");

        assert!(result.converged);

        // Verify A*x = b
        let ax = a.dot(&result.solution);
        for i in 0..3 {
            assert_relative_eq!(ax[i], b[i], epsilon = 1e-8);
        }
    }

    #[test]
    fn test_richardson_derivative() {
        // Test function: f(x) = x^2
        let f = |x: f64| x * x;

        // Derivative at x = 2 should be 4
        let derivative =
            richardson_derivative(f, 2.0, 0.1, 3).expect("Richardson extrapolation should succeed");
        assert_relative_eq!(derivative, 4.0, epsilon = 1e-10);

        // Test with sin(x)
        let g = |x: f64| x.sin();

        // Derivative at x = 0 should be 1 (cos(0) = 1)
        let derivative = richardson_derivative(g, 0.0, 0.01, 4)
            .expect("Richardson extrapolation should succeed for cos at 0");
        assert_relative_eq!(derivative, 1.0, epsilon = 1e-10);
    }

    #[test]
    fn test_adaptive_simpson() {
        // Test with simple polynomial: f(x) = x^2
        let f = |x: f64| x * x;

        // Integral from 0 to 1 should be 1/3
        let integral = adaptive_simpson(f, 0.0, 1.0, 1e-10, 10)
            .expect("Adaptive Simpson integration should succeed");
        assert_relative_eq!(integral, 1.0 / 3.0, epsilon = 1e-10);

        // Test with sine function
        let g = |x: f64| x.sin();

        // Integral from 0 to π should be 2
        let integral = adaptive_simpson(g, 0.0, std::f64::consts::PI, 1e-10, 10)
            .expect("Adaptive Simpson integration should succeed for sin");
        assert_relative_eq!(integral, 2.0, epsilon = 1e-10);
    }

    #[test]
    fn test_matrix_exp() {
        // Test with diagonal matrix
        let a = array![[1.0, 0.0], [0.0, 2.0]];

        let exp_a = matrix_exp_stable(&a.view(), None)
            .expect("Matrix exponential should succeed for this small matrix");

        // exp(diagonal) should have exp of diagonal elements
        assert_relative_eq!(exp_a[[0, 0]], 1.0_f64.exp(), epsilon = 1e-8);
        assert_relative_eq!(exp_a[[1, 1]], 2.0_f64.exp(), epsilon = 1e-8);
        assert_relative_eq!(exp_a[[0, 1]], 0.0, epsilon = 1e-8);
        assert_relative_eq!(exp_a[[1, 0]], 0.0, epsilon = 1e-8);
    }
}