sklears_kernel_approximation/numerical_stability.rs

1//! Numerical Stability Enhancements for Kernel Approximation Methods
2//!
3//! This module provides tools for monitoring and improving numerical stability
4//! including condition number monitoring, overflow/underflow protection,
5//! and numerically stable algorithms.
6
7use scirs2_core::ndarray::{Array1, Array2, Axis};
8use scirs2_core::random::{thread_rng, Rng};
9use sklears_core::prelude::{Result, SklearsError};
10
/// Numerical stability monitor for kernel approximation methods
///
/// Accumulates metrics and warnings across calls to `monitor_matrix`;
/// reset the collected state with `clear`.
#[derive(Debug, Clone)]
pub struct NumericalStabilityMonitor {
    // Thresholds and toggles that decide what counts as a stability problem.
    config: StabilityConfig,
    // Per-matrix measurements (condition numbers, ranks, overflow counters, ...).
    metrics: StabilityMetrics,
    // Human-readable warnings collected while monitoring.
    warnings: Vec<StabilityWarning>,
}
19
/// Configuration for numerical stability monitoring
///
/// Thresholds are compared against the monitor's cheap estimates (see
/// `estimate_condition_number` / `estimate_eigenvalues`), so they flag
/// likely problems rather than give exact guarantees.
#[derive(Debug, Clone)]
pub struct StabilityConfig {
    /// Maximum allowed condition number before a warning is recorded
    pub max_condition_number: f64,

    /// Minimum eigenvalue magnitude; estimates below this flag near-singularity
    pub min_eigenvalue: f64,

    /// Maximum eigenvalue threshold
    // NOTE(review): not read anywhere in this file — confirm intended use.
    pub max_eigenvalue: f64,

    /// Tolerance for numerical precision (pivot and norm cutoffs)
    pub numerical_tolerance: f64,

    /// Enable overflow/underflow protection (value clamping)
    pub enable_overflow_protection: bool,

    /// Enable high-precision arithmetic when needed
    // NOTE(review): not consulted by any code in this file — confirm intended use.
    pub enable_high_precision: bool,

    /// Regularization parameter added to matrix diagonals for ill-conditioned matrices
    pub regularization: f64,
}
45
46impl Default for StabilityConfig {
47    fn default() -> Self {
48        Self {
49            max_condition_number: 1e12,
50            min_eigenvalue: 1e-12,
51            max_eigenvalue: 1e12,
52            numerical_tolerance: 1e-12,
53            enable_overflow_protection: true,
54            enable_high_precision: false,
55            regularization: 1e-8,
56        }
57    }
58}
59
/// Numerical stability metrics
///
/// One entry is appended to each `Vec` per monitored matrix; the counters
/// accumulate across calls.
///
/// NOTE(review): `numerical_errors` and `precision_loss` are never written
/// by any code in this file — confirm whether they are populated elsewhere.
#[derive(Debug, Clone, Default)]
pub struct StabilityMetrics {
    /// Condition numbers of matrices encountered
    pub condition_numbers: Vec<f64>,

    /// Eigenvalue ranges as (min, max) estimates, recorded for square matrices
    pub eigenvalue_ranges: Vec<(f64, f64)>,

    /// Numerical errors detected
    pub numerical_errors: Vec<f64>,

    /// Matrix ranks
    pub matrix_ranks: Vec<usize>,

    /// Number of overflow occurrences
    pub overflow_count: usize,
    /// Number of underflow occurrences
    pub underflow_count: usize,

    /// Precision loss estimates
    pub precision_loss: Vec<f64>,
}
84
/// Types of numerical stability warnings
///
/// Every variant carries a `location` tag identifying where in the
/// pipeline the problem was observed.
#[derive(Debug, Clone)]
pub enum StabilityWarning {
    /// High condition number detected
    HighConditionNumber {
        /// Estimated condition number that exceeded the configured maximum
        condition_number: f64,
        /// Where the matrix was being monitored
        location: String,
    },

    /// Near-singular matrix detected
    NearSingular {
        /// Eigenvalue estimate closest to zero
        smallest_eigenvalue: f64,
        /// Where the matrix was being monitored
        location: String,
    },

    /// Overflow detected
    Overflow {
        /// The offending value
        value: f64,
        /// Where it was observed
        location: String,
    },

    /// Underflow detected
    Underflow {
        /// The offending value
        value: f64,
        /// Where it was observed
        location: String,
    },

    /// Significant precision loss
    PrecisionLoss {
        /// Estimated fractional loss (formatted as a percentage in reports)
        estimated_loss: f64,
        /// Where it was observed
        location: String,
    },

    /// Rank deficiency detected
    RankDeficient {
        /// Full rank expected (min of the two dimensions)
        expected_rank: usize,
        /// Rank actually estimated
        actual_rank: usize,
        /// Where the matrix was being monitored
        location: String,
    },
}
122
impl NumericalStabilityMonitor {
    /// Create a new stability monitor
    ///
    /// The monitor starts with empty metrics and an empty warning list.
    pub fn new(config: StabilityConfig) -> Self {
        NumericalStabilityMonitor {
            config,
            warnings: Vec::new(),
            metrics: StabilityMetrics::default(),
        }
    }
132
133    /// Monitor matrix for numerical stability issues
134    pub fn monitor_matrix(&mut self, matrix: &Array2<f64>, location: &str) -> Result<()> {
135        // Check for NaN and infinite values
136        self.check_finite_values(matrix, location)?;
137
138        // Compute and monitor condition number
139        let condition_number = self.estimate_condition_number(matrix)?;
140        self.metrics.condition_numbers.push(condition_number);
141
142        if condition_number > self.config.max_condition_number {
143            self.warnings.push(StabilityWarning::HighConditionNumber {
144                condition_number,
145                location: location.to_string(),
146            });
147        }
148
149        // Check eigenvalues if matrix is square
150        if matrix.nrows() == matrix.ncols() {
151            let eigenvalues = self.estimate_eigenvalues(matrix)?;
152            let min_eigenval = eigenvalues.iter().fold(f64::INFINITY, |acc, &x| acc.min(x));
153            let max_eigenval = eigenvalues
154                .iter()
155                .fold(f64::NEG_INFINITY, |acc, &x| acc.max(x));
156
157            self.metrics
158                .eigenvalue_ranges
159                .push((min_eigenval, max_eigenval));
160
161            if min_eigenval.abs() < self.config.min_eigenvalue {
162                self.warnings.push(StabilityWarning::NearSingular {
163                    smallest_eigenvalue: min_eigenval,
164                    location: location.to_string(),
165                });
166            }
167        }
168
169        // Estimate matrix rank
170        let rank = self.estimate_rank(matrix)?;
171        self.metrics.matrix_ranks.push(rank);
172
173        let expected_rank = matrix.nrows().min(matrix.ncols());
174        if rank < expected_rank {
175            self.warnings.push(StabilityWarning::RankDeficient {
176                expected_rank,
177                actual_rank: rank,
178                location: location.to_string(),
179            });
180        }
181
182        Ok(())
183    }
184
185    /// Apply numerical stabilization to a matrix
186    pub fn stabilize_matrix(&mut self, matrix: &mut Array2<f64>) -> Result<()> {
187        // Apply regularization for ill-conditioned matrices
188        if matrix.nrows() == matrix.ncols() {
189            for i in 0..matrix.nrows() {
190                matrix[[i, i]] += self.config.regularization;
191            }
192        }
193
194        // Clamp extreme values if overflow protection is enabled
195        if self.config.enable_overflow_protection {
196            self.clamp_extreme_values(matrix)?;
197        }
198
199        Ok(())
200    }
201
    /// Compute numerically stable eigendecomposition
    ///
    /// Approximates the `min(10, n)` largest-magnitude eigenpairs of a
    /// square matrix via power iteration with deflation, after monitoring
    /// and regularizing the input. Returns `(eigenvalues, eigenvectors)`
    /// with eigenvectors stored as columns; trailing entries stay zero if
    /// the loop exits early.
    ///
    /// # Errors
    /// Returns `InvalidInput` for non-square matrices and propagates any
    /// monitoring error (e.g. non-finite entries).
    pub fn stable_eigendecomposition(
        &mut self,
        matrix: &Array2<f64>,
    ) -> Result<(Array1<f64>, Array2<f64>)> {
        if matrix.nrows() != matrix.ncols() {
            return Err(SklearsError::InvalidInput(
                "Matrix must be square for eigendecomposition".to_string(),
            ));
        }

        self.monitor_matrix(matrix, "eigendecomposition_input")?;

        // Apply stabilization (diagonal regularization + optional clamping).
        let mut stabilized_matrix = matrix.clone();
        self.stabilize_matrix(&mut stabilized_matrix)?;

        // Simplified eigendecomposition using power iteration for largest eigenvalues
        let n = matrix.nrows();
        let max_eigenvalues = 10.min(n);

        let mut eigenvalues = Array1::zeros(max_eigenvalues);
        let mut eigenvectors = Array2::zeros((n, max_eigenvalues));

        let mut current_matrix = stabilized_matrix.clone();

        for i in 0..max_eigenvalues {
            let (eigenvalue, eigenvector) = self.power_iteration(&current_matrix)?;
            eigenvalues[i] = eigenvalue;
            eigenvectors.column_mut(i).assign(&eigenvector);

            // Deflate the matrix: subtract λ·v·vᵀ so the next power
            // iteration converges toward the next eigenpair.
            let outer_product = self.outer_product(&eigenvector, &eigenvector);
            current_matrix = &current_matrix - &(&outer_product * eigenvalue);

            // Stop once the extracted eigenvalue is negligible.
            if eigenvalue.abs() < self.config.min_eigenvalue {
                break;
            }
        }

        Ok((eigenvalues, eigenvectors))
    }
245
    /// Compute numerically stable matrix inversion
    ///
    /// Inverts the *regularized* matrix `A + εI` (ε = `config.regularization`)
    /// via Gauss–Jordan elimination with partial pivoting, so the result is
    /// an approximate inverse of the original input.
    ///
    /// # Errors
    /// `InvalidInput` for non-square matrices; `NumericalError` when a pivot
    /// falls below `numerical_tolerance` (singular / near-singular input);
    /// plus any monitoring error on input or output.
    pub fn stable_matrix_inverse(&mut self, matrix: &Array2<f64>) -> Result<Array2<f64>> {
        self.monitor_matrix(matrix, "matrix_inverse_input")?;

        if matrix.nrows() != matrix.ncols() {
            return Err(SklearsError::InvalidInput(
                "Matrix must be square for inversion".to_string(),
            ));
        }

        let n = matrix.nrows();

        // Use regularized pseudoinverse for stability
        let regularized_matrix = self.regularize_matrix(matrix)?;

        // Simplified inversion using Gauss-Jordan elimination with pivoting
        let mut augmented = Array2::zeros((n, 2 * n));

        // Set up augmented matrix [A | I]
        for i in 0..n {
            for j in 0..n {
                augmented[[i, j]] = regularized_matrix[[i, j]];
            }
            augmented[[i, n + i]] = 1.0;
        }

        // Forward elimination with partial pivoting
        for i in 0..n {
            // Find the row with the largest magnitude in column i.
            let mut max_row = i;
            for k in i + 1..n {
                if augmented[[k, i]].abs() > augmented[[max_row, i]].abs() {
                    max_row = k;
                }
            }

            // Swap rows if needed
            if max_row != i {
                for j in 0..2 * n {
                    let temp = augmented[[i, j]];
                    augmented[[i, j]] = augmented[[max_row, j]];
                    augmented[[max_row, j]] = temp;
                }
            }

            // Check for near-zero pivot
            if augmented[[i, i]].abs() < self.config.numerical_tolerance {
                return Err(SklearsError::NumericalError(
                    "Matrix is singular or near-singular".to_string(),
                ));
            }

            // Scale pivot row so the pivot becomes 1.
            let pivot = augmented[[i, i]];
            for j in 0..2 * n {
                augmented[[i, j]] /= pivot;
            }

            // Eliminate column i from every other row (full Gauss-Jordan,
            // so no separate back-substitution pass is needed).
            for k in 0..n {
                if k != i {
                    let factor = augmented[[k, i]];
                    for j in 0..2 * n {
                        augmented[[k, j]] -= factor * augmented[[i, j]];
                    }
                }
            }
        }

        // Extract the right half of [I | A^-1].
        let mut inverse = Array2::zeros((n, n));
        for i in 0..n {
            for j in 0..n {
                inverse[[i, j]] = augmented[[i, n + j]];
            }
        }

        self.monitor_matrix(&inverse, "matrix_inverse_output")?;

        Ok(inverse)
    }
327
328    /// Compute numerically stable Cholesky decomposition
329    pub fn stable_cholesky(&mut self, matrix: &Array2<f64>) -> Result<Array2<f64>> {
330        self.monitor_matrix(matrix, "cholesky_input")?;
331
332        if matrix.nrows() != matrix.ncols() {
333            return Err(SklearsError::InvalidInput(
334                "Matrix must be square for Cholesky decomposition".to_string(),
335            ));
336        }
337
338        let n = matrix.nrows();
339
340        // Apply regularization for numerical stability
341        let regularized_matrix = self.regularize_matrix(matrix)?;
342
343        let mut L = Array2::zeros((n, n));
344
345        for i in 0..n {
346            for j in 0..=i {
347                if i == j {
348                    // Diagonal elements
349                    let mut sum = 0.0;
350                    for k in 0..j {
351                        sum += L[[j, k]] * L[[j, k]];
352                    }
353
354                    let diagonal_value = regularized_matrix[[j, j]] - sum;
355                    if diagonal_value <= 0.0 {
356                        return Err(SklearsError::NumericalError(
357                            "Matrix is not positive definite".to_string(),
358                        ));
359                    }
360
361                    L[[j, j]] = diagonal_value.sqrt();
362                } else {
363                    // Off-diagonal elements
364                    let mut sum = 0.0;
365                    for k in 0..j {
366                        sum += L[[i, k]] * L[[j, k]];
367                    }
368
369                    L[[i, j]] = (regularized_matrix[[i, j]] - sum) / L[[j, j]];
370                }
371            }
372        }
373
374        self.monitor_matrix(&L, "cholesky_output")?;
375
376        Ok(L)
377    }
378
379    /// Get stability warnings
380    pub fn get_warnings(&self) -> &[StabilityWarning] {
381        &self.warnings
382    }
383
384    /// Get stability metrics
385    pub fn get_metrics(&self) -> &StabilityMetrics {
386        &self.metrics
387    }
388
389    /// Clear warnings and metrics
390    pub fn clear(&mut self) {
391        self.warnings.clear();
392        self.metrics = StabilityMetrics::default();
393    }
394
395    /// Generate stability report
396    pub fn generate_report(&self) -> String {
397        let mut report = String::new();
398
399        report.push_str("=== Numerical Stability Report ===\n\n");
400
401        // Summary statistics
402        if !self.metrics.condition_numbers.is_empty() {
403            let mean_condition = self.metrics.condition_numbers.iter().sum::<f64>()
404                / self.metrics.condition_numbers.len() as f64;
405            let max_condition = self
406                .metrics
407                .condition_numbers
408                .iter()
409                .fold(f64::NEG_INFINITY, |acc, &x| acc.max(x));
410
411            report.push_str(&format!(
412                "Condition Numbers:\n  Mean: {:.2e}\n  Max: {:.2e}\n  Count: {}\n\n",
413                mean_condition,
414                max_condition,
415                self.metrics.condition_numbers.len()
416            ));
417        }
418
419        // Eigenvalue analysis
420        if !self.metrics.eigenvalue_ranges.is_empty() {
421            let min_eigenval = self
422                .metrics
423                .eigenvalue_ranges
424                .iter()
425                .map(|(min, _)| *min)
426                .fold(f64::INFINITY, f64::min);
427            let max_eigenval = self
428                .metrics
429                .eigenvalue_ranges
430                .iter()
431                .map(|(_, max)| *max)
432                .fold(f64::NEG_INFINITY, f64::max);
433
434            report.push_str(&format!(
435                "Eigenvalue Range:\n  Min: {:.2e}\n  Max: {:.2e}\n  Ratio: {:.2e}\n\n",
436                min_eigenval,
437                max_eigenval,
438                max_eigenval / min_eigenval.abs().max(1e-16)
439            ));
440        }
441
442        // Warnings
443        if !self.warnings.is_empty() {
444            report.push_str(&format!("Warnings ({}):\n", self.warnings.len()));
445            for (i, warning) in self.warnings.iter().enumerate() {
446                report.push_str(&format!("  {}: {}\n", i + 1, self.format_warning(warning)));
447            }
448            report.push('\n');
449        }
450
451        // Overflow/underflow statistics
452        if self.metrics.overflow_count > 0 || self.metrics.underflow_count > 0 {
453            report.push_str(&format!(
454                "Overflow/Underflow:\n  Overflows: {}\n  Underflows: {}\n\n",
455                self.metrics.overflow_count, self.metrics.underflow_count
456            ));
457        }
458
459        report.push_str("=== End Report ===\n");
460
461        report
462    }
463
464    // Helper methods
465
466    fn check_finite_values(&mut self, matrix: &Array2<f64>, location: &str) -> Result<()> {
467        for &value in matrix.iter() {
468            if !value.is_finite() {
469                if value.is_infinite() {
470                    self.metrics.overflow_count += 1;
471                    self.warnings.push(StabilityWarning::Overflow {
472                        value,
473                        location: location.to_string(),
474                    });
475                } else if value == 0.0 && value.is_sign_negative() {
476                    self.metrics.underflow_count += 1;
477                    self.warnings.push(StabilityWarning::Underflow {
478                        value,
479                        location: location.to_string(),
480                    });
481                }
482
483                return Err(SklearsError::NumericalError(format!(
484                    "Non-finite value detected: {} at {}",
485                    value, location
486                )));
487            }
488        }
489        Ok(())
490    }
491
    /// Cheap condition-number estimate.
    ///
    /// This is a heuristic, not the true spectral condition number: square
    /// matrices use (‖A‖_F + max off-diagonal magnitude) / min diagonal
    /// magnitude; rectangular matrices use ‖A‖_F / min row norm. Suitable
    /// only for flagging likely ill-conditioning.
    fn estimate_condition_number(&self, matrix: &Array2<f64>) -> Result<f64> {
        // Frobenius norm of the whole matrix.
        let norm = matrix.mapv(|x| x * x).sum().sqrt();

        if matrix.nrows() == matrix.ncols() {
            // For square matrices, estimate using diagonal dominance.
            let mut min_diag = f64::INFINITY;
            let mut max_off_diag: f64 = 0.0;

            for i in 0..matrix.nrows() {
                min_diag = min_diag.min(matrix[[i, i]].abs());

                for j in 0..matrix.ncols() {
                    if i != j {
                        max_off_diag = max_off_diag.max(matrix[[i, j]].abs());
                    }
                }
            }

            // A zero diagonal entry drives the estimate to infinity.
            let condition_estimate = if min_diag > 0.0 {
                (norm + max_off_diag) / min_diag
            } else {
                f64::INFINITY
            };

            Ok(condition_estimate)
        } else {
            // For non-square matrices, use a norm-based estimate over rows.
            let min_norm = matrix
                .axis_iter(Axis(0))
                .map(|row| row.mapv(|x| x * x).sum().sqrt())
                .fold(f64::INFINITY, f64::min);

            // Guard against division by a (near-)zero row norm.
            Ok(norm / min_norm.max(1e-16))
        }
    }
528
529    fn estimate_eigenvalues(&self, matrix: &Array2<f64>) -> Result<Array1<f64>> {
530        let n = matrix.nrows();
531
532        // Simplified eigenvalue estimation using Gershgorin circles
533        let mut eigenvalue_bounds = Array1::zeros(n);
534
535        for i in 0..n {
536            let center = matrix[[i, i]];
537            let radius = (0..n)
538                .filter(|&j| j != i)
539                .map(|j| matrix[[i, j]].abs())
540                .sum::<f64>();
541
542            eigenvalue_bounds[i] = center + radius; // Upper bound estimate
543        }
544
545        Ok(eigenvalue_bounds)
546    }
547
548    fn estimate_rank(&self, matrix: &Array2<f64>) -> Result<usize> {
549        // Simplified rank estimation using diagonal elements after regularization
550        let mut regularized = matrix.clone();
551
552        if matrix.nrows() == matrix.ncols() {
553            for i in 0..matrix.nrows() {
554                regularized[[i, i]] += self.config.regularization;
555            }
556        }
557
558        let mut rank = 0;
559        let min_dim = matrix.nrows().min(matrix.ncols());
560
561        for i in 0..min_dim {
562            let column_norm = regularized.column(i).mapv(|x| x * x).sum().sqrt();
563            if column_norm > self.config.numerical_tolerance {
564                rank += 1;
565            }
566        }
567
568        Ok(rank)
569    }
570
    /// Clamp every entry of `matrix` into [-1e12, 1e12] in place, counting
    /// how many entries were clipped.
    ///
    /// NOTE(review): clipping at the negative bound increments
    /// `underflow_count`, but a large negative value is a negative
    /// overflow rather than an underflow — confirm the intended metric
    /// semantics.
    fn clamp_extreme_values(&mut self, matrix: &mut Array2<f64>) -> Result<()> {
        let max_value = 1e12;
        let min_value = -1e12;

        for value in matrix.iter_mut() {
            if *value > max_value {
                *value = max_value;
                self.metrics.overflow_count += 1;
            } else if *value < min_value {
                *value = min_value;
                self.metrics.underflow_count += 1;
            }
        }

        Ok(())
    }
587
588    fn power_iteration(&self, matrix: &Array2<f64>) -> Result<(f64, Array1<f64>)> {
589        let n = matrix.nrows();
590        let mut vector = Array1::from_shape_fn(n, |_| thread_rng().gen::<f64>() - 0.5);
591
592        // Normalize initial vector
593        let norm = vector.mapv(|x| x * x).sum().sqrt();
594        vector /= norm;
595
596        let mut eigenvalue = 0.0;
597
598        for _ in 0..100 {
599            let new_vector = matrix.dot(&vector);
600            let new_norm = new_vector.mapv(|x| x * x).sum().sqrt();
601
602            if new_norm < self.config.numerical_tolerance {
603                break;
604            }
605
606            eigenvalue = vector.dot(&new_vector);
607            vector = new_vector / new_norm;
608        }
609
610        Ok((eigenvalue, vector))
611    }
612
613    fn outer_product(&self, v1: &Array1<f64>, v2: &Array1<f64>) -> Array2<f64> {
614        let mut result = Array2::zeros((v1.len(), v2.len()));
615
616        for i in 0..v1.len() {
617            for j in 0..v2.len() {
618                result[[i, j]] = v1[i] * v2[j];
619            }
620        }
621
622        result
623    }
624
625    fn regularize_matrix(&self, matrix: &Array2<f64>) -> Result<Array2<f64>> {
626        let mut regularized = matrix.clone();
627
628        if matrix.nrows() == matrix.ncols() {
629            for i in 0..matrix.nrows() {
630                regularized[[i, i]] += self.config.regularization;
631            }
632        }
633
634        Ok(regularized)
635    }
636
637    fn format_warning(&self, warning: &StabilityWarning) -> String {
638        match warning {
639            StabilityWarning::HighConditionNumber {
640                condition_number,
641                location,
642            } => {
643                format!(
644                    "High condition number {:.2e} at {}",
645                    condition_number, location
646                )
647            }
648            StabilityWarning::NearSingular {
649                smallest_eigenvalue,
650                location,
651            } => {
652                format!(
653                    "Near-singular matrix (λ_min = {:.2e}) at {}",
654                    smallest_eigenvalue, location
655                )
656            }
657            StabilityWarning::Overflow { value, location } => {
658                format!("Overflow (value = {:.2e}) at {}", value, location)
659            }
660            StabilityWarning::Underflow { value, location } => {
661                format!("Underflow (value = {:.2e}) at {}", value, location)
662            }
663            StabilityWarning::PrecisionLoss {
664                estimated_loss,
665                location,
666            } => {
667                format!(
668                    "Precision loss ({:.1}%) at {}",
669                    estimated_loss * 100.0,
670                    location
671                )
672            }
673            StabilityWarning::RankDeficient {
674                expected_rank,
675                actual_rank,
676                location,
677            } => {
678                format!(
679                    "Rank deficient ({}/{}) at {}",
680                    actual_rank, expected_rank, location
681                )
682            }
683        }
684    }
685}
686
687/// Numerically stable kernel matrix computation
688pub fn stable_kernel_matrix(
689    data1: &Array2<f64>,
690    data2: Option<&Array2<f64>>,
691    kernel_type: &str,
692    bandwidth: f64,
693    monitor: &mut NumericalStabilityMonitor,
694) -> Result<Array2<f64>> {
695    let data2 = data2.unwrap_or(data1);
696    let (n1, n_features) = data1.dim();
697    let (n2, _) = data2.dim();
698
699    let mut kernel = Array2::zeros((n1, n2));
700
701    // Precompute squared norms for numerical stability
702    let norms1: Vec<f64> = data1
703        .axis_iter(Axis(0))
704        .map(|row| row.mapv(|x| x * x).sum())
705        .collect();
706
707    let norms2: Vec<f64> = data2
708        .axis_iter(Axis(0))
709        .map(|row| row.mapv(|x| x * x).sum())
710        .collect();
711
712    for i in 0..n1 {
713        for j in 0..n2 {
714            let similarity = match kernel_type {
715                "RBF" => {
716                    // Use numerically stable distance computation: ||x-y||² = ||x||² + ||y||² - 2⟨x,y⟩
717                    let dot_product = data1.row(i).dot(&data2.row(j));
718                    let dist_sq = norms1[i] + norms2[j] - 2.0 * dot_product;
719                    let dist_sq = dist_sq.max(0.0); // Ensure non-negative due to numerical errors
720
721                    let exponent = -bandwidth * dist_sq;
722
723                    // Clamp exponent to prevent underflow
724                    let clamped_exponent = exponent.max(-700.0); // e^(-700) ≈ 1e-304
725
726                    clamped_exponent.exp()
727                }
728                "Laplacian" => {
729                    let diff = &data1.row(i) - &data2.row(j);
730                    let dist = diff.mapv(|x| x.abs()).sum();
731
732                    let exponent = -bandwidth * dist;
733                    let clamped_exponent = exponent.max(-700.0);
734
735                    clamped_exponent.exp()
736                }
737                "Polynomial" => {
738                    let dot_product = data1.row(i).dot(&data2.row(j));
739                    let base = bandwidth * dot_product + 1.0;
740
741                    // Ensure positive base for polynomial kernel
742                    let clamped_base = base.max(1e-16);
743
744                    clamped_base.powi(2) // Degree 2 polynomial
745                }
746                "Linear" => data1.row(i).dot(&data2.row(j)),
747                _ => {
748                    return Err(SklearsError::InvalidOperation(format!(
749                        "Unsupported kernel type: {}",
750                        kernel_type
751                    )));
752                }
753            };
754
755            kernel[[i, j]] = similarity;
756        }
757    }
758
759    monitor.monitor_matrix(&kernel, &format!("{}_kernel_matrix", kernel_type))?;
760
761    Ok(kernel)
762}
763
#[allow(non_snake_case)]
// NOTE(review): this `allow` only covers the test module; the
// non-snake-case local `L` it presumably excuses lives in the impl above —
// confirm the intended placement.
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    /// A fresh monitor has no warnings and no recorded condition numbers.
    #[test]
    fn test_stability_monitor_creation() {
        let config = StabilityConfig::default();
        let monitor = NumericalStabilityMonitor::new(config);

        assert!(monitor.get_warnings().is_empty());
        assert_eq!(monitor.get_metrics().condition_numbers.len(), 0);
    }

    /// A benign matrix produces no warnings; an almost-singular one is
    /// expected to trip at least one heuristic.
    #[test]
    fn test_condition_number_monitoring() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        // Well-conditioned matrix
        let well_conditioned = array![[2.0, 1.0], [1.0, 2.0],];

        monitor
            .monitor_matrix(&well_conditioned, "test_well_conditioned")
            .unwrap();
        assert!(monitor.get_warnings().is_empty());

        // Ill-conditioned matrix
        let ill_conditioned = array![[1.0, 1.0], [1.0, 1.000001],];

        monitor
            .monitor_matrix(&ill_conditioned, "test_ill_conditioned")
            .unwrap();
        // Should detect high condition number or near-singularity
        // NOTE(review): there is no assertion here, so the detection claim
        // above is unchecked — consider asserting on get_warnings().
    }

    /// Eigendecomposition returns at most n finite eigenvalues and an
    /// eigenvector matrix with one row per input row.
    #[test]
    fn test_stable_eigendecomposition() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        let matrix = array![[4.0, 2.0], [2.0, 3.0],];

        let (eigenvalues, eigenvectors) = monitor.stable_eigendecomposition(&matrix).unwrap();

        assert!(eigenvalues.len() <= matrix.nrows());
        assert_eq!(eigenvectors.nrows(), matrix.nrows());
        assert!(eigenvalues.iter().all(|&x| x.is_finite()));
    }

    /// The inverse is taken of the regularized matrix, so the identity
    /// check is against A + εI rather than A itself.
    #[test]
    fn test_stable_matrix_inverse() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        let matrix = array![[4.0, 2.0], [2.0, 3.0],];

        let inverse = monitor.stable_matrix_inverse(&matrix).unwrap();

        // Check that A_regularized * A^(-1) ≈ I (where A^(-1) is inverse of regularized matrix)
        let mut regularized_matrix = matrix.clone();
        for i in 0..matrix.nrows() {
            regularized_matrix[[i, i]] += 1e-8; // Default regularization
        }

        let product = regularized_matrix.dot(&inverse);
        let identity_error = (&product - &Array2::<f64>::eye(2)).mapv(|x| x.abs()).sum();

        assert!(identity_error < 1e-10);
    }

    /// L·Lᵀ must reconstruct the regularized input, matching
    /// `stable_cholesky`'s documented behavior.
    #[test]
    fn test_stable_cholesky() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        // Positive definite matrix
        let matrix = array![[4.0, 2.0], [2.0, 3.0],];

        let cholesky = monitor.stable_cholesky(&matrix).unwrap();

        // Check that L * L^T = A_regularized (regularized matrix)
        let reconstructed = cholesky.dot(&cholesky.t());

        // Compute the regularized matrix for comparison
        let mut regularized_matrix = matrix.clone();
        for i in 0..matrix.nrows() {
            regularized_matrix[[i, i]] += 1e-8; // Default regularization
        }

        let reconstruction_error = (&regularized_matrix - &reconstructed)
            .mapv(|x| x.abs())
            .sum();

        assert!(reconstruction_error < 1e-10);
    }

    /// An RBF self-kernel must be symmetric, non-negative, finite, and
    /// have unit diagonal.
    #[test]
    fn test_stable_kernel_matrix() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        let data = array![[1.0, 2.0], [2.0, 3.0], [3.0, 4.0],];

        let kernel = stable_kernel_matrix(&data, None, "RBF", 1.0, &mut monitor).unwrap();

        assert_eq!(kernel.shape(), &[3, 3]);
        assert!(kernel.iter().all(|&x| x.is_finite() && x >= 0.0));

        // Kernel matrix should be symmetric
        for i in 0..3 {
            for j in 0..3 {
                assert!((kernel[[i, j]] - kernel[[j, i]]).abs() < 1e-12);
            }
        }

        // Diagonal should be 1 for RBF kernel
        for i in 0..3 {
            assert!((kernel[[i, i]] - 1.0).abs() < 1e-12);
        }
    }

    /// The report always carries its header; the condition-number section
    /// appears once a matrix has been monitored.
    #[test]
    fn test_stability_report() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        let matrix = array![[1.0, 2.0], [3.0, 4.0],];

        monitor.monitor_matrix(&matrix, "test_matrix").unwrap();

        let report = monitor.generate_report();
        assert!(report.contains("Numerical Stability Report"));
        assert!(report.contains("Condition Numbers"));
    }
}