use scirs2_core::ndarray::{Array1, Array2, Axis};
use scirs2_core::random::{thread_rng, Rng};
use sklears_core::prelude::{Result, SklearsError};

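/// Monitors the numerical stability of matrix operations, recording condition
/// numbers, eigenvalue ranges, matrix ranks, and overflow/underflow counts,
/// and collecting warnings when configured thresholds are exceeded.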
#[derive(Debug, Clone)]
pub struct NumericalStabilityMonitor {
    config: StabilityConfig,
    metrics: StabilityMetrics,
    warnings: Vec<StabilityWarning>,
}

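/// Configuration thresholds for stability monitoring: condition-number and
/// eigenvalue limits, numerical tolerance, overflow protection, and the ridge
/// regularization added when stabilizing matrices.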
#[derive(Debug, Clone)]
pub struct StabilityConfig {
    pub max_condition_number: f64,
    pub min_eigenvalue: f64,
    pub max_eigenvalue: f64,
    pub numerical_tolerance: f64,
    pub enable_overflow_protection: bool,
    pub enable_high_precision: bool,
    pub regularization: f64,
}

impl Default for StabilityConfig {
    fn default() -> Self {
        Self {
            max_condition_number: 1e12,
            min_eigenvalue: 1e-12,
            max_eigenvalue: 1e12,
            numerical_tolerance: 1e-12,
            enable_overflow_protection: true,
            enable_high_precision: false,
            regularization: 1e-8,
        }
    }
}

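/// Aggregated stability metrics collected across monitored operations.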
#[derive(Debug, Clone, Default)]
pub struct StabilityMetrics {
    pub condition_numbers: Vec<f64>,
    pub eigenvalue_ranges: Vec<(f64, f64)>,
    pub numerical_errors: Vec<f64>,
    pub matrix_ranks: Vec<usize>,
    pub overflow_count: usize,
    pub underflow_count: usize,
    pub precision_loss: Vec<f64>,
}

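/// Warnings raised when a monitored operation violates a stability threshold.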
#[derive(Debug, Clone)]
pub enum StabilityWarning {
    HighConditionNumber {
        condition_number: f64,
        location: String,
    },
    NearSingular {
        smallest_eigenvalue: f64,
        location: String,
    },
    Overflow { value: f64, location: String },
    Underflow { value: f64, location: String },
    PrecisionLoss {
        estimated_loss: f64,
        location: String,
    },
    RankDeficient {
        expected_rank: usize,
        actual_rank: usize,
        location: String,
    },
}

impl NumericalStabilityMonitor {
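    /// Creates a new stability monitor with the given configuration.
    ///
    /// A minimal usage sketch (the surrounding crate path is assumed and may
    /// differ; adjust imports to wherever this module actually lives):
    ///
    /// ```ignore
    /// use scirs2_core::ndarray::array;
    ///
    /// let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());
    /// let matrix = array![[2.0, 1.0], [1.0, 2.0]];
    /// monitor.monitor_matrix(&matrix, "example").unwrap();
    /// println!("{}", monitor.generate_report());
    /// ```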
    pub fn new(config: StabilityConfig) -> Self {
        Self {
            config,
            metrics: StabilityMetrics::default(),
            warnings: Vec::new(),
        }
    }

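    /// Validates a matrix and records stability diagnostics.
    ///
    /// Checks that all entries are finite, estimates the condition number and
    /// (for square matrices) the eigenvalue range, estimates the rank, and
    /// pushes warnings when configured thresholds are violated.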
    pub fn monitor_matrix(&mut self, matrix: &Array2<f64>, location: &str) -> Result<()> {
        self.check_finite_values(matrix, location)?;

        let condition_number = self.estimate_condition_number(matrix)?;
        self.metrics.condition_numbers.push(condition_number);

        if condition_number > self.config.max_condition_number {
            self.warnings.push(StabilityWarning::HighConditionNumber {
                condition_number,
                location: location.to_string(),
            });
        }

        if matrix.nrows() == matrix.ncols() {
            let eigenvalues = self.estimate_eigenvalues(matrix)?;
            let min_eigenval = eigenvalues.iter().fold(f64::INFINITY, |acc, &x| acc.min(x));
            let max_eigenval = eigenvalues
                .iter()
                .fold(f64::NEG_INFINITY, |acc, &x| acc.max(x));

            self.metrics
                .eigenvalue_ranges
                .push((min_eigenval, max_eigenval));

            if min_eigenval.abs() < self.config.min_eigenvalue {
                self.warnings.push(StabilityWarning::NearSingular {
                    smallest_eigenvalue: min_eigenval,
                    location: location.to_string(),
                });
            }
        }

        let rank = self.estimate_rank(matrix)?;
        self.metrics.matrix_ranks.push(rank);

        let expected_rank = matrix.nrows().min(matrix.ncols());
        if rank < expected_rank {
            self.warnings.push(StabilityWarning::RankDeficient {
                expected_rank,
                actual_rank: rank,
                location: location.to_string(),
            });
        }

        Ok(())
    }

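    /// Adds the configured ridge term to the diagonal of a square matrix and,
    /// when overflow protection is enabled, clamps extreme entries.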
    pub fn stabilize_matrix(&mut self, matrix: &mut Array2<f64>) -> Result<()> {
        if matrix.nrows() == matrix.ncols() {
            for i in 0..matrix.nrows() {
                matrix[[i, i]] += self.config.regularization;
            }
        }

        if self.config.enable_overflow_protection {
            self.clamp_extreme_values(matrix)?;
        }

        Ok(())
    }

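    /// Computes up to ten dominant eigenpairs of a square matrix using power
    /// iteration with deflation on a regularized copy of the input. Extraction
    /// stops early once an eigenvalue magnitude falls below the configured
    /// minimum.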
    pub fn stable_eigendecomposition(
        &mut self,
        matrix: &Array2<f64>,
    ) -> Result<(Array1<f64>, Array2<f64>)> {
        if matrix.nrows() != matrix.ncols() {
            return Err(SklearsError::InvalidInput(
                "Matrix must be square for eigendecomposition".to_string(),
            ));
        }

        self.monitor_matrix(matrix, "eigendecomposition_input")?;

        let mut stabilized_matrix = matrix.clone();
        self.stabilize_matrix(&mut stabilized_matrix)?;

        let n = matrix.nrows();
        let max_eigenvalues = 10.min(n);

        let mut eigenvalues = Array1::zeros(max_eigenvalues);
        let mut eigenvectors = Array2::zeros((n, max_eigenvalues));

        let mut current_matrix = stabilized_matrix.clone();

        for i in 0..max_eigenvalues {
            let (eigenvalue, eigenvector) = self.power_iteration(&current_matrix)?;
            eigenvalues[i] = eigenvalue;
            eigenvectors.column_mut(i).assign(&eigenvector);

            let outer_product = self.outer_product(&eigenvector, &eigenvector);
            current_matrix = &current_matrix - &(&outer_product * eigenvalue);

            if eigenvalue.abs() < self.config.min_eigenvalue {
                break;
            }
        }

        Ok((eigenvalues, eigenvectors))
    }

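    /// Inverts a square matrix via Gauss-Jordan elimination with partial
    /// pivoting, applied to a ridge-regularized copy of the input. Returns an
    /// error if a pivot magnitude falls below the configured numerical
    /// tolerance.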
    pub fn stable_matrix_inverse(&mut self, matrix: &Array2<f64>) -> Result<Array2<f64>> {
        self.monitor_matrix(matrix, "matrix_inverse_input")?;

        if matrix.nrows() != matrix.ncols() {
            return Err(SklearsError::InvalidInput(
                "Matrix must be square for inversion".to_string(),
            ));
        }

        let n = matrix.nrows();

        let regularized_matrix = self.regularize_matrix(matrix)?;

        let mut augmented = Array2::zeros((n, 2 * n));

        for i in 0..n {
            for j in 0..n {
                augmented[[i, j]] = regularized_matrix[[i, j]];
            }
            augmented[[i, n + i]] = 1.0;
        }

        for i in 0..n {
            let mut max_row = i;
            for k in i + 1..n {
                if augmented[[k, i]].abs() > augmented[[max_row, i]].abs() {
                    max_row = k;
                }
            }

            if max_row != i {
                for j in 0..2 * n {
                    let temp = augmented[[i, j]];
                    augmented[[i, j]] = augmented[[max_row, j]];
                    augmented[[max_row, j]] = temp;
                }
            }

            if augmented[[i, i]].abs() < self.config.numerical_tolerance {
                return Err(SklearsError::NumericalError(
                    "Matrix is singular or near-singular".to_string(),
                ));
            }

            let pivot = augmented[[i, i]];
            for j in 0..2 * n {
                augmented[[i, j]] /= pivot;
            }

            for k in 0..n {
                if k != i {
                    let factor = augmented[[k, i]];
                    for j in 0..2 * n {
                        augmented[[k, j]] -= factor * augmented[[i, j]];
                    }
                }
            }
        }

        let mut inverse = Array2::zeros((n, n));
        for i in 0..n {
            for j in 0..n {
                inverse[[i, j]] = augmented[[i, n + j]];
            }
        }

        self.monitor_matrix(&inverse, "matrix_inverse_output")?;

        Ok(inverse)
    }

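    /// Computes a lower-triangular Cholesky factor of a ridge-regularized
    /// copy of the matrix, reading only its lower triangle. Returns an error
    /// if a non-positive diagonal value is encountered.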
    pub fn stable_cholesky(&mut self, matrix: &Array2<f64>) -> Result<Array2<f64>> {
        self.monitor_matrix(matrix, "cholesky_input")?;

        if matrix.nrows() != matrix.ncols() {
            return Err(SklearsError::InvalidInput(
                "Matrix must be square for Cholesky decomposition".to_string(),
            ));
        }

        let n = matrix.nrows();

        let regularized_matrix = self.regularize_matrix(matrix)?;

        let mut L = Array2::zeros((n, n));

        for i in 0..n {
            for j in 0..=i {
                if i == j {
                    let mut sum = 0.0;
                    for k in 0..j {
                        sum += L[[j, k]] * L[[j, k]];
                    }

                    let diagonal_value = regularized_matrix[[j, j]] - sum;
                    if diagonal_value <= 0.0 {
                        return Err(SklearsError::NumericalError(
                            "Matrix is not positive definite".to_string(),
                        ));
                    }

                    L[[j, j]] = diagonal_value.sqrt();
                } else {
                    let mut sum = 0.0;
                    for k in 0..j {
                        sum += L[[i, k]] * L[[j, k]];
                    }

                    L[[i, j]] = (regularized_matrix[[i, j]] - sum) / L[[j, j]];
                }
            }
        }

        self.monitor_matrix(&L, "cholesky_output")?;

        Ok(L)
    }

    pub fn get_warnings(&self) -> &[StabilityWarning] {
        &self.warnings
    }

    pub fn get_metrics(&self) -> &StabilityMetrics {
        &self.metrics
    }

    pub fn clear(&mut self) {
        self.warnings.clear();
        self.metrics = StabilityMetrics::default();
    }

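    /// Produces a human-readable summary of collected condition numbers,
    /// eigenvalue ranges, warnings, and overflow/underflow counts.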
    pub fn generate_report(&self) -> String {
        let mut report = String::new();

        report.push_str("=== Numerical Stability Report ===\n\n");

        if !self.metrics.condition_numbers.is_empty() {
            let mean_condition = self.metrics.condition_numbers.iter().sum::<f64>()
                / self.metrics.condition_numbers.len() as f64;
            let max_condition = self
                .metrics
                .condition_numbers
                .iter()
                .fold(f64::NEG_INFINITY, |acc, &x| acc.max(x));

            report.push_str(&format!(
                "Condition Numbers:\n Mean: {:.2e}\n Max: {:.2e}\n Count: {}\n\n",
                mean_condition,
                max_condition,
                self.metrics.condition_numbers.len()
            ));
        }

        if !self.metrics.eigenvalue_ranges.is_empty() {
            let min_eigenval = self
                .metrics
                .eigenvalue_ranges
                .iter()
                .map(|(min, _)| *min)
                .fold(f64::INFINITY, f64::min);
            let max_eigenval = self
                .metrics
                .eigenvalue_ranges
                .iter()
                .map(|(_, max)| *max)
                .fold(f64::NEG_INFINITY, f64::max);

            report.push_str(&format!(
                "Eigenvalue Range:\n Min: {:.2e}\n Max: {:.2e}\n Ratio: {:.2e}\n\n",
                min_eigenval,
                max_eigenval,
                max_eigenval / min_eigenval.abs().max(1e-16)
            ));
        }

        if !self.warnings.is_empty() {
            report.push_str(&format!("Warnings ({}):\n", self.warnings.len()));
            for (i, warning) in self.warnings.iter().enumerate() {
                report.push_str(&format!(" {}: {}\n", i + 1, self.format_warning(warning)));
            }
            report.push('\n');
        }

        if self.metrics.overflow_count > 0 || self.metrics.underflow_count > 0 {
            report.push_str(&format!(
                "Overflow/Underflow:\n Overflows: {}\n Underflows: {}\n\n",
                self.metrics.overflow_count, self.metrics.underflow_count
            ));
        }

        report.push_str("=== End Report ===\n");

        report
    }

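    /// Returns an error if any entry is non-finite, recording overflow
    /// warnings for infinities and underflow warnings for negative zeros
    /// along the way.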
    fn check_finite_values(&mut self, matrix: &Array2<f64>, location: &str) -> Result<()> {
        for &value in matrix.iter() {
            if !value.is_finite() {
                if value.is_infinite() {
                    self.metrics.overflow_count += 1;
                    self.warnings.push(StabilityWarning::Overflow {
                        value,
                        location: location.to_string(),
                    });
                }

                return Err(SklearsError::NumericalError(format!(
                    "Non-finite value detected: {} at {}",
                    value, location
                )));
            } else if value == 0.0 && value.is_sign_negative() {
                // Negative zero is finite, so this check must live outside the
                // non-finite branch; it typically indicates that a small
                // negative value underflowed to zero.
                self.metrics.underflow_count += 1;
                self.warnings.push(StabilityWarning::Underflow {
                    value,
                    location: location.to_string(),
                });
            }
        }
        Ok(())
    }

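    /// Cheaply estimates the condition number without a full SVD: for square
    /// matrices it combines the Frobenius norm and the largest off-diagonal
    /// entry against the smallest diagonal magnitude; for rectangular
    /// matrices it uses the ratio of the Frobenius norm to the smallest row
    /// norm. This is a rough heuristic, not an exact condition number.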
    fn estimate_condition_number(&self, matrix: &Array2<f64>) -> Result<f64> {
        let norm = matrix.mapv(|x| x * x).sum().sqrt();

        if matrix.nrows() == matrix.ncols() {
            let mut min_diag = f64::INFINITY;
            let mut max_off_diag: f64 = 0.0;

            for i in 0..matrix.nrows() {
                min_diag = min_diag.min(matrix[[i, i]].abs());

                for j in 0..matrix.ncols() {
                    if i != j {
                        max_off_diag = max_off_diag.max(matrix[[i, j]].abs());
                    }
                }
            }

            let condition_estimate = if min_diag > 0.0 {
                (norm + max_off_diag) / min_diag
            } else {
                f64::INFINITY
            };

            Ok(condition_estimate)
        } else {
            let min_norm = matrix
                .axis_iter(Axis(0))
                .map(|row| row.mapv(|x| x * x).sum().sqrt())
                .fold(f64::INFINITY, f64::min);

            Ok(norm / min_norm.max(1e-16))
        }
    }

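    /// Estimates eigenvalue magnitudes from Gershgorin discs: each estimate is
    /// the diagonal entry plus the sum of absolute off-diagonal entries in its
    /// row, i.e. the upper end of that row's Gershgorin disc.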
    fn estimate_eigenvalues(&self, matrix: &Array2<f64>) -> Result<Array1<f64>> {
        let n = matrix.nrows();

        let mut eigenvalue_bounds = Array1::zeros(n);

        for i in 0..n {
            let center = matrix[[i, i]];
            let radius = (0..n)
                .filter(|&j| j != i)
                .map(|j| matrix[[i, j]].abs())
                .sum::<f64>();

            eigenvalue_bounds[i] = center + radius;
        }

        Ok(eigenvalue_bounds)
    }

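    /// Estimates rank as the number of leading columns whose norm exceeds the
    /// numerical tolerance, computed on a regularized copy. This is a cheap
    /// proxy rather than a true rank computation.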
    fn estimate_rank(&self, matrix: &Array2<f64>) -> Result<usize> {
        let mut regularized = matrix.clone();

        if matrix.nrows() == matrix.ncols() {
            for i in 0..matrix.nrows() {
                regularized[[i, i]] += self.config.regularization;
            }
        }

        let mut rank = 0;
        let min_dim = matrix.nrows().min(matrix.ncols());

        for i in 0..min_dim {
            let column_norm = regularized.column(i).mapv(|x| x * x).sum().sqrt();
            if column_norm > self.config.numerical_tolerance {
                rank += 1;
            }
        }

        Ok(rank)
    }

    fn clamp_extreme_values(&mut self, matrix: &mut Array2<f64>) -> Result<()> {
        let max_value = 1e12;
        let min_value = -1e12;

        for value in matrix.iter_mut() {
            if *value > max_value {
                *value = max_value;
                self.metrics.overflow_count += 1;
            } else if *value < min_value {
                *value = min_value;
                self.metrics.underflow_count += 1;
            }
        }

        Ok(())
    }

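    /// Runs up to 100 power-iteration steps to approximate the dominant
    /// eigenpair, starting from a random unit vector and using the Rayleigh
    /// quotient as the eigenvalue estimate.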
    fn power_iteration(&self, matrix: &Array2<f64>) -> Result<(f64, Array1<f64>)> {
        let n = matrix.nrows();
        let mut vector = Array1::from_shape_fn(n, |_| thread_rng().gen::<f64>() - 0.5);

        let norm = vector.mapv(|x| x * x).sum().sqrt();
        vector /= norm;

        let mut eigenvalue = 0.0;

        for _ in 0..100 {
            let new_vector = matrix.dot(&vector);
            let new_norm = new_vector.mapv(|x| x * x).sum().sqrt();

            if new_norm < self.config.numerical_tolerance {
                break;
            }

            eigenvalue = vector.dot(&new_vector);
            vector = new_vector / new_norm;
        }

        Ok((eigenvalue, vector))
    }

    fn outer_product(&self, v1: &Array1<f64>, v2: &Array1<f64>) -> Array2<f64> {
        let mut result = Array2::zeros((v1.len(), v2.len()));

        for i in 0..v1.len() {
            for j in 0..v2.len() {
                result[[i, j]] = v1[i] * v2[j];
            }
        }

        result
    }

    fn regularize_matrix(&self, matrix: &Array2<f64>) -> Result<Array2<f64>> {
        let mut regularized = matrix.clone();

        if matrix.nrows() == matrix.ncols() {
            for i in 0..matrix.nrows() {
                regularized[[i, i]] += self.config.regularization;
            }
        }

        Ok(regularized)
    }

    fn format_warning(&self, warning: &StabilityWarning) -> String {
        match warning {
            StabilityWarning::HighConditionNumber {
                condition_number,
                location,
            } => {
                format!(
                    "High condition number {:.2e} at {}",
                    condition_number, location
                )
            }
            StabilityWarning::NearSingular {
                smallest_eigenvalue,
                location,
            } => {
                format!(
                    "Near-singular matrix (λ_min = {:.2e}) at {}",
                    smallest_eigenvalue, location
                )
            }
            StabilityWarning::Overflow { value, location } => {
                format!("Overflow (value = {:.2e}) at {}", value, location)
            }
            StabilityWarning::Underflow { value, location } => {
                format!("Underflow (value = {:.2e}) at {}", value, location)
            }
            StabilityWarning::PrecisionLoss {
                estimated_loss,
                location,
            } => {
                format!(
                    "Precision loss ({:.1}%) at {}",
                    estimated_loss * 100.0,
                    location
                )
            }
            StabilityWarning::RankDeficient {
                expected_rank,
                actual_rank,
                location,
            } => {
                format!(
                    "Rank deficient ({}/{}) at {}",
                    actual_rank, expected_rank, location
                )
            }
        }
    }
}

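/// Computes a kernel (Gram) matrix with numerical safeguards: squared
/// distances are clamped at zero, exponents are clamped to avoid underflow,
/// and the resulting matrix is passed through the stability monitor.
/// Supported kernel types are "RBF", "Laplacian", "Polynomial", and "Linear".
///
/// A minimal usage sketch (import paths assumed, as in the tests below):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
///
/// let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());
/// let data = array![[1.0, 2.0], [2.0, 3.0]];
/// let kernel = stable_kernel_matrix(&data, None, "RBF", 1.0, &mut monitor).unwrap();
/// assert_eq!(kernel.shape(), &[2, 2]);
/// ```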
pub fn stable_kernel_matrix(
    data1: &Array2<f64>,
    data2: Option<&Array2<f64>>,
    kernel_type: &str,
    bandwidth: f64,
    monitor: &mut NumericalStabilityMonitor,
) -> Result<Array2<f64>> {
    let data2 = data2.unwrap_or(data1);
    let (n1, _n_features) = data1.dim();
    let (n2, _) = data2.dim();

    let mut kernel = Array2::zeros((n1, n2));

    let norms1: Vec<f64> = data1
        .axis_iter(Axis(0))
        .map(|row| row.mapv(|x| x * x).sum())
        .collect();

    let norms2: Vec<f64> = data2
        .axis_iter(Axis(0))
        .map(|row| row.mapv(|x| x * x).sum())
        .collect();

    for i in 0..n1 {
        for j in 0..n2 {
            let similarity = match kernel_type {
                "RBF" => {
                    let dot_product = data1.row(i).dot(&data2.row(j));
                    let dist_sq = norms1[i] + norms2[j] - 2.0 * dot_product;
                    let dist_sq = dist_sq.max(0.0);

                    let exponent = -bandwidth * dist_sq;
                    let clamped_exponent = exponent.max(-700.0);

                    clamped_exponent.exp()
                }
                "Laplacian" => {
                    let diff = &data1.row(i) - &data2.row(j);
                    let dist = diff.mapv(|x| x.abs()).sum();

                    let exponent = -bandwidth * dist;
                    let clamped_exponent = exponent.max(-700.0);

                    clamped_exponent.exp()
                }
                "Polynomial" => {
                    let dot_product = data1.row(i).dot(&data2.row(j));
                    let base = bandwidth * dot_product + 1.0;

                    let clamped_base = base.max(1e-16);

                    clamped_base.powi(2)
                }
                "Linear" => data1.row(i).dot(&data2.row(j)),
                _ => {
                    return Err(SklearsError::InvalidOperation(format!(
                        "Unsupported kernel type: {}",
                        kernel_type
                    )));
                }
            };

            kernel[[i, j]] = similarity;
        }
    }

    monitor.monitor_matrix(&kernel, &format!("{}_kernel_matrix", kernel_type))?;

    Ok(kernel)
}

#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_stability_monitor_creation() {
        let config = StabilityConfig::default();
        let monitor = NumericalStabilityMonitor::new(config);

        assert!(monitor.get_warnings().is_empty());
        assert_eq!(monitor.get_metrics().condition_numbers.len(), 0);
    }

    #[test]
    fn test_condition_number_monitoring() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        let well_conditioned = array![[2.0, 1.0], [1.0, 2.0]];

        monitor
            .monitor_matrix(&well_conditioned, "test_well_conditioned")
            .unwrap();
        assert!(monitor.get_warnings().is_empty());

        let ill_conditioned = array![[1.0, 1.0], [1.0, 1.000001]];

        monitor
            .monitor_matrix(&ill_conditioned, "test_ill_conditioned")
            .unwrap();
    }

    #[test]
    fn test_stable_eigendecomposition() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        let matrix = array![[4.0, 2.0], [2.0, 3.0]];

        let (eigenvalues, eigenvectors) = monitor.stable_eigendecomposition(&matrix).unwrap();

        assert!(eigenvalues.len() <= matrix.nrows());
        assert_eq!(eigenvectors.nrows(), matrix.nrows());
        assert!(eigenvalues.iter().all(|&x| x.is_finite()));
    }

    #[test]
    fn test_stable_matrix_inverse() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        let matrix = array![[4.0, 2.0], [2.0, 3.0]];

        let inverse = monitor.stable_matrix_inverse(&matrix).unwrap();

        let mut regularized_matrix = matrix.clone();
        for i in 0..matrix.nrows() {
            regularized_matrix[[i, i]] += 1e-8;
        }

        let product = regularized_matrix.dot(&inverse);
        let identity_error = (&product - &Array2::<f64>::eye(2)).mapv(|x| x.abs()).sum();

        assert!(identity_error < 1e-10);
    }

    #[test]
    fn test_stable_cholesky() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        let matrix = array![[4.0, 2.0], [2.0, 3.0]];

        let cholesky = monitor.stable_cholesky(&matrix).unwrap();

        let reconstructed = cholesky.dot(&cholesky.t());

        let mut regularized_matrix = matrix.clone();
        for i in 0..matrix.nrows() {
            regularized_matrix[[i, i]] += 1e-8;
        }

        let reconstruction_error = (&regularized_matrix - &reconstructed)
            .mapv(|x| x.abs())
            .sum();

        assert!(reconstruction_error < 1e-10);
    }

    #[test]
    fn test_stable_kernel_matrix() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        let data = array![[1.0, 2.0], [2.0, 3.0], [3.0, 4.0]];

        let kernel = stable_kernel_matrix(&data, None, "RBF", 1.0, &mut monitor).unwrap();

        assert_eq!(kernel.shape(), &[3, 3]);
        assert!(kernel.iter().all(|&x| x.is_finite() && x >= 0.0));

        for i in 0..3 {
            for j in 0..3 {
                assert!((kernel[[i, j]] - kernel[[j, i]]).abs() < 1e-12);
            }
        }

        for i in 0..3 {
            assert!((kernel[[i, i]] - 1.0).abs() < 1e-12);
        }
    }

    #[test]
    fn test_stability_report() {
        let mut monitor = NumericalStabilityMonitor::new(StabilityConfig::default());

        let matrix = array![[1.0, 2.0], [3.0, 4.0]];

        monitor.monitor_matrix(&matrix, "test_matrix").unwrap();

        let report = monitor.generate_report();
        assert!(report.contains("Numerical Stability Report"));
        assert!(report.contains("Condition Numbers"));
    }
}