use scirs2_core::ndarray::{Array1, Array2, Axis};
use scirs2_core::random::essentials::{Normal as RandNormal, Uniform as RandUniform};
use scirs2_core::random::rngs::StdRng as RealStdRng;
use scirs2_core::random::Rng;
use scirs2_core::random::{thread_rng, SeedableRng};
use serde::{Deserialize, Serialize};

use sklears_core::error::{Result, SklearsError};
use std::collections::HashMap;
use std::f64::consts::PI;

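// This module provides three complementary tools for evaluating random-feature
// kernel approximations: `ConvergenceAnalyzer` tracks the approximation error
// as the feature count grows, `ErrorBoundsValidator` checks theoretical
// concentration bounds against bootstrapped empirical errors, and
// `QualityAssessment` scores an approximate kernel matrix against the exact
// one on several matrix norms.
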
/// Analyzes how a random-feature kernel approximation converges toward the
/// exact kernel as the number of components grows.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConvergenceAnalyzer {
    /// Maximum number of random components to test.
    pub max_components: usize,
    /// Component counts at which the approximation error is measured.
    pub component_steps: Vec<usize>,
    /// Number of independent trials per component count.
    pub n_trials: usize,
    /// Mean relative error below which the approximation counts as converged.
    pub convergence_tolerance: f64,
    /// How the reference kernel matrix is computed.
    pub reference_method: ReferenceMethod,
}

/// Method used to compute the reference kernel that approximations are
/// compared against.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ReferenceMethod {
    /// Evaluate the kernel function exactly for every pair of rows.
    ExactKernel,
    /// Use a random-feature approximation with a large, fixed component count.
    HighPrecisionApproximation { n_components: usize },
    /// Average several random-feature approximations.
    MonteCarloEstimate { n_samples: usize },
}

impl ConvergenceAnalyzer {
    /// Creates an analyzer that measures the error at 10%, 20%, ..., 100% of
    /// `max_components`.
    pub fn new(max_components: usize) -> Self {
        let component_steps = (1..=10)
            .map(|i| (max_components * i) / 10)
            .filter(|&x| x > 0)
            .collect();

        Self {
            max_components,
            component_steps,
            n_trials: 10,
            convergence_tolerance: 1e-6,
            reference_method: ReferenceMethod::ExactKernel,
        }
    }

    /// Overrides the component counts at which the error is measured.
    pub fn component_steps(mut self, steps: Vec<usize>) -> Self {
        self.component_steps = steps;
        self
    }

    /// Sets the number of independent trials per component count.
    pub fn n_trials(mut self, n_trials: usize) -> Self {
        self.n_trials = n_trials;
        self
    }

    /// Sets the convergence tolerance on the mean relative error.
    pub fn convergence_tolerance(mut self, tolerance: f64) -> Self {
        self.convergence_tolerance = tolerance;
        self
    }

    /// Sets the reference-kernel computation method.
    pub fn reference_method(mut self, method: ReferenceMethod) -> Self {
        self.reference_method = method;
        self
    }

    /// Measures the RBF approximation error at each configured component
    /// count and estimates the empirical convergence rate.
    pub fn analyze_rbf_convergence(
        &self,
        x: &Array2<f64>,
        gamma: f64,
    ) -> Result<ConvergenceResult> {
        let mut convergence_rates = Vec::new();
        let mut approximation_errors = Vec::new();

        let reference_kernel = self.compute_reference_kernel(x, x, gamma)?;

        for &n_components in &self.component_steps {
            let mut trial_errors = Vec::new();

            for _ in 0..self.n_trials {
                let approximated_kernel =
                    self.compute_rbf_approximation(x, x, gamma, n_components)?;

                let error =
                    self.compute_approximation_error(&reference_kernel, &approximated_kernel)?;
                trial_errors.push(error);
            }

            let mean_error = trial_errors.iter().sum::<f64>() / trial_errors.len() as f64;
            let std_error = {
                let variance = trial_errors
                    .iter()
                    .map(|&e| (e - mean_error).powi(2))
                    .sum::<f64>()
                    / trial_errors.len() as f64;
                variance.sqrt()
            };

            convergence_rates.push(n_components);
            approximation_errors.push(ApproximationError {
                mean: mean_error,
                std: std_error,
                min: trial_errors.iter().fold(f64::INFINITY, |a, &b| a.min(b)),
                max: trial_errors
                    .iter()
                    .fold(f64::NEG_INFINITY, |a, &b| a.max(b)),
            });
        }

        let convergence_rate =
            self.estimate_convergence_rate(&convergence_rates, &approximation_errors)?;
        // Avoid panicking when `component_steps` is empty: no data means not
        // converged.
        let is_converged = approximation_errors
            .last()
            .map(|e| e.mean < self.convergence_tolerance)
            .unwrap_or(false);

        Ok(ConvergenceResult {
            component_counts: convergence_rates,
            approximation_errors,
            convergence_rate,
            is_converged,
        })
    }

    fn compute_reference_kernel(
        &self,
        x: &Array2<f64>,
        y: &Array2<f64>,
        gamma: f64,
    ) -> Result<Array2<f64>> {
        match &self.reference_method {
            ReferenceMethod::ExactKernel => self.compute_exact_rbf_kernel(x, y, gamma),
            ReferenceMethod::HighPrecisionApproximation { n_components } => {
                self.compute_rbf_approximation(x, y, gamma, *n_components)
            }
            ReferenceMethod::MonteCarloEstimate { n_samples } => {
                self.compute_monte_carlo_estimate(x, y, gamma, *n_samples)
            }
        }
    }

    /// Computes the exact RBF kernel k(x, y) = exp(-gamma * ||x - y||^2).
    fn compute_exact_rbf_kernel(
        &self,
        x: &Array2<f64>,
        y: &Array2<f64>,
        gamma: f64,
    ) -> Result<Array2<f64>> {
        let n_x = x.nrows();
        let n_y = y.nrows();
        let mut kernel_matrix = Array2::zeros((n_x, n_y));

        for i in 0..n_x {
            for j in 0..n_y {
                let diff = &x.row(i) - &y.row(j);
                let squared_norm = diff.dot(&diff);
                kernel_matrix[[i, j]] = (-gamma * squared_norm).exp();
            }
        }

        Ok(kernel_matrix)
    }

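    /// Approximates the RBF kernel with random Fourier features (Rahimi &
    /// Recht, 2007). The identity k(x, y) = exp(-gamma * ||x - y||^2)
    /// = E[z(x) . z(y)] holds for z(x) = sqrt(2/D) * [cos(w_i . x + b_i)]_i
    /// with w_i ~ N(0, 2 * gamma * I) and b_i ~ Uniform[0, 2*pi]; averaging D
    /// such features gives a Monte Carlo estimate of the kernel. Each call
    /// draws fresh frequencies, so repeated calls yield independent estimates.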
    fn compute_rbf_approximation(
        &self,
        x: &Array2<f64>,
        y: &Array2<f64>,
        gamma: f64,
        n_components: usize,
    ) -> Result<Array2<f64>> {
        // Fresh RNG per call so that trials are independent.
        let mut rng = RealStdRng::from_seed(thread_rng().gen());
        let normal = RandNormal::new(0.0, (2.0 * gamma).sqrt()).unwrap();
        let uniform = RandUniform::new(0.0, 2.0 * PI).unwrap();

        let input_dim = x.ncols();

        // Sample frequencies w_i ~ N(0, 2 * gamma * I) and phases b_i ~ U[0, 2*pi].
        let mut weights = Array2::zeros((n_components, input_dim));
        let mut biases = Array1::zeros(n_components);

        for i in 0..n_components {
            for j in 0..input_dim {
                weights[[i, j]] = rng.sample(normal);
            }
            biases[i] = rng.sample(uniform);
        }

        // Both inputs share the same weights so the feature maps are consistent.
        let features_x = self.compute_rff_features(x, &weights, &biases, n_components)?;
        let features_y = self.compute_rff_features(y, &weights, &biases, n_components)?;

        let n_x = x.nrows();
        let n_y = y.nrows();
        let mut kernel_matrix = Array2::zeros((n_x, n_y));

        for i in 0..n_x {
            for j in 0..n_y {
                kernel_matrix[[i, j]] = features_x.row(i).dot(&features_y.row(j));
            }
        }

        Ok(kernel_matrix)
    }

    /// Maps each row of `x` through z_j(x) = sqrt(2 / D) * cos(w_j . x + b_j).
    fn compute_rff_features(
        &self,
        x: &Array2<f64>,
        weights: &Array2<f64>,
        biases: &Array1<f64>,
        n_components: usize,
    ) -> Result<Array2<f64>> {
        let n_samples = x.nrows();
        let mut features = Array2::zeros((n_samples, n_components));
        let scaling = (2.0 / n_components as f64).sqrt();

        for i in 0..n_samples {
            for j in 0..n_components {
                let mut dot_product = 0.0;
                for k in 0..x.ncols() {
                    dot_product += x[[i, k]] * weights[[j, k]];
                }
                dot_product += biases[j];
                features[[i, j]] = scaling * dot_product.cos();
            }
        }

        Ok(features)
    }

    /// Averages `n_samples` independent random-feature approximations, each
    /// built from 1000 components, to form a lower-variance reference kernel.
    fn compute_monte_carlo_estimate(
        &self,
        x: &Array2<f64>,
        y: &Array2<f64>,
        gamma: f64,
        n_samples: usize,
    ) -> Result<Array2<f64>> {
        let mut kernel_sum = Array2::zeros((x.nrows(), y.nrows()));

        for _ in 0..n_samples {
            let approx = self.compute_rbf_approximation(x, y, gamma, 1000)?;
            kernel_sum = kernel_sum + approx;
        }

        Ok(kernel_sum / n_samples as f64)
    }

    /// Relative Frobenius error ||K_ref - K_approx||_F / ||K_ref||_F, falling
    /// back to the absolute error when the reference norm is zero.
    fn compute_approximation_error(
        &self,
        reference: &Array2<f64>,
        approximation: &Array2<f64>,
    ) -> Result<f64> {
        if reference.shape() != approximation.shape() {
            return Err(SklearsError::InvalidInput(
                "Matrix dimensions don't match".to_string(),
            ));
        }

        let diff = reference - approximation;
        let frobenius_norm = diff.mapv(|x| x * x).sum().sqrt();
        let reference_norm = reference.mapv(|x| x * x).sum().sqrt();

        if reference_norm > 0.0 {
            Ok(frobenius_norm / reference_norm)
        } else {
            Ok(frobenius_norm)
        }
    }

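    /// Estimates the empirical convergence rate by ordinary least squares on
    /// log(error) versus log(n_components), returning the negated slope so
    /// that error ~ C * D^(-rate). Independent random Fourier features are
    /// expected to give a rate near 0.5, i.e. O(1/sqrt(D)) error decay.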
    fn estimate_convergence_rate(
        &self,
        components: &[usize],
        errors: &[ApproximationError],
    ) -> Result<f64> {
        if components.len() < 2 || errors.len() < 2 {
            return Ok(0.0);
        }

        // Collect (log D, log error) pairs, skipping zero errors where the
        // logarithm is undefined.
        let mut log_components = Vec::new();
        let mut log_errors = Vec::new();

        for (i, error) in errors.iter().enumerate() {
            if error.mean > 0.0 {
                log_components.push((components[i] as f64).ln());
                log_errors.push(error.mean.ln());
            }
        }

        if log_components.len() < 2 {
            return Ok(0.0);
        }

        // Ordinary least-squares slope of log(error) on log(D).
        let n = log_components.len() as f64;
        let sum_x = log_components.iter().sum::<f64>();
        let sum_y = log_errors.iter().sum::<f64>();
        let sum_xy = log_components
            .iter()
            .zip(log_errors.iter())
            .map(|(x, y)| x * y)
            .sum::<f64>();
        let sum_x2 = log_components.iter().map(|x| x * x).sum::<f64>();

        let slope = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x * sum_x);

        // The slope is negative for a converging approximation; negate it so
        // larger values mean faster convergence.
        Ok(-slope)
    }
}

/// Summary statistics of the approximation error across trials.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ApproximationError {
    pub mean: f64,
    pub std: f64,
    pub min: f64,
    pub max: f64,
}

/// Result of a convergence analysis across component counts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConvergenceResult {
    /// Component counts at which the error was measured.
    pub component_counts: Vec<usize>,
    /// Error statistics corresponding to each component count.
    pub approximation_errors: Vec<ApproximationError>,
    /// Estimated empirical convergence rate (error ~ D^(-rate)).
    pub convergence_rate: f64,
    /// Whether the final mean error fell below the configured tolerance.
    pub is_converged: bool,
}
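
// Interpretation sketch: with component_steps = [10, 20, 40] and mean errors
// [0.30, 0.21, 0.15], the fitted log-log slope is about -0.5, so
// `convergence_rate` comes out near 0.5, the O(1/sqrt(D)) decay expected of
// independent random Fourier features.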

/// Validates theoretical error bounds for random Fourier features against
/// bootstrapped empirical errors.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrorBoundsValidator {
    /// Confidence level at which the bounds are evaluated (e.g. 0.95).
    pub confidence_level: f64,
    /// Number of independent approximations drawn to estimate the error distribution.
    pub n_bootstrap_samples: usize,
    /// Families of concentration bounds to validate.
    pub bound_types: Vec<BoundType>,
}

/// Families of concentration inequalities used for the theoretical bounds.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum BoundType {
    /// Hoeffding's inequality for bounded random variables.
    Hoeffding,
    /// McDiarmid's bounded-differences inequality.
    McDiarmid,
    /// Azuma's inequality for martingale differences.
    Azuma,
    /// Bernstein's variance-sensitive inequality.
    Bernstein,
    /// Data-dependent bound based on Rademacher complexity.
    Empirical,
}

impl Default for ErrorBoundsValidator {
    fn default() -> Self {
        Self::new()
    }
}

impl ErrorBoundsValidator {
    pub fn new() -> Self {
        Self {
            confidence_level: 0.95,
            n_bootstrap_samples: 1000,
            bound_types: vec![
                BoundType::Hoeffding,
                BoundType::McDiarmid,
                BoundType::Bernstein,
                BoundType::Empirical,
            ],
        }
    }

    /// Sets the confidence level at which bounds are checked.
    pub fn confidence_level(mut self, level: f64) -> Self {
        self.confidence_level = level;
        self
    }

    /// Sets the number of bootstrap samples used to estimate the error distribution.
    pub fn n_bootstrap_samples(mut self, n_samples: usize) -> Self {
        self.n_bootstrap_samples = n_samples;
        self
    }

    /// Sets the bound families to validate.
    pub fn bound_types(mut self, bounds: Vec<BoundType>) -> Self {
        self.bound_types = bounds;
        self
    }

    /// Draws `n_bootstrap_samples` independent RFF approximations, estimates
    /// the empirical error distribution, and checks each theoretical bound
    /// against the matching empirical quantile.
    pub fn validate_rff_bounds(
        &self,
        x: &Array2<f64>,
        gamma: f64,
        n_components: usize,
    ) -> Result<ErrorBoundsResult> {
        let mut bound_results = HashMap::new();

        let exact_kernel = self.compute_exact_kernel(x, gamma)?;

        let mut approximation_errors = Vec::new();

        for _ in 0..self.n_bootstrap_samples {
            let approx_kernel = self.compute_rff_approximation(x, gamma, n_components)?;
            let error = self.compute_relative_error(&exact_kernel, &approx_kernel)?;
            approximation_errors.push(error);
        }

        approximation_errors.sort_by(|a, b| a.partial_cmp(b).unwrap());

        let empirical_mean =
            approximation_errors.iter().sum::<f64>() / approximation_errors.len() as f64;
        let empirical_variance = approximation_errors
            .iter()
            .map(|&e| (e - empirical_mean).powi(2))
            .sum::<f64>()
            / approximation_errors.len() as f64;

        for bound_type in &self.bound_types {
            let bound = self.compute_theoretical_bound(
                bound_type,
                n_components,
                x.ncols(),
                self.confidence_level,
            )?;
            // A bound that holds with probability `confidence_level` should
            // dominate the `confidence_level`-quantile of the sorted errors
            // (e.g. the 95th percentile at 0.95), not the lower tail.
            let empirical_quantile_idx =
                (self.confidence_level * approximation_errors.len() as f64) as usize;
            let empirical_bound =
                approximation_errors[empirical_quantile_idx.min(approximation_errors.len() - 1)];

            bound_results.insert(
                bound_type.clone(),
                BoundValidation {
                    theoretical_bound: bound,
                    empirical_bound,
                    is_valid: bound >= empirical_bound,
                    tightness_ratio: if bound > 0.0 {
                        empirical_bound / bound
                    } else {
                        0.0
                    },
                },
            );
        }

        Ok(ErrorBoundsResult {
            empirical_mean,
            empirical_variance,
            empirical_quantiles: self.compute_quantiles(&approximation_errors),
            bound_validations: bound_results,
        })
    }
477
478 fn compute_exact_kernel(&self, x: &Array2<f64>, gamma: f64) -> Result<Array2<f64>> {
479 let n = x.nrows();
480 let mut kernel = Array2::zeros((n, n));
481
482 for i in 0..n {
483 for j in 0..n {
484 let diff = &x.row(i) - &x.row(j);
485 let squared_norm = diff.dot(&diff);
486 kernel[[i, j]] = (-gamma * squared_norm).exp();
487 }
488 }
489
490 Ok(kernel)
491 }

    /// Builds one random Fourier feature approximation of the kernel matrix.
    /// Mirrors `ConvergenceAnalyzer::compute_rbf_approximation`; see the doc
    /// comment there for the feature-map construction.
    fn compute_rff_approximation(
        &self,
        x: &Array2<f64>,
        gamma: f64,
        n_components: usize,
    ) -> Result<Array2<f64>> {
        let mut rng = RealStdRng::from_seed(thread_rng().gen());
        let normal = RandNormal::new(0.0, (2.0 * gamma).sqrt()).unwrap();
        let uniform = RandUniform::new(0.0, 2.0 * PI).unwrap();

        let input_dim = x.ncols();
        let n_samples = x.nrows();

        // Sample frequencies and phases.
        let mut weights = Array2::zeros((n_components, input_dim));
        let mut biases = Array1::zeros(n_components);

        for i in 0..n_components {
            for j in 0..input_dim {
                weights[[i, j]] = rng.sample(normal);
            }
            biases[i] = rng.sample(uniform);
        }

        // Map the data through the random feature map.
        let mut features = Array2::zeros((n_samples, n_components));
        let scaling = (2.0 / n_components as f64).sqrt();

        for i in 0..n_samples {
            for j in 0..n_components {
                let mut dot_product = 0.0;
                for k in 0..input_dim {
                    dot_product += x[[i, k]] * weights[[j, k]];
                }
                dot_product += biases[j];
                features[[i, j]] = scaling * dot_product.cos();
            }
        }

        // Inner products of the feature vectors approximate the kernel.
        let mut kernel = Array2::zeros((n_samples, n_samples));
        for i in 0..n_samples {
            for j in 0..n_samples {
                kernel[[i, j]] = features.row(i).dot(&features.row(j));
            }
        }

        Ok(kernel)
    }

    /// Relative Frobenius error ||K - K_approx||_F / ||K||_F.
    fn compute_relative_error(&self, exact: &Array2<f64>, approx: &Array2<f64>) -> Result<f64> {
        let diff = exact - approx;
        let frobenius_error = diff.mapv(|x| x * x).sum().sqrt();
        let exact_norm = exact.mapv(|x| x * x).sum().sqrt();

        if exact_norm > 0.0 {
            Ok(frobenius_error / exact_norm)
        } else {
            Ok(frobenius_error)
        }
    }

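    /// Computes a theoretical high-probability bound on the approximation
    /// error for the requested bound family at the given confidence level.
    /// The constants below are heuristic simplifications: sharp constants for
    /// random Fourier features depend on the kernel and the data diameter
    /// (see Rahimi & Recht, 2007), so these values are order-of-magnitude
    /// references rather than tight guarantees.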
    fn compute_theoretical_bound(
        &self,
        bound_type: &BoundType,
        n_components: usize,
        input_dim: usize,
        confidence: f64,
    ) -> Result<f64> {
        let delta = 1.0 - confidence;

        match bound_type {
            BoundType::Hoeffding => {
                // Hoeffding-style bound with a fixed range constant.
                let c = 2.0;
                Ok(c * (2.0 * delta.ln().abs() / n_components as f64).sqrt())
            }
            BoundType::McDiarmid => {
                // Bounded-differences constant shrinking with the feature count.
                let c = 4.0 / (n_components as f64).sqrt();
                Ok(c * (2.0 * delta.ln().abs() / n_components as f64).sqrt())
            }
            BoundType::Bernstein => {
                // Variance-sensitive bound: sqrt-variance term plus a range term.
                let variance_bound = 1.0 / n_components as f64;
                let range_bound = 2.0 / (n_components as f64).sqrt();
                let term1 = (2.0 * variance_bound * delta.ln().abs() / n_components as f64).sqrt();
                let term2 = 2.0 * range_bound * delta.ln().abs() / (3.0 * n_components as f64);
                Ok(term1 + term2)
            }
            BoundType::Azuma => {
                // Martingale-difference bound with increments of 1 / sqrt(D).
                let c = 1.0 / (n_components as f64).sqrt();
                Ok(c * (2.0 * delta.ln().abs()).sqrt())
            }
            BoundType::Empirical => {
                // Rademacher-complexity-style generalization bound.
                let rademacher_complexity = (input_dim as f64 / n_components as f64).sqrt();
                Ok(2.0 * rademacher_complexity
                    + (delta.ln().abs() / (2.0 * n_components as f64)).sqrt())
            }
        }
    }

    /// Returns nearest-rank quantiles of `data`, which must already be sorted
    /// ascending (as guaranteed by the caller in `validate_rff_bounds`).
    fn compute_quantiles(&self, data: &[f64]) -> HashMap<String, f64> {
        let mut quantiles = HashMap::new();
        let n = data.len();
        if n == 0 {
            return quantiles;
        }

        for &p in &[0.05, 0.25, 0.5, 0.75, 0.95, 0.99] {
            let idx = ((p * n as f64) as usize).min(n - 1);
            quantiles.insert(format!("q{}", (p * 100.0) as u8), data[idx]);
        }

        quantiles
    }
}

/// Outcome of checking one theoretical bound against the empirical quantile.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BoundValidation {
    /// The theoretical high-probability bound.
    pub theoretical_bound: f64,
    /// The matching empirical quantile of the observed errors.
    pub empirical_bound: f64,
    /// Whether the theoretical bound dominates the empirical one.
    pub is_valid: bool,
    /// empirical / theoretical; values near 1 indicate a tight bound.
    pub tightness_ratio: f64,
}

/// Aggregate result of the error-bounds validation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrorBoundsResult {
    pub empirical_mean: f64,
    pub empirical_variance: f64,
    /// Nearest-rank quantiles of the empirical error distribution, keyed
    /// "q5", "q25", ..., "q99".
    pub empirical_quantiles: HashMap<String, f64>,
    pub bound_validations: HashMap<BoundType, BoundValidation>,
}

/// Scores an approximate kernel matrix against the exact one on several
/// matrix-quality metrics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityAssessment {
    /// Metrics to evaluate.
    pub metrics: Vec<QualityMetric>,
    /// Baseline methods to compare against. Currently informational only:
    /// `assess_approximation` does not use this field.
    pub baseline_methods: Vec<BaselineMethod>,
}

/// Quality metrics for comparing kernel matrices.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum QualityMetric {
    /// Centered kernel alignment (1.0 is a perfect match).
    KernelAlignment,
    /// Largest singular value of the difference matrix.
    SpectralError,
    /// Frobenius norm of the difference matrix.
    FrobeniusError,
    /// Nuclear-norm proxy of the difference matrix.
    NuclearNormError,
    /// Operator norm of the difference matrix.
    OperatorNormError,
    /// Frobenius error relative to the exact kernel's norm.
    RelativeError,
    /// Effective rank (participation ratio) of the approximation.
    EffectiveRank,
}

/// Reference methods an approximation can be benchmarked against.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BaselineMethod {
    RandomSampling,
    UniformSampling,
    ExactMethod,
    PreviousBest { method_name: String },
}

impl Default for QualityAssessment {
    fn default() -> Self {
        Self::new()
    }
}

impl QualityAssessment {
    pub fn new() -> Self {
        Self {
            metrics: vec![
                QualityMetric::KernelAlignment,
                QualityMetric::SpectralError,
                QualityMetric::FrobeniusError,
                QualityMetric::RelativeError,
            ],
            baseline_methods: vec![BaselineMethod::RandomSampling, BaselineMethod::ExactMethod],
        }
    }

    /// Evaluates all configured metrics and combines them into an overall score.
    pub fn assess_approximation(
        &self,
        exact_kernel: &Array2<f64>,
        approx_kernel: &Array2<f64>,
    ) -> Result<QualityResult> {
        let mut metric_scores = HashMap::new();

        for metric in &self.metrics {
            let score = self.compute_metric(metric, exact_kernel, approx_kernel)?;
            metric_scores.insert(metric.clone(), score);
        }

        let overall_score = self.compute_overall_score(&metric_scores);
        Ok(QualityResult {
            metric_scores,
            overall_score,
        })
    }

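    /// Evaluates one quality metric. Kernel alignment is the centered
    /// alignment A(K1, K2) = <K1c, K2c>_F / (||K1c||_F * ||K2c||_F), which is
    /// at most 1 in absolute value and equals 1 when the centered kernels are
    /// proportional; the remaining metrics are norms of the difference matrix
    /// (lower is better), except `EffectiveRank`, which is computed on the
    /// approximation itself.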
    fn compute_metric(
        &self,
        metric: &QualityMetric,
        exact: &Array2<f64>,
        approx: &Array2<f64>,
    ) -> Result<f64> {
        match metric {
            QualityMetric::KernelAlignment => {
                let exact_centered = self.center_kernel(exact)?;
                let approx_centered = self.center_kernel(approx)?;

                let numerator = self.frobenius_inner_product(&exact_centered, &approx_centered)?;
                let exact_norm = self.frobenius_norm(&exact_centered)?;
                let approx_norm = self.frobenius_norm(&approx_centered)?;

                if exact_norm > 0.0 && approx_norm > 0.0 {
                    Ok(numerator / (exact_norm * approx_norm))
                } else {
                    Ok(0.0)
                }
            }
            QualityMetric::FrobeniusError => {
                let diff = exact - approx;
                self.frobenius_norm(&diff)
            }
            QualityMetric::RelativeError => {
                let diff = exact - approx;
                let error_norm = self.frobenius_norm(&diff)?;
                let exact_norm = self.frobenius_norm(exact)?;

                if exact_norm > 0.0 {
                    Ok(error_norm / exact_norm)
                } else {
                    Ok(error_norm)
                }
            }
            QualityMetric::SpectralError => {
                let diff = exact - approx;
                self.largest_singular_value(&diff)
            }
            QualityMetric::NuclearNormError => {
                let diff = exact - approx;
                self.nuclear_norm(&diff)
            }
            QualityMetric::OperatorNormError => {
                let diff = exact - approx;
                self.operator_norm(&diff)
            }
            QualityMetric::EffectiveRank => self.effective_rank(approx),
        }
    }

    /// Double-centers a kernel matrix, entrywise:
    /// Kc[i][j] = K[i][j] - rowmean_i - colmean_j + overall mean.
    fn center_kernel(&self, kernel: &Array2<f64>) -> Result<Array2<f64>> {
        let n = kernel.nrows();
        let row_means = kernel.mean_axis(Axis(1)).unwrap();
        let col_means = kernel.mean_axis(Axis(0)).unwrap();
        let overall_mean = kernel.mean().unwrap();

        let mut centered = kernel.clone();

        for i in 0..n {
            for j in 0..n {
                centered[[i, j]] = kernel[[i, j]] - row_means[i] - col_means[j] + overall_mean;
            }
        }

        Ok(centered)
    }

    fn frobenius_inner_product(&self, a: &Array2<f64>, b: &Array2<f64>) -> Result<f64> {
        if a.shape() != b.shape() {
            return Err(SklearsError::InvalidInput(
                "Matrix dimensions don't match".to_string(),
            ));
        }

        Ok(a.iter().zip(b.iter()).map(|(x, y)| x * y).sum())
    }

    fn frobenius_norm(&self, matrix: &Array2<f64>) -> Result<f64> {
        Ok(matrix.mapv(|x| x * x).sum().sqrt())
    }

    /// Estimates the largest singular value via power iteration on A^T A.
    fn largest_singular_value(&self, matrix: &Array2<f64>) -> Result<f64> {
        let n = matrix.nrows();
        let m = matrix.ncols();

        if n == 0 || m == 0 {
            return Ok(0.0);
        }

        // Start from a uniform unit vector.
        let mut v = Array1::from_vec(vec![1.0; m]);
        let norm = v.dot(&v).sqrt();
        v /= norm;

        // Power iteration on A^T A converges to the top right singular vector.
        for _ in 0..50 {
            let mut av: Array1<f64> = Array1::zeros(n);
            for i in 0..n {
                for j in 0..m {
                    av[i] += matrix[[i, j]] * v[j];
                }
            }

            let mut ata_v: Array1<f64> = Array1::zeros(m);
            for j in 0..m {
                for i in 0..n {
                    ata_v[j] += matrix[[i, j]] * av[i];
                }
            }

            let norm = ata_v.dot(&ata_v).sqrt();
            if norm > 1e-12 {
                v = ata_v / norm;
            } else {
                break;
            }
        }

        // sigma_max = ||A v|| for the converged right singular vector v.
        let mut av: Array1<f64> = Array1::zeros(n);
        for i in 0..n {
            for j in 0..m {
                av[i] += matrix[[i, j]] * v[j];
            }
        }

        Ok(av.dot(&av).sqrt())
    }

    /// Approximates the nuclear norm by the sum of absolute diagonal entries.
    /// This is exact only for diagonal matrices; the true nuclear norm (sum
    /// of singular values) would require an SVD, which is avoided here for speed.
    fn nuclear_norm(&self, matrix: &Array2<f64>) -> Result<f64> {
        let trace = (0..matrix.nrows().min(matrix.ncols()))
            .map(|i| matrix[[i, i]].abs())
            .sum::<f64>();
        Ok(trace)
    }

    /// The operator (spectral) norm is the largest singular value.
    fn operator_norm(&self, matrix: &Array2<f64>) -> Result<f64> {
        self.largest_singular_value(matrix)
    }

    /// Effective rank as the participation ratio trace(K)^2 / ||K||_F^2. For
    /// a PSD kernel matrix this equals (sum of eigenvalues)^2 / (sum of
    /// squared eigenvalues), ranging from 1 (rank one) to n (identity-like).
    fn effective_rank(&self, matrix: &Array2<f64>) -> Result<f64> {
        let trace = (0..matrix.nrows().min(matrix.ncols()))
            .map(|i| matrix[[i, i]])
            .sum::<f64>();
        let frobenius_squared = matrix.mapv(|x| x * x).sum();

        if frobenius_squared > 0.0 {
            Ok(trace * trace / frobenius_squared)
        } else {
            Ok(0.0)
        }
    }

    /// Combines the individual metrics into a single weighted score in [0, 1].
    /// Alignment is used as-is, error norms are mapped through 1 / (1 + e) so
    /// that smaller errors score higher, and effective rank is normalized by
    /// a placeholder constant.
    fn compute_overall_score(&self, metric_scores: &HashMap<QualityMetric, f64>) -> f64 {
        let weights = [
            (QualityMetric::KernelAlignment, 0.4),
            (QualityMetric::RelativeError, 0.3),
            (QualityMetric::FrobeniusError, 0.2),
            (QualityMetric::EffectiveRank, 0.1),
        ];

        let mut weighted_sum = 0.0;
        let mut total_weight = 0.0;

        for (metric, weight) in &weights {
            if let Some(&score) = metric_scores.get(metric) {
                let normalized_score = match metric {
                    // Alignment already lies in [-1, 1]; higher is better.
                    QualityMetric::KernelAlignment => score,
                    // Normalize the rank by a nominal matrix size.
                    QualityMetric::EffectiveRank => score / matrix_size_as_float(metric_scores),
                    // Map error norms so that smaller errors give higher scores.
                    _ => 1.0 / (1.0 + score),
                };
                weighted_sum += weight * normalized_score;
                total_weight += weight;
            }
        }

        if total_weight > 0.0 {
            weighted_sum / total_weight
        } else {
            0.0
        }
    }
}

/// Placeholder normalization constant for the effective-rank score. A fuller
/// implementation would normalize by the actual kernel matrix dimension,
/// which is not recoverable from the metric scores alone.
fn matrix_size_as_float(_metric_scores: &HashMap<QualityMetric, f64>) -> f64 {
    100.0
}

/// Result of a quality assessment.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityResult {
    /// Raw score for each evaluated metric.
    pub metric_scores: HashMap<QualityMetric, f64>,
    /// Weighted combination of the normalized metric scores, in [0, 1].
    pub overall_score: f64,
}

#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::essentials::Normal;
    use scirs2_core::ndarray::{Array, Array2};
    use scirs2_core::random::thread_rng;

    #[test]
    fn test_convergence_analyzer() {
        let x: Array2<f64> = Array::from_shape_fn((20, 5), |_| {
            let mut rng = thread_rng();
            rng.sample(&Normal::new(0.0, 1.0).unwrap())
        });
        let analyzer = ConvergenceAnalyzer::new(50)
            .component_steps(vec![10, 20, 30, 40, 50])
            .n_trials(3);

        let result = analyzer.analyze_rbf_convergence(&x, 1.0).unwrap();

        assert_eq!(result.component_counts.len(), 5);
        assert!(result.convergence_rate >= 0.0);
    }

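    // Minimal sanity check (sketch): identical matrices must yield a zero
    // relative Frobenius error. Exercises only private helpers of this module.
    #[test]
    fn test_relative_error_zero_for_identical_matrices() {
        let validator = ErrorBoundsValidator::new();
        let identity = Array::eye(5);
        let error = validator
            .compute_relative_error(&identity, &identity)
            .unwrap();
        assert!(error.abs() < 1e-12);
    }
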
    #[test]
    fn test_error_bounds_validator() {
        let x: Array2<f64> = Array::from_shape_fn((15, 4), |_| {
            let mut rng = thread_rng();
            rng.sample(&Normal::new(0.0, 1.0).unwrap())
        });
        let validator = ErrorBoundsValidator::new()
            .confidence_level(0.9)
            .n_bootstrap_samples(50);

        let result = validator.validate_rff_bounds(&x, 1.0, 20).unwrap();

        assert!(result.empirical_mean >= 0.0);
        assert!(result.empirical_variance >= 0.0);
        assert!(!result.bound_validations.is_empty());
    }

    #[test]
    fn test_quality_assessment() {
        let exact = Array::eye(10);
        let mut approx = Array::eye(10);
        // Perturb one entry so the approximation differs from the exact kernel.
        approx[[0, 0]] = 0.9;

        let assessment = QualityAssessment::new();
        let result = assessment.assess_approximation(&exact, &approx).unwrap();

        assert!(result.overall_score > 0.0);
        assert!(result.overall_score <= 1.0);
        assert!(!result.metric_scores.is_empty());
    }
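
    // Minimal sanity check (sketch) on the exact RBF kernel: unit diagonal
    // (k(x, x) = 1) and symmetry on a small hand-built point set.
    #[test]
    fn test_exact_rbf_kernel_unit_diagonal_and_symmetry() {
        let x = Array2::from_shape_vec((3, 2), vec![0.0, 0.0, 1.0, 0.0, 0.0, 2.0]).unwrap();
        let analyzer = ConvergenceAnalyzer::new(10);
        let kernel = analyzer.compute_exact_rbf_kernel(&x, &x, 0.5).unwrap();

        for i in 0..3 {
            assert!((kernel[[i, i]] - 1.0).abs() < 1e-12);
            for j in 0..3 {
                assert!((kernel[[i, j]] - kernel[[j, i]]).abs() < 1e-12);
            }
        }
    }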
}