1use scirs2_core::ndarray::{Array1, Array2, Axis};
7use scirs2_core::random::essentials::{Normal as RandNormal, Uniform as RandUniform};
8use scirs2_core::random::rngs::StdRng as RealStdRng;
9use scirs2_core::random::RngExt;
10use scirs2_core::random::{thread_rng, SeedableRng};
11use serde::{Deserialize, Serialize};
12
13use sklears_core::error::{Result, SklearsError};
14use std::collections::HashMap;
15use std::f64::consts::PI;
16
/// Configuration for studying how a random-feature kernel approximation
/// converges toward a reference kernel as the component count grows.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConvergenceAnalyzer {
    /// Largest number of random features considered in the study.
    pub max_components: usize,
    /// Component counts at which the approximation error is sampled.
    pub component_steps: Vec<usize>,
    /// Number of independent randomized trials averaged per step.
    pub n_trials: usize,
    /// Mean relative error below which the run is declared converged.
    pub convergence_tolerance: f64,
    /// Strategy used to compute the reference kernel matrix.
    pub reference_method: ReferenceMethod,
}
32
/// Strategy for producing the reference kernel that approximations are
/// compared against.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ReferenceMethod {
    /// Evaluate the kernel function exactly for every pair of samples.
    ExactKernel,
    /// Use a random-feature approximation with a fixed, large component
    /// count as a stand-in for the exact kernel.
    HighPrecisionApproximation { n_components: usize },
    /// Average several independent approximations (Monte Carlo style).
    MonteCarloEstimate { n_samples: usize },
}
43
44impl ConvergenceAnalyzer {
45 pub fn new(max_components: usize) -> Self {
46 let component_steps = (1..=10)
47 .map(|i| (max_components * i) / 10)
48 .filter(|&x| x > 0)
49 .collect();
50
51 Self {
52 max_components,
53 component_steps,
54 n_trials: 10,
55 convergence_tolerance: 1e-6,
56 reference_method: ReferenceMethod::ExactKernel,
57 }
58 }
59
60 pub fn component_steps(mut self, steps: Vec<usize>) -> Self {
61 self.component_steps = steps;
62 self
63 }
64
65 pub fn n_trials(mut self, n_trials: usize) -> Self {
66 self.n_trials = n_trials;
67 self
68 }
69
70 pub fn convergence_tolerance(mut self, tolerance: f64) -> Self {
71 self.convergence_tolerance = tolerance;
72 self
73 }
74
75 pub fn reference_method(mut self, method: ReferenceMethod) -> Self {
76 self.reference_method = method;
77 self
78 }
79
80 pub fn analyze_rbf_convergence(
81 &self,
82 x: &Array2<f64>,
83 gamma: f64,
84 ) -> Result<ConvergenceResult> {
85 let mut convergence_rates = Vec::new();
86 let mut approximation_errors = Vec::new();
87
88 let reference_kernel = self.compute_reference_kernel(x, x, gamma)?;
90
91 for &n_components in &self.component_steps {
92 let mut trial_errors = Vec::new();
93
94 for _ in 0..self.n_trials {
95 let approximated_kernel =
97 self.compute_rbf_approximation(x, x, gamma, n_components)?;
98
99 let error =
101 self.compute_approximation_error(&reference_kernel, &approximated_kernel)?;
102 trial_errors.push(error);
103 }
104
105 let mean_error = trial_errors.iter().sum::<f64>() / trial_errors.len() as f64;
106 let std_error = {
107 let variance = trial_errors
108 .iter()
109 .map(|&e| (e - mean_error).powi(2))
110 .sum::<f64>()
111 / trial_errors.len() as f64;
112 variance.sqrt()
113 };
114
115 convergence_rates.push(n_components);
116 approximation_errors.push(ApproximationError {
117 mean: mean_error,
118 std: std_error,
119 min: trial_errors.iter().fold(f64::INFINITY, |a, &b| a.min(b)),
120 max: trial_errors
121 .iter()
122 .fold(f64::NEG_INFINITY, |a, &b| a.max(b)),
123 });
124 }
125
126 let convergence_rate =
128 self.estimate_convergence_rate(&convergence_rates, &approximation_errors)?;
129 let is_converged = approximation_errors
130 .last()
131 .expect("operation should succeed")
132 .mean
133 < self.convergence_tolerance;
134
135 Ok(ConvergenceResult {
136 component_counts: convergence_rates,
137 approximation_errors,
138 convergence_rate,
139 is_converged,
140 })
141 }
142
143 fn compute_reference_kernel(
144 &self,
145 x: &Array2<f64>,
146 y: &Array2<f64>,
147 gamma: f64,
148 ) -> Result<Array2<f64>> {
149 match &self.reference_method {
150 ReferenceMethod::ExactKernel => self.compute_exact_rbf_kernel(x, y, gamma),
151 ReferenceMethod::HighPrecisionApproximation { n_components } => {
152 self.compute_rbf_approximation(x, y, gamma, *n_components)
153 }
154 ReferenceMethod::MonteCarloEstimate { n_samples } => {
155 self.compute_monte_carlo_estimate(x, y, gamma, *n_samples)
156 }
157 }
158 }
159
160 fn compute_exact_rbf_kernel(
161 &self,
162 x: &Array2<f64>,
163 y: &Array2<f64>,
164 gamma: f64,
165 ) -> Result<Array2<f64>> {
166 let n_x = x.nrows();
167 let n_y = y.nrows();
168 let mut kernel_matrix = Array2::zeros((n_x, n_y));
169
170 for i in 0..n_x {
171 for j in 0..n_y {
172 let diff = &x.row(i) - &y.row(j);
173 let squared_norm = diff.dot(&diff);
174 kernel_matrix[[i, j]] = (-gamma * squared_norm).exp();
175 }
176 }
177
178 Ok(kernel_matrix)
179 }
180
181 fn compute_rbf_approximation(
182 &self,
183 x: &Array2<f64>,
184 y: &Array2<f64>,
185 gamma: f64,
186 n_components: usize,
187 ) -> Result<Array2<f64>> {
188 let mut rng = RealStdRng::from_seed(thread_rng().random());
189 let normal = RandNormal::new(0.0, (2.0 * gamma).sqrt()).expect("operation should succeed");
190 let uniform = RandUniform::new(0.0, 2.0 * PI).expect("operation should succeed");
191
192 let input_dim = x.ncols();
193
194 let mut weights = Array2::zeros((n_components, input_dim));
196 let mut biases = Array1::zeros(n_components);
197
198 for i in 0..n_components {
199 for j in 0..input_dim {
200 weights[[i, j]] = rng.sample(normal);
201 }
202 biases[i] = rng.sample(uniform);
203 }
204
205 let features_x = self.compute_rff_features(x, &weights, &biases, n_components)?;
207 let features_y = self.compute_rff_features(y, &weights, &biases, n_components)?;
208
209 let n_x = x.nrows();
211 let n_y = y.nrows();
212 let mut kernel_matrix = Array2::zeros((n_x, n_y));
213
214 for i in 0..n_x {
215 for j in 0..n_y {
216 kernel_matrix[[i, j]] = features_x.row(i).dot(&features_y.row(j));
217 }
218 }
219
220 Ok(kernel_matrix)
221 }
222
223 fn compute_rff_features(
224 &self,
225 x: &Array2<f64>,
226 weights: &Array2<f64>,
227 biases: &Array1<f64>,
228 n_components: usize,
229 ) -> Result<Array2<f64>> {
230 let n_samples = x.nrows();
231 let mut features = Array2::zeros((n_samples, n_components));
232 let scaling = (2.0 / n_components as f64).sqrt();
233
234 for i in 0..n_samples {
235 for j in 0..n_components {
236 let mut dot_product = 0.0;
237 for k in 0..x.ncols() {
238 dot_product += x[[i, k]] * weights[[j, k]];
239 }
240 dot_product += biases[j];
241 features[[i, j]] = scaling * dot_product.cos();
242 }
243 }
244
245 Ok(features)
246 }
247
248 fn compute_monte_carlo_estimate(
249 &self,
250 x: &Array2<f64>,
251 y: &Array2<f64>,
252 gamma: f64,
253 n_samples: usize,
254 ) -> Result<Array2<f64>> {
255 let mut kernel_sum = Array2::zeros((x.nrows(), y.nrows()));
257
258 for _ in 0..n_samples {
259 let approx = self.compute_rbf_approximation(x, y, gamma, 1000)?;
260 kernel_sum = kernel_sum + approx;
261 }
262
263 Ok(kernel_sum / n_samples as f64)
264 }
265
266 fn compute_approximation_error(
267 &self,
268 reference: &Array2<f64>,
269 approximation: &Array2<f64>,
270 ) -> Result<f64> {
271 if reference.shape() != approximation.shape() {
272 return Err(SklearsError::InvalidInput(
273 "Matrix dimensions don't match".to_string(),
274 ));
275 }
276
277 let diff = reference - approximation;
279 let frobenius_norm = diff.mapv(|x| x * x).sum().sqrt();
280
281 let reference_norm = reference.mapv(|x| x * x).sum().sqrt();
283
284 if reference_norm > 0.0 {
285 Ok(frobenius_norm / reference_norm)
286 } else {
287 Ok(frobenius_norm)
288 }
289 }
290
291 fn estimate_convergence_rate(
292 &self,
293 components: &[usize],
294 errors: &[ApproximationError],
295 ) -> Result<f64> {
296 if components.len() < 2 || errors.len() < 2 {
297 return Ok(0.0);
298 }
299
300 let mut log_components = Vec::new();
302 let mut log_errors = Vec::new();
303
304 for (i, error) in errors.iter().enumerate() {
305 if error.mean > 0.0 {
306 log_components.push((components[i] as f64).ln());
307 log_errors.push(error.mean.ln());
308 }
309 }
310
311 if log_components.len() < 2 {
312 return Ok(0.0);
313 }
314
315 let n = log_components.len() as f64;
317 let sum_x = log_components.iter().sum::<f64>();
318 let sum_y = log_errors.iter().sum::<f64>();
319 let sum_xy = log_components
320 .iter()
321 .zip(log_errors.iter())
322 .map(|(x, y)| x * y)
323 .sum::<f64>();
324 let sum_x2 = log_components.iter().map(|x| x * x).sum::<f64>();
325
326 let slope = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x * sum_x);
327
328 Ok(-slope) }
330}
331
/// Summary statistics of the approximation error across the randomized
/// trials at a single component count.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ApproximationError {
    /// Mean relative error over the trials.
    pub mean: f64,
    /// Population standard deviation of the error over the trials.
    pub std: f64,
    /// Smallest error observed in any trial.
    pub min: f64,
    /// Largest error observed in any trial.
    pub max: f64,
}
340
/// Outcome of a convergence study over several component counts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConvergenceResult {
    /// Component counts that were evaluated, in order.
    pub component_counts: Vec<usize>,
    /// Error statistics, one entry per evaluated component count.
    pub approximation_errors: Vec<ApproximationError>,
    /// Estimated power-law decay rate of the error (log-log slope,
    /// negated so larger is faster convergence).
    pub convergence_rate: f64,
    /// Whether the final mean error fell below the tolerance.
    pub is_converged: bool,
}
353
/// Empirically validates theoretical error bounds for random-feature
/// kernel approximations via repeated sampling.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrorBoundsValidator {
    /// Confidence level (e.g. 0.95) at which bounds are checked.
    pub confidence_level: f64,
    /// Number of independent approximations drawn per validation.
    pub n_bootstrap_samples: usize,
    /// Which theoretical bounds to validate.
    pub bound_types: Vec<BoundType>,
}
365
/// Family of concentration inequality used to derive a theoretical
/// error bound.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum BoundType {
    /// Hoeffding-style bound (bounded random variables).
    Hoeffding,
    /// McDiarmid-style bound (bounded differences).
    McDiarmid,
    /// Azuma-style bound (martingale differences).
    Azuma,
    /// Bernstein-style bound (variance-sensitive).
    Bernstein,
    /// Empirical/Rademacher-style data-dependent bound.
    Empirical,
}
380
impl Default for ErrorBoundsValidator {
    /// Equivalent to [`ErrorBoundsValidator::new`].
    fn default() -> Self {
        Self::new()
    }
}
386
387impl ErrorBoundsValidator {
388 pub fn new() -> Self {
389 Self {
390 confidence_level: 0.95,
391 n_bootstrap_samples: 1000,
392 bound_types: vec![
393 BoundType::Hoeffding,
394 BoundType::McDiarmid,
395 BoundType::Bernstein,
396 BoundType::Empirical,
397 ],
398 }
399 }
400
401 pub fn confidence_level(mut self, level: f64) -> Self {
402 self.confidence_level = level;
403 self
404 }
405
406 pub fn n_bootstrap_samples(mut self, n_samples: usize) -> Self {
407 self.n_bootstrap_samples = n_samples;
408 self
409 }
410
411 pub fn bound_types(mut self, bounds: Vec<BoundType>) -> Self {
412 self.bound_types = bounds;
413 self
414 }
415
416 pub fn validate_rff_bounds(
417 &self,
418 x: &Array2<f64>,
419 gamma: f64,
420 n_components: usize,
421 ) -> Result<ErrorBoundsResult> {
422 let mut bound_results = HashMap::new();
423
424 let exact_kernel = self.compute_exact_kernel(x, gamma)?;
426
427 let mut approximation_errors = Vec::new();
429
430 for _ in 0..self.n_bootstrap_samples {
431 let approx_kernel = self.compute_rff_approximation(x, gamma, n_components)?;
432 let error = self.compute_relative_error(&exact_kernel, &approx_kernel)?;
433 approximation_errors.push(error);
434 }
435
436 approximation_errors.sort_by(|a, b| a.partial_cmp(b).expect("operation should succeed"));
437
438 let empirical_mean =
439 approximation_errors.iter().sum::<f64>() / approximation_errors.len() as f64;
440 let empirical_variance = approximation_errors
441 .iter()
442 .map(|&e| (e - empirical_mean).powi(2))
443 .sum::<f64>()
444 / approximation_errors.len() as f64;
445
446 for bound_type in &self.bound_types {
448 let bound = self.compute_theoretical_bound(
449 bound_type,
450 n_components,
451 x.ncols(),
452 self.confidence_level,
453 )?;
454 let empirical_quantile_idx =
455 ((1.0 - self.confidence_level) * approximation_errors.len() as f64) as usize;
456 let empirical_bound =
457 approximation_errors[empirical_quantile_idx.min(approximation_errors.len() - 1)];
458
459 bound_results.insert(
460 bound_type.clone(),
461 BoundValidation {
462 theoretical_bound: bound,
463 empirical_bound,
464 is_valid: bound >= empirical_bound,
465 tightness_ratio: if bound > 0.0 {
466 empirical_bound / bound
467 } else {
468 0.0
469 },
470 },
471 );
472 }
473
474 Ok(ErrorBoundsResult {
475 empirical_mean,
476 empirical_variance,
477 empirical_quantiles: self.compute_quantiles(&approximation_errors),
478 bound_validations: bound_results,
479 })
480 }
481
482 fn compute_exact_kernel(&self, x: &Array2<f64>, gamma: f64) -> Result<Array2<f64>> {
483 let n = x.nrows();
484 let mut kernel = Array2::zeros((n, n));
485
486 for i in 0..n {
487 for j in 0..n {
488 let diff = &x.row(i) - &x.row(j);
489 let squared_norm = diff.dot(&diff);
490 kernel[[i, j]] = (-gamma * squared_norm).exp();
491 }
492 }
493
494 Ok(kernel)
495 }
496
497 fn compute_rff_approximation(
498 &self,
499 x: &Array2<f64>,
500 gamma: f64,
501 n_components: usize,
502 ) -> Result<Array2<f64>> {
503 let mut rng = RealStdRng::from_seed(thread_rng().random());
504 let normal = RandNormal::new(0.0, (2.0 * gamma).sqrt()).expect("operation should succeed");
505 let uniform = RandUniform::new(0.0, 2.0 * PI).expect("operation should succeed");
506
507 let input_dim = x.ncols();
508 let n_samples = x.nrows();
509
510 let mut weights = Array2::zeros((n_components, input_dim));
512 let mut biases = Array1::zeros(n_components);
513
514 for i in 0..n_components {
515 for j in 0..input_dim {
516 weights[[i, j]] = rng.sample(normal);
517 }
518 biases[i] = rng.sample(uniform);
519 }
520
521 let mut features = Array2::zeros((n_samples, n_components));
523 let scaling = (2.0 / n_components as f64).sqrt();
524
525 for i in 0..n_samples {
526 for j in 0..n_components {
527 let mut dot_product = 0.0;
528 for k in 0..input_dim {
529 dot_product += x[[i, k]] * weights[[j, k]];
530 }
531 dot_product += biases[j];
532 features[[i, j]] = scaling * dot_product.cos();
533 }
534 }
535
536 let mut kernel = Array2::zeros((n_samples, n_samples));
538 for i in 0..n_samples {
539 for j in 0..n_samples {
540 kernel[[i, j]] = features.row(i).dot(&features.row(j));
541 }
542 }
543
544 Ok(kernel)
545 }
546
547 fn compute_relative_error(&self, exact: &Array2<f64>, approx: &Array2<f64>) -> Result<f64> {
548 let diff = exact - approx;
549 let frobenius_error = diff.mapv(|x| x * x).sum().sqrt();
550 let exact_norm = exact.mapv(|x| x * x).sum().sqrt();
551
552 if exact_norm > 0.0 {
553 Ok(frobenius_error / exact_norm)
554 } else {
555 Ok(frobenius_error)
556 }
557 }
558
559 fn compute_theoretical_bound(
560 &self,
561 bound_type: &BoundType,
562 n_components: usize,
563 input_dim: usize,
564 confidence: f64,
565 ) -> Result<f64> {
566 let delta = 1.0 - confidence;
567
568 match bound_type {
569 BoundType::Hoeffding => {
570 let c = 2.0; Ok(c * (2.0 * delta.ln().abs() / n_components as f64).sqrt())
573 }
574 BoundType::McDiarmid => {
575 let c = 4.0 / (n_components as f64).sqrt();
577 Ok(c * (2.0 * delta.ln().abs() / n_components as f64).sqrt())
578 }
579 BoundType::Bernstein => {
580 let variance_bound = 1.0 / n_components as f64;
582 let range_bound = 2.0 / (n_components as f64).sqrt();
583 let term1 = (2.0 * variance_bound * delta.ln().abs() / n_components as f64).sqrt();
584 let term2 = 2.0 * range_bound * delta.ln().abs() / (3.0 * n_components as f64);
585 Ok(term1 + term2)
586 }
587 BoundType::Azuma => {
588 let c = 1.0 / (n_components as f64).sqrt();
590 Ok(c * (2.0 * delta.ln().abs()).sqrt())
591 }
592 BoundType::Empirical => {
593 let rademacher_complexity = (input_dim as f64 / n_components as f64).sqrt();
595 Ok(2.0 * rademacher_complexity
596 + (delta.ln().abs() / (2.0 * n_components as f64)).sqrt())
597 }
598 }
599 }
600
601 fn compute_quantiles(&self, data: &[f64]) -> HashMap<String, f64> {
602 let mut quantiles = HashMap::new();
603 let n = data.len();
604
605 for &p in &[0.05, 0.25, 0.5, 0.75, 0.95, 0.99] {
606 let idx = ((p * n as f64) as usize).min(n - 1);
607 quantiles.insert(format!("q{}", (p * 100.0) as u8), data[idx]);
608 }
609
610 quantiles
611 }
612}
613
/// Comparison of one theoretical bound against the observed errors.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BoundValidation {
    /// Bound value predicted by the concentration inequality.
    pub theoretical_bound: f64,
    /// Empirical error quantile at the configured confidence level.
    pub empirical_bound: f64,
    /// True when the theoretical bound dominates the empirical one.
    pub is_valid: bool,
    /// `empirical_bound / theoretical_bound` (0.0 when the bound is 0);
    /// closer to 1.0 means the bound is tighter.
    pub tightness_ratio: f64,
}
626
/// Aggregate outcome of a bound-validation run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrorBoundsResult {
    /// Mean of the observed relative errors.
    pub empirical_mean: f64,
    /// Population variance of the observed relative errors.
    pub empirical_variance: f64,
    /// Selected quantiles of the observed errors, keyed "q5", "q25", etc.
    pub empirical_quantiles: HashMap<String, f64>,
    /// Per-bound comparison results.
    pub bound_validations: HashMap<BoundType, BoundValidation>,
}
639
/// Scores a kernel approximation against the exact kernel on a set of
/// quality metrics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityAssessment {
    /// Metrics evaluated for each assessment.
    pub metrics: Vec<QualityMetric>,
    /// Baseline methods to compare against (currently configuration only;
    /// not read by the visible assessment code).
    pub baseline_methods: Vec<BaselineMethod>,
}
649
/// Individual quality metric for comparing kernel matrices.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum QualityMetric {
    /// Centered-kernel alignment (cosine similarity of centered Grams).
    KernelAlignment,
    /// Largest singular value of the difference matrix.
    SpectralError,
    /// Frobenius norm of the difference matrix.
    FrobeniusError,
    /// Nuclear-norm-style error of the difference matrix.
    NuclearNormError,
    /// Operator-norm error of the difference matrix.
    OperatorNormError,
    /// Frobenius error relative to the exact kernel's norm.
    RelativeError,
    /// Effective rank of the approximated kernel.
    EffectiveRank,
}
668
/// Baseline against which approximation quality can be compared.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BaselineMethod {
    /// Random feature/landmark sampling.
    RandomSampling,
    /// Uniform sampling.
    UniformSampling,
    /// The exact (non-approximated) kernel.
    ExactMethod,
    /// A previously recorded best-performing method, by name.
    PreviousBest { method_name: String },
}
681
impl Default for QualityAssessment {
    /// Equivalent to [`QualityAssessment::new`].
    fn default() -> Self {
        Self::new()
    }
}
687
688impl QualityAssessment {
689 pub fn new() -> Self {
690 Self {
691 metrics: vec![
692 QualityMetric::KernelAlignment,
693 QualityMetric::SpectralError,
694 QualityMetric::FrobeniusError,
695 QualityMetric::RelativeError,
696 ],
697 baseline_methods: vec![BaselineMethod::RandomSampling, BaselineMethod::ExactMethod],
698 }
699 }
700
701 pub fn assess_approximation(
702 &self,
703 exact_kernel: &Array2<f64>,
704 approx_kernel: &Array2<f64>,
705 ) -> Result<QualityResult> {
706 let mut metric_scores = HashMap::new();
707
708 for metric in &self.metrics {
709 let score = self.compute_metric(metric, exact_kernel, approx_kernel)?;
710 metric_scores.insert(metric.clone(), score);
711 }
712
713 let overall_score = self.compute_overall_score(&metric_scores);
714 Ok(QualityResult {
715 metric_scores: metric_scores.clone(),
716 overall_score,
717 })
718 }
719
720 fn compute_metric(
721 &self,
722 metric: &QualityMetric,
723 exact: &Array2<f64>,
724 approx: &Array2<f64>,
725 ) -> Result<f64> {
726 match metric {
727 QualityMetric::KernelAlignment => {
728 let exact_centered = self.center_kernel(exact)?;
729 let approx_centered = self.center_kernel(approx)?;
730
731 let numerator = self.frobenius_inner_product(&exact_centered, &approx_centered)?;
732 let exact_norm = self.frobenius_norm(&exact_centered)?;
733 let approx_norm = self.frobenius_norm(&approx_centered)?;
734
735 if exact_norm > 0.0 && approx_norm > 0.0 {
736 Ok(numerator / (exact_norm * approx_norm))
737 } else {
738 Ok(0.0)
739 }
740 }
741 QualityMetric::FrobeniusError => {
742 let diff = exact - approx;
743 Ok(self.frobenius_norm(&diff)?)
744 }
745 QualityMetric::RelativeError => {
746 let diff = exact - approx;
747 let error_norm = self.frobenius_norm(&diff)?;
748 let exact_norm = self.frobenius_norm(exact)?;
749
750 if exact_norm > 0.0 {
751 Ok(error_norm / exact_norm)
752 } else {
753 Ok(error_norm)
754 }
755 }
756 QualityMetric::SpectralError => {
757 let diff = exact - approx;
759 self.largest_singular_value(&diff)
760 }
761 QualityMetric::NuclearNormError => {
762 let diff = exact - approx;
763 self.nuclear_norm(&diff)
764 }
765 QualityMetric::OperatorNormError => {
766 let diff = exact - approx;
767 self.operator_norm(&diff)
768 }
769 QualityMetric::EffectiveRank => self.effective_rank(approx),
770 }
771 }
772
773 fn center_kernel(&self, kernel: &Array2<f64>) -> Result<Array2<f64>> {
774 let n = kernel.nrows();
775 let row_means = kernel.mean_axis(Axis(1)).expect("operation should succeed");
776 let col_means = kernel.mean_axis(Axis(0)).expect("operation should succeed");
777 let overall_mean = kernel.mean().expect("operation should succeed");
778
779 let mut centered = kernel.clone();
780
781 for i in 0..n {
782 for j in 0..n {
783 centered[[i, j]] = kernel[[i, j]] - row_means[i] - col_means[j] + overall_mean;
784 }
785 }
786
787 Ok(centered)
788 }
789
790 fn frobenius_inner_product(&self, a: &Array2<f64>, b: &Array2<f64>) -> Result<f64> {
791 if a.shape() != b.shape() {
792 return Err(SklearsError::InvalidInput(
793 "Matrix dimensions don't match".to_string(),
794 ));
795 }
796
797 Ok(a.iter().zip(b.iter()).map(|(x, y)| x * y).sum())
798 }
799
800 fn frobenius_norm(&self, matrix: &Array2<f64>) -> Result<f64> {
801 Ok(matrix.mapv(|x| x * x).sum().sqrt())
802 }
803
804 fn largest_singular_value(&self, matrix: &Array2<f64>) -> Result<f64> {
805 let n = matrix.nrows();
807 let m = matrix.ncols();
808
809 if n == 0 || m == 0 {
810 return Ok(0.0);
811 }
812
813 let mut v = Array1::from_vec(vec![1.0; m]);
814 #[allow(clippy::unnecessary_cast)]
815 {
816 v /= (v.dot(&v) as f64).sqrt();
817 }
818
819 for _ in 0..50 {
820 let mut av: Array1<f64> = Array1::zeros(n);
821 for i in 0..n {
822 for j in 0..m {
823 av[i] += matrix[[i, j]] * v[j];
824 }
825 }
826
827 let mut ata_v: Array1<f64> = Array1::zeros(m);
828 for j in 0..m {
829 for i in 0..n {
830 ata_v[j] += matrix[[i, j]] * av[i];
831 }
832 }
833
834 let norm = ata_v.dot(&ata_v).sqrt();
835 if norm > 1e-12 {
836 v = ata_v / norm;
837 } else {
838 break;
839 }
840 }
841
842 let mut av: Array1<f64> = Array1::zeros(n);
843 for i in 0..n {
844 for j in 0..m {
845 av[i] += matrix[[i, j]] * v[j];
846 }
847 }
848
849 Ok(av.dot(&av).sqrt())
850 }
851
852 fn nuclear_norm(&self, matrix: &Array2<f64>) -> Result<f64> {
853 let trace = (0..matrix.nrows().min(matrix.ncols()))
856 .map(|i| matrix[[i, i]].abs())
857 .sum::<f64>();
858 Ok(trace)
859 }
860
861 fn operator_norm(&self, matrix: &Array2<f64>) -> Result<f64> {
862 self.largest_singular_value(matrix)
864 }
865
866 fn effective_rank(&self, matrix: &Array2<f64>) -> Result<f64> {
867 let trace = (0..matrix.nrows().min(matrix.ncols()))
869 .map(|i| matrix[[i, i]])
870 .sum::<f64>();
871 let frobenius_squared = matrix.mapv(|x| x * x).sum();
872
873 if frobenius_squared > 0.0 {
874 Ok(trace * trace / frobenius_squared)
875 } else {
876 Ok(0.0)
877 }
878 }
879
880 fn compute_overall_score(&self, metric_scores: &HashMap<QualityMetric, f64>) -> f64 {
881 let weights = [
883 (QualityMetric::KernelAlignment, 0.4),
884 (QualityMetric::RelativeError, 0.3),
885 (QualityMetric::FrobeniusError, 0.2),
886 (QualityMetric::EffectiveRank, 0.1),
887 ];
888
889 let mut weighted_sum = 0.0;
890 let mut total_weight = 0.0;
891
892 for (metric, weight) in &weights {
893 if let Some(&score) = metric_scores.get(metric) {
894 let normalized_score = match metric {
895 QualityMetric::KernelAlignment => score, QualityMetric::EffectiveRank => score / matrix_size_as_float(metric_scores),
897 _ => 1.0 / (1.0 + score), };
899 weighted_sum += weight * normalized_score;
900 total_weight += weight;
901 }
902 }
903
904 if total_weight > 0.0 {
905 weighted_sum / total_weight
906 } else {
907 0.0
908 }
909 }
910}
911
/// Normalization constant used to scale the effective-rank metric when
/// aggregating the overall quality score.
///
/// NOTE(review): returns a fixed placeholder (100.0) and ignores its
/// argument — presumably a stand-in for the actual kernel matrix size,
/// which is not recoverable from the score map alone. Confirm the
/// intended scale before relying on absolute overall scores.
fn matrix_size_as_float(_metric_scores: &HashMap<QualityMetric, f64>) -> f64 {
    100.0
}
916
/// Outcome of a quality assessment.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityResult {
    /// Raw score per evaluated metric.
    pub metric_scores: HashMap<QualityMetric, f64>,
    /// Weighted aggregate score in roughly [0, 1]; larger is better.
    pub overall_score: f64,
}
925
#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::essentials::Normal;

    use scirs2_core::ndarray::{Array, Array2};
    use scirs2_core::random::thread_rng;

    /// Smoke test: a small Gaussian dataset through the full RBF
    /// convergence study produces one entry per requested step.
    #[test]
    fn test_convergence_analyzer() {
        // 20 samples, 5 features, standard normal entries.
        let x: Array2<f64> = Array::from_shape_fn((20, 5), |_| {
            let mut rng = thread_rng();
            rng.sample(&Normal::new(0.0, 1.0).expect("operation should succeed"))
        });
        let analyzer = ConvergenceAnalyzer::new(50)
            .component_steps(vec![10, 20, 30, 40, 50])
            .n_trials(3);

        let result = analyzer
            .analyze_rbf_convergence(&x, 1.0)
            .expect("operation should succeed");

        assert_eq!(result.component_counts.len(), 5);
        assert!(result.convergence_rate >= 0.0);
    }

    /// Smoke test: bound validation runs end to end and reports
    /// non-negative statistics plus at least one bound comparison.
    #[test]
    fn test_error_bounds_validator() {
        let x: Array2<f64> = Array::from_shape_fn((15, 4), |_| {
            let mut rng = thread_rng();
            rng.sample(&Normal::new(0.0, 1.0).expect("operation should succeed"))
        });
        let validator = ErrorBoundsValidator::new()
            .confidence_level(0.9)
            .n_bootstrap_samples(50);

        let result = validator
            .validate_rff_bounds(&x, 1.0, 20)
            .expect("operation should succeed");

        assert!(result.empirical_mean >= 0.0);
        assert!(result.empirical_variance >= 0.0);
        assert!(!result.bound_validations.is_empty());
    }

    /// A near-identity approximation of the identity kernel should get
    /// a positive overall score bounded by 1.
    #[test]
    fn test_quality_assessment() {
        let exact = Array::eye(10);
        let mut approx = Array::eye(10);
        // Perturb one entry so the matrices differ slightly.
        approx[[0, 0]] = 0.9; let assessment = QualityAssessment::new();
        let result = assessment
            .assess_approximation(&exact, &approx)
            .expect("operation should succeed");

        assert!(result.overall_score > 0.0);
        assert!(result.overall_score <= 1.0);
        assert!(!result.metric_scores.is_empty());
    }
}