sklears_linear/
uncertainty_quantification.rs

//! Uncertainty Quantification for Linear Models
//!
//! This module provides comprehensive uncertainty quantification methods for linear models,
//! including epistemic and aleatoric uncertainty decomposition, conformal prediction, and
//! Bayesian uncertainty propagation.

// Matrix arguments use the conventional capital-X naming throughout this module.
#![allow(non_snake_case)]

use scirs2_core::ndarray::{s, Array1, Array2, Axis};
use scirs2_core::random::{prelude::*, SeedableRng};
use sklears_core::{
    error::{Result, SklearsError},
    types::Float,
};

/// Configuration for uncertainty quantification
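///
/// # Example
///
/// A minimal sketch overriding a few fields (values are illustrative):
///
/// ```ignore
/// let config = UncertaintyConfig {
///     confidence_level: 0.90,
///     method: UncertaintyMethod::Conformal,
///     random_seed: Some(42),
///     ..Default::default()
/// };
/// ```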
#[derive(Debug, Clone)]
pub struct UncertaintyConfig {
    /// Number of bootstrap samples for uncertainty estimation
    pub n_bootstrap_samples: usize,
    /// Number of Monte Carlo samples for Bayesian uncertainty
    pub n_mc_samples: usize,
    /// Confidence level for prediction intervals (e.g., 0.95 for 95% intervals)
    pub confidence_level: Float,
    /// Method for uncertainty quantification
    pub method: UncertaintyMethod,
    /// Whether to separate epistemic and aleatoric uncertainty
    pub decompose_uncertainty: bool,
    /// Random seed for reproducibility
    pub random_seed: Option<u64>,
}

impl Default for UncertaintyConfig {
    fn default() -> Self {
        Self {
            n_bootstrap_samples: 1000,
            n_mc_samples: 100,
            confidence_level: 0.95,
            method: UncertaintyMethod::Bootstrap,
            decompose_uncertainty: true,
            random_seed: None,
        }
    }
}

/// Methods for uncertainty quantification
#[derive(Debug, Clone)]
pub enum UncertaintyMethod {
    /// Bootstrap-based uncertainty quantification
    Bootstrap,
    /// Bayesian uncertainty propagation
    Bayesian,
    /// Conformal prediction
    Conformal,
    /// Ensemble-based uncertainty
    Ensemble { n_models: usize },
    /// Dropout-based uncertainty (for neural approaches)
    Dropout {
        dropout_rate: Float,
        n_forward_passes: usize,
    },
}

/// Results of uncertainty quantification
#[derive(Debug, Clone)]
pub struct UncertaintyResult {
    /// Predicted mean values
    pub predictions: Array1<Float>,
    /// Total predictive uncertainty (standard deviation)
    pub total_uncertainty: Array1<Float>,
    /// Epistemic uncertainty (model uncertainty)
    pub epistemic_uncertainty: Option<Array1<Float>>,
    /// Aleatoric uncertainty (data uncertainty)
    pub aleatoric_uncertainty: Option<Array1<Float>>,
    /// Lower bound of prediction interval
    pub lower_bound: Array1<Float>,
    /// Upper bound of prediction interval
    pub upper_bound: Array1<Float>,
    /// Confidence level used
    pub confidence_level: Float,
    /// Method used for uncertainty quantification
    pub method: String,
}

impl UncertaintyResult {
    /// Compute prediction interval width
    pub fn interval_width(&self) -> Array1<Float> {
        &self.upper_bound - &self.lower_bound
    }

    /// Check if true values fall within prediction intervals
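    ///
    /// Returns the empirical coverage as a fraction in `[0, 1]`; for a
    /// well-calibrated model it should be close to `confidence_level`.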
    pub fn coverage(&self, y_true: &Array1<Float>) -> Result<Float> {
        if y_true.len() != self.predictions.len() {
            return Err(SklearsError::DimensionMismatch {
                expected: self.predictions.len(),
                actual: y_true.len(),
            });
        }

        let in_interval = y_true
            .iter()
            .zip(self.lower_bound.iter())
            .zip(self.upper_bound.iter())
            .map(|((&y, &lower), &upper)| if y >= lower && y <= upper { 1.0 } else { 0.0 })
            .sum::<Float>();

        Ok(in_interval / y_true.len() as Float)
    }

    /// Compute mean interval width
    pub fn mean_interval_width(&self) -> Float {
        self.interval_width().mean().unwrap_or(0.0)
    }
}

/// Uncertainty quantification engine
#[derive(Debug)]
pub struct UncertaintyQuantifier {
    config: UncertaintyConfig,
}

impl UncertaintyQuantifier {
    /// Create a new uncertainty quantifier
    pub fn new(config: UncertaintyConfig) -> Self {
        Self { config }
    }

    /// Quantify uncertainty using bootstrap method
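    ///
    /// `fit_predict_fn` receives a bootstrap resample of the training data
    /// together with the test matrix and returns test predictions; it is
    /// called once per bootstrap sample.
    ///
    /// # Example
    ///
    /// A minimal sketch using a hypothetical mean predictor in place of a
    /// real linear-model fit:
    ///
    /// ```ignore
    /// let quantifier = UncertaintyQuantifier::default();
    /// let result = quantifier.bootstrap_uncertainty(&x_train, &y_train, &x_test,
    ///     |_x_boot, y_boot, x_test| {
    ///         let mean = y_boot.mean().unwrap_or(0.0);
    ///         Ok(Array1::from_elem(x_test.nrows(), mean))
    ///     },
    /// )?;
    /// ```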
    pub fn bootstrap_uncertainty<F>(
        &self,
        X_train: &Array2<Float>,
        y_train: &Array1<Float>,
        X_test: &Array2<Float>,
        fit_predict_fn: F,
    ) -> Result<UncertaintyResult>
    where
        F: Fn(&Array2<Float>, &Array1<Float>, &Array2<Float>) -> Result<Array1<Float>>,
    {
        let mut rng = self.create_rng();
        let n_train = X_train.nrows();
        let n_test = X_test.nrows();

        // Collect bootstrap predictions
        let mut bootstrap_predictions = Array2::zeros((self.config.n_bootstrap_samples, n_test));

        for i in 0..self.config.n_bootstrap_samples {
            // Create bootstrap sample
            let bootstrap_indices = self.bootstrap_sample(n_train, &mut rng);
            let (X_boot, y_boot) =
                self.extract_bootstrap_data(X_train, y_train, &bootstrap_indices);

            // Fit model and predict
            let predictions = fit_predict_fn(&X_boot, &y_boot, X_test)?;
            bootstrap_predictions
                .slice_mut(s![i, ..])
                .assign(&predictions);
        }

        // Compute statistics
        self.compute_bootstrap_statistics(&bootstrap_predictions)
    }

    /// Quantify uncertainty using Bayesian approach
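    ///
    /// `posterior_samples` has shape `(n_samples, n_features)`; each row is
    /// one draw of the weight vector. `noise_precision` is the inverse
    /// variance of the observation noise.
    ///
    /// # Example
    ///
    /// A minimal sketch with three posterior draws over two weights
    /// (illustrative values, mirroring the unit test below):
    ///
    /// ```ignore
    /// let quantifier = UncertaintyQuantifier::default();
    /// let samples = Array2::from_shape_vec((3, 2), vec![1.0, 0.5, 1.1, 0.4, 0.9, 0.6])?;
    /// let x_test = Array2::from_shape_vec((2, 2), vec![1.0, 1.0, 2.0, 2.0])?;
    /// let result = quantifier.bayesian_uncertainty(&samples, &x_test, 1.0)?;
    /// assert!(result.aleatoric_uncertainty.is_some());
    /// ```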
    pub fn bayesian_uncertainty(
        &self,
        posterior_samples: &Array2<Float>,
        X_test: &Array2<Float>,
        noise_precision: Float,
    ) -> Result<UncertaintyResult> {
        if posterior_samples.ncols() != X_test.ncols() {
            return Err(SklearsError::DimensionMismatch {
                expected: X_test.ncols(),
                actual: posterior_samples.ncols(),
            });
        }

        let n_samples = posterior_samples.nrows();
        let n_test = X_test.nrows();

        // Compute predictions for each posterior sample
        let mut predictions = Array2::zeros((n_samples, n_test));

        for i in 0..n_samples {
            let weights = posterior_samples.slice(s![i, ..]);
            let pred = X_test.dot(&weights);
            predictions.slice_mut(s![i, ..]).assign(&pred);
        }

        // Compute predictive statistics
        let pred_mean = predictions.mean_axis(Axis(0)).unwrap();
        let pred_var = predictions.var_axis(Axis(0), 0.0);

        // Decompose uncertainty: the spread over posterior draws is the model
        // (epistemic) part; the observation noise is the aleatoric part
        let epistemic_var = pred_var.clone();
        let aleatoric_var = Array1::from_elem(n_test, 1.0 / noise_precision);
        let total_var = &epistemic_var + &aleatoric_var;

        let total_std = total_var.mapv(|v| v.sqrt());
        let epistemic_std = epistemic_var.mapv(|v| v.sqrt());
        let aleatoric_std = aleatoric_var.mapv(|v| v.sqrt());

        // Compute prediction intervals
        let alpha = 1.0 - self.config.confidence_level;
        let z_score = self.compute_normal_quantile(1.0 - alpha / 2.0)?;

        let lower_bound = &pred_mean - z_score * &total_std;
        let upper_bound = &pred_mean + z_score * &total_std;

        Ok(UncertaintyResult {
            predictions: pred_mean,
            total_uncertainty: total_std,
            epistemic_uncertainty: Some(epistemic_std),
            aleatoric_uncertainty: Some(aleatoric_std),
            lower_bound,
            upper_bound,
            confidence_level: self.config.confidence_level,
            method: "Bayesian".to_string(),
        })
    }

    /// Quantify uncertainty using conformal prediction
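    ///
    /// Calibration residuals from a held-out set are used to size symmetric
    /// intervals around the test predictions with finite-sample marginal
    /// coverage guarantees.
    ///
    /// # Example
    ///
    /// A minimal sketch using a hypothetical constant predictor as
    /// `predict_fn` in place of a fitted model:
    ///
    /// ```ignore
    /// let quantifier = UncertaintyQuantifier::default();
    /// let result = quantifier.conformal_uncertainty(&x_cal, &y_cal, &x_test, |x| {
    ///     Ok(Array1::from_elem(x.nrows(), 2.0))
    /// })?;
    /// assert_eq!(result.method, "Conformal");
    /// ```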
    pub fn conformal_uncertainty<F>(
        &self,
        X_cal: &Array2<Float>,
        y_cal: &Array1<Float>,
        X_test: &Array2<Float>,
        predict_fn: F,
    ) -> Result<UncertaintyResult>
    where
        F: Fn(&Array2<Float>) -> Result<Array1<Float>>,
    {
        // Compute calibration residuals
        let cal_predictions = predict_fn(X_cal)?;
        let cal_residuals: Array1<Float> = (y_cal - &cal_predictions).mapv(|r| r.abs());

        // Compute the split-conformal quantile with the finite-sample
        // correction: the ceil((n + 1) * (1 - alpha)) / n empirical quantile
        // of the calibration residuals, clamped to 1.0 for small n
        let alpha = 1.0 - self.config.confidence_level;
        let n_cal = cal_residuals.len() as Float;
        let quantile_level = (((1.0 - alpha) * (n_cal + 1.0)).ceil() / n_cal).min(1.0);
        let quantile = self.compute_empirical_quantile(&cal_residuals, quantile_level)?;

        // Make predictions on test set
        let test_predictions = predict_fn(X_test)?;

        // Construct prediction intervals
        let lower_bound = &test_predictions - quantile;
        let upper_bound = &test_predictions + quantile;

        // Conformal prediction provides marginal coverage, not pointwise uncertainty
        let total_uncertainty = Array1::from_elem(test_predictions.len(), quantile);

        Ok(UncertaintyResult {
            predictions: test_predictions,
            total_uncertainty,
            epistemic_uncertainty: None,
            aleatoric_uncertainty: None,
            lower_bound,
            upper_bound,
            confidence_level: self.config.confidence_level,
            method: "Conformal".to_string(),
        })
    }

    /// Quantify uncertainty using ensemble approach
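    ///
    /// Expects `ensemble_predictions` of shape `(n_models, n_test)`, one row
    /// per model.
    ///
    /// # Example
    ///
    /// A minimal sketch with three models and two test points (illustrative
    /// values):
    ///
    /// ```ignore
    /// let preds = Array2::from_shape_vec((3, 2), vec![1.0, 2.0, 1.1, 1.9, 0.9, 2.1])?;
    /// let result = UncertaintyQuantifier::default().ensemble_uncertainty(&preds)?;
    /// assert_eq!(result.predictions.len(), 2);
    /// ```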
    pub fn ensemble_uncertainty(
        &self,
        ensemble_predictions: &Array2<Float>,
    ) -> Result<UncertaintyResult> {
        if ensemble_predictions.nrows() < 2 {
            return Err(SklearsError::InvalidInput(
                "Ensemble uncertainty requires at least two models".to_string(),
            ));
        }

        let pred_mean = ensemble_predictions.mean_axis(Axis(0)).unwrap();
        let pred_var = ensemble_predictions.var_axis(Axis(0), 0.0);
        let pred_std = pred_var.mapv(|v| v.sqrt());

        // Compute prediction intervals using the t-distribution for small
        // ensembles, treating the ensemble spread as the predictive standard
        // deviation (consistent with the Bayesian method above). Dividing the
        // margin by sqrt(n_models) would instead yield a confidence interval
        // for the ensemble mean, which is far narrower than a prediction
        // interval.
        let df = ensemble_predictions.nrows() - 1;
        let alpha = 1.0 - self.config.confidence_level;
        let t_critical = self.compute_t_quantile(1.0 - alpha / 2.0, df as Float)?;

        let margin = t_critical * &pred_std;
        let lower_bound = &pred_mean - &margin;
        let upper_bound = &pred_mean + &margin;

        Ok(UncertaintyResult {
            predictions: pred_mean,
            total_uncertainty: pred_std.clone(),
            epistemic_uncertainty: Some(pred_std), // Ensemble spread captures epistemic uncertainty
            aleatoric_uncertainty: None,
            lower_bound,
            upper_bound,
            confidence_level: self.config.confidence_level,
            method: "Ensemble".to_string(),
        })
    }

    /// Create a random number generator, seeded for reproducibility if configured
    fn create_rng(&self) -> StdRng {
        if let Some(seed) = self.config.random_seed {
            StdRng::seed_from_u64(seed)
        } else {
            StdRng::from_rng(&mut scirs2_core::random::thread_rng())
        }
    }

    /// Generate bootstrap sample indices (sampling with replacement)
    fn bootstrap_sample(&self, n_samples: usize, rng: &mut impl Rng) -> Vec<usize> {
        (0..n_samples)
            .map(|_| rng.gen_range(0..n_samples))
            .collect()
    }

    /// Extract bootstrap data based on indices
    fn extract_bootstrap_data(
        &self,
        X: &Array2<Float>,
        y: &Array1<Float>,
        indices: &[usize],
    ) -> (Array2<Float>, Array1<Float>) {
        let n_samples = indices.len();
        let n_features = X.ncols();

        let mut X_boot = Array2::zeros((n_samples, n_features));
        let mut y_boot = Array1::zeros(n_samples);

        for (i, &idx) in indices.iter().enumerate() {
            X_boot.slice_mut(s![i, ..]).assign(&X.slice(s![idx, ..]));
            y_boot[i] = y[idx];
        }

        (X_boot, y_boot)
    }

    /// Compute statistics from bootstrap predictions
    fn compute_bootstrap_statistics(
        &self,
        predictions: &Array2<Float>,
    ) -> Result<UncertaintyResult> {
        let pred_mean = predictions.mean_axis(Axis(0)).unwrap();
        let pred_std = predictions.std_axis(Axis(0), 0.0);

        // Compute empirical quantiles for prediction intervals
        let alpha = 1.0 - self.config.confidence_level;
        let lower_quantile = alpha / 2.0;
        let upper_quantile = 1.0 - alpha / 2.0;

        let n_test = predictions.ncols();
        let mut lower_bound = Array1::zeros(n_test);
        let mut upper_bound = Array1::zeros(n_test);

        for i in 0..n_test {
            let col = predictions.column(i);
            let mut sorted_col: Vec<Float> = col.to_vec();
            // total_cmp provides a total order, so NaN values cannot cause a panic
            sorted_col.sort_by(|a, b| a.total_cmp(b));

            lower_bound[i] =
                self.compute_empirical_quantile_from_sorted(&sorted_col, lower_quantile)?;
            upper_bound[i] =
                self.compute_empirical_quantile_from_sorted(&sorted_col, upper_quantile)?;
        }

        Ok(UncertaintyResult {
            predictions: pred_mean,
            total_uncertainty: pred_std.clone(),
            epistemic_uncertainty: Some(pred_std), // Bootstrap captures epistemic uncertainty
            aleatoric_uncertainty: None,
            lower_bound,
            upper_bound,
            confidence_level: self.config.confidence_level,
            method: "Bootstrap".to_string(),
        })
    }

    /// Compute empirical quantile from data
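    ///
    /// Uses linear interpolation between adjacent order statistics (the same
    /// convention as NumPy's default `quantile` method): for quantile `q` over
    /// `n` sorted values, the fractional index is `q * (n - 1)`. For example,
    /// the 0.25 quantile of `[1, 2, 3, 4, 5]` is `2.0`.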
    fn compute_empirical_quantile(&self, data: &Array1<Float>, quantile: Float) -> Result<Float> {
        if !(0.0..=1.0).contains(&quantile) {
            return Err(SklearsError::InvalidParameter {
                name: "quantile".to_string(),
                reason: format!("Quantile must be between 0 and 1, got {}", quantile),
            });
        }

        let mut sorted_data: Vec<Float> = data.to_vec();
        // total_cmp provides a total order, so NaN values cannot cause a panic
        sorted_data.sort_by(|a, b| a.total_cmp(b));

        self.compute_empirical_quantile_from_sorted(&sorted_data, quantile)
    }

    /// Compute empirical quantile from sorted data
    fn compute_empirical_quantile_from_sorted(
        &self,
        sorted_data: &[Float],
        quantile: Float,
    ) -> Result<Float> {
        if sorted_data.is_empty() {
            return Err(SklearsError::InvalidInput(
                "Empty data for quantile computation".to_string(),
            ));
        }

        let n = sorted_data.len();
        let index = quantile * (n - 1) as Float;
        let lower_index = index.floor() as usize;
        let upper_index = index.ceil() as usize;

        if lower_index == upper_index {
            Ok(sorted_data[lower_index])
        } else {
            // Linear interpolation between the two neighbouring order statistics
            let weight = index - lower_index as Float;
            Ok(sorted_data[lower_index] * (1.0 - weight) + sorted_data[upper_index] * weight)
        }
    }

    /// Compute normal distribution quantile (approximate)
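    ///
    /// Uses the central-region branch of Acklam's rational approximation;
    /// accuracy degrades for `p` outside roughly `[0.025, 0.975]`, where the
    /// dedicated tail branches of the full algorithm would normally take over.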
    #[allow(clippy::only_used_in_recursion)]
    fn compute_normal_quantile(&self, p: Float) -> Result<Float> {
        if p <= 0.0 || p >= 1.0 {
            return Err(SklearsError::InvalidParameter {
                name: "p".to_string(),
                reason: format!("Probability must be between 0 and 1, got {}", p),
            });
        }

        // Central-region coefficients of Acklam's rational approximation for
        // the normal quantile function (tail branches omitted)
        let a = [
            -3.969683028665376e+01,
            2.209460984245205e+02,
            -2.759285104469687e+02,
            1.383577518672690e+02,
            -3.066479806614716e+01,
            2.506628277459239e+00,
        ];
        let b = [
            -5.447609879822406e+01,
            1.615858368580409e+02,
            -1.556989798598866e+02,
            6.680131188771972e+01,
            -1.328068155288572e+01,
            1.0,
        ];

        let result = if p == 0.5 {
            0.0
        } else if p > 0.5 {
            // Upper half
            let q = p - 0.5;
            let r = q * q;
            let num = a[5] + r * (a[4] + r * (a[3] + r * (a[2] + r * (a[1] + r * a[0]))));
            let den = b[5] + r * (b[4] + r * (b[3] + r * (b[2] + r * (b[1] + r * b[0]))));
            q * num / den
        } else {
            // Lower half - use symmetry
            -self.compute_normal_quantile(1.0 - p)?
        };

        Ok(result)
    }

    /// Compute t-distribution quantile (approximate)
    fn compute_t_quantile(&self, p: Float, df: Float) -> Result<Float> {
        if df <= 0.0 {
            return Err(SklearsError::InvalidParameter {
                name: "df".to_string(),
                reason: format!("Degrees of freedom must be positive, got {}", df),
            });
        }

        // For large df, the t-distribution approaches the normal distribution
        if df >= 30.0 {
            return self.compute_normal_quantile(p);
        }

        // Cornish-Fisher expansion of the t-quantile around the normal quantile:
        // t ≈ z + (z³ + z) / (4·df) + (5z⁵ + 16z³ + 3z) / (96·df²)
        let z = self.compute_normal_quantile(p)?;
        let z3 = z * z * z;
        let z5 = z3 * z * z;
        let correction =
            (z3 + z) / (4.0 * df) + (5.0 * z5 + 16.0 * z3 + 3.0 * z) / (96.0 * df * df);

        Ok(z + correction)
    }
}

impl Default for UncertaintyQuantifier {
    fn default() -> Self {
        Self::new(UncertaintyConfig::default())
    }
}

/// Trait for models that support uncertainty quantification
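///
/// # Example
///
/// A minimal sketch of an implementing type; `MeanModel` and its fields are
/// hypothetical and not part of this crate:
///
/// ```ignore
/// struct MeanModel {
///     mean: Float,
///     residual_std: Float,
/// }
///
/// impl UncertaintyCapable for MeanModel {
///     fn predict_with_uncertainty(
///         &self,
///         X: &Array2<Float>,
///         config: &UncertaintyConfig,
///     ) -> Result<UncertaintyResult> {
///         let n = X.nrows();
///         let predictions = Array1::from_elem(n, self.mean);
///         let std = Array1::from_elem(n, self.residual_std);
///         // z ≈ 1.96 hard-coded for 95% intervals; a real model would
///         // derive the quantile from config.confidence_level
///         let z = 1.96;
///         Ok(UncertaintyResult {
///             predictions: predictions.clone(),
///             total_uncertainty: std.clone(),
///             epistemic_uncertainty: None,
///             aleatoric_uncertainty: Some(std.clone()),
///             lower_bound: &predictions - z * &std,
///             upper_bound: &predictions + z * &std,
///             confidence_level: config.confidence_level,
///             method: "Mean".to_string(),
///         })
///     }
///
///     fn epistemic_uncertainty(&self, X: &Array2<Float>) -> Result<Array1<Float>> {
///         Ok(Array1::zeros(X.nrows()))
///     }
///
///     fn aleatoric_uncertainty(&self, X: &Array2<Float>) -> Result<Array1<Float>> {
///         Ok(Array1::from_elem(X.nrows(), self.residual_std))
///     }
/// }
/// ```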
pub trait UncertaintyCapable {
    /// Predict with uncertainty quantification
    fn predict_with_uncertainty(
        &self,
        X: &Array2<Float>,
        config: &UncertaintyConfig,
    ) -> Result<UncertaintyResult>;

    /// Get epistemic uncertainty (model uncertainty)
    fn epistemic_uncertainty(&self, X: &Array2<Float>) -> Result<Array1<Float>>;

    /// Get aleatoric uncertainty (data uncertainty)
    fn aleatoric_uncertainty(&self, X: &Array2<Float>) -> Result<Array1<Float>>;
}

/// Calibration metrics for uncertainty quantification
#[derive(Debug, Clone)]
pub struct CalibrationMetrics {
    /// Expected calibration error
    pub expected_calibration_error: Float,
    /// Maximum calibration error
    pub maximum_calibration_error: Float,
    /// Brier score (for probabilistic predictions)
    pub brier_score: Option<Float>,
    /// Coverage probability
    pub coverage: Float,
    /// Mean interval width
    pub mean_interval_width: Float,
}

impl CalibrationMetrics {
    /// Compute calibration metrics for uncertainty predictions
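    ///
    /// This is a simplified single-bin implementation: the calibration error
    /// is the absolute gap between empirical coverage and the nominal
    /// confidence level, and `_n_bins` is accepted for API compatibility but
    /// currently unused.
    ///
    /// # Example
    ///
    /// A minimal sketch, given an `UncertaintyResult` named `result`:
    ///
    /// ```ignore
    /// let metrics = CalibrationMetrics::compute(&result, &y_true, 10)?;
    /// assert!((0.0..=1.0).contains(&metrics.coverage));
    /// ```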
    pub fn compute(
        uncertainty_result: &UncertaintyResult,
        y_true: &Array1<Float>,
        _n_bins: usize,
    ) -> Result<Self> {
        let coverage = uncertainty_result.coverage(y_true)?;
        let mean_interval_width = uncertainty_result.mean_interval_width();

        // Single-bin calibration error: the gap between empirical coverage
        // and the nominal confidence level
        let expected_calibration_error = (coverage - uncertainty_result.confidence_level).abs();
        // With a single bin, expected and maximum calibration error coincide
        let maximum_calibration_error = expected_calibration_error;

        Ok(Self {
            expected_calibration_error,
            maximum_calibration_error,
            brier_score: None, // Would need probabilistic predictions
            coverage,
            mean_interval_width,
        })
    }
}

#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::Array;

    #[test]
    fn test_uncertainty_config() {
        let config = UncertaintyConfig::default();
        assert_eq!(config.n_bootstrap_samples, 1000);
        assert_eq!(config.confidence_level, 0.95);
        assert!(config.decompose_uncertainty);
    }

    #[test]
    fn test_uncertainty_result() {
        let predictions = Array::from_vec(vec![1.0, 2.0, 3.0]);
        let lower_bound = Array::from_vec(vec![0.5, 1.5, 2.5]);
        let upper_bound = Array::from_vec(vec![1.5, 2.5, 3.5]);

        let result = UncertaintyResult {
            predictions: predictions.clone(),
            total_uncertainty: Array::from_vec(vec![0.25, 0.25, 0.25]),
            epistemic_uncertainty: None,
            aleatoric_uncertainty: None,
            lower_bound: lower_bound.clone(),
            upper_bound: upper_bound.clone(),
            confidence_level: 0.95,
            method: "Test".to_string(),
        };

        let width = result.interval_width();
        assert_eq!(width[0], 1.0);
        assert_eq!(width[1], 1.0);
        assert_eq!(width[2], 1.0);

        assert_eq!(result.mean_interval_width(), 1.0);

        // Test coverage
        let y_true = Array::from_vec(vec![1.2, 2.1, 2.8]);
        let coverage = result.coverage(&y_true).unwrap();
        assert_eq!(coverage, 1.0); // All points within intervals
    }

    #[test]
    fn test_empirical_quantile() {
        let quantifier = UncertaintyQuantifier::default();
        let data = Array::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);

        let median = quantifier.compute_empirical_quantile(&data, 0.5).unwrap();
        assert_eq!(median, 3.0);

        let q25 = quantifier.compute_empirical_quantile(&data, 0.25).unwrap();
        assert_eq!(q25, 2.0);

        let q75 = quantifier.compute_empirical_quantile(&data, 0.75).unwrap();
        assert_eq!(q75, 4.0);
    }

    #[test]
    fn test_normal_quantile() {
        let quantifier = UncertaintyQuantifier::default();

        // Test median (should be approximately 0)
        let median = quantifier.compute_normal_quantile(0.5).unwrap();
        assert!(median.abs() < 1e-10);

        // Test 97.5% quantile (should be approximately 1.96); the central-region
        // Acklam approximation is highly accurate this far from the tails
        let q975 = quantifier.compute_normal_quantile(0.975).unwrap();
        assert!((q975 - 1.96).abs() < 1e-3, "Expected ~1.96, got {}", q975);
    }

    #[test]
    fn test_bootstrap_sample() {
        let quantifier = UncertaintyQuantifier::new(UncertaintyConfig {
            random_seed: Some(42),
            ..Default::default()
        });
        let mut rng = quantifier.create_rng();

        let indices = quantifier.bootstrap_sample(5, &mut rng);
        assert_eq!(indices.len(), 5);
        assert!(indices.iter().all(|&i| i < 5));
    }

    #[test]
    #[allow(non_snake_case)]
    fn test_bayesian_uncertainty() {
        let quantifier = UncertaintyQuantifier::default();

        // Mock posterior samples (3 samples, 2 features)
        let posterior_samples =
            Array::from_shape_vec((3, 2), vec![1.0, 0.5, 1.1, 0.4, 0.9, 0.6]).unwrap();

        // Test data (2 samples, 2 features)
        let X_test = Array::from_shape_vec((2, 2), vec![1.0, 1.0, 2.0, 2.0]).unwrap();

        let noise_precision = 1.0;

        let result = quantifier
            .bayesian_uncertainty(&posterior_samples, &X_test, noise_precision)
            .unwrap();

        assert_eq!(result.predictions.len(), 2);
        assert_eq!(result.total_uncertainty.len(), 2);
        assert!(result.epistemic_uncertainty.is_some());
        assert!(result.aleatoric_uncertainty.is_some());
        assert_eq!(result.method, "Bayesian");
    }

    #[test]
    fn test_ensemble_uncertainty() {
        let quantifier = UncertaintyQuantifier::default();

        // Mock ensemble predictions (5 models, 3 test points)
        let ensemble_predictions = Array::from_shape_vec(
            (5, 3),
            vec![
                1.0, 2.0, 3.0, 1.1, 1.9, 3.1, 0.9, 2.1, 2.9, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0,
            ],
        )
        .unwrap();

        let result = quantifier
            .ensemble_uncertainty(&ensemble_predictions)
            .unwrap();

        assert_eq!(result.predictions.len(), 3);
        assert_eq!(result.total_uncertainty.len(), 3);
        assert!(result.epistemic_uncertainty.is_some());
        assert_eq!(result.method, "Ensemble");
    }

    #[test]
    fn test_calibration_metrics() {
        let uncertainty_result = UncertaintyResult {
            predictions: Array::from_vec(vec![1.0, 2.0, 3.0]),
            total_uncertainty: Array::from_vec(vec![0.1, 0.1, 0.1]),
            epistemic_uncertainty: None,
            aleatoric_uncertainty: None,
            lower_bound: Array::from_vec(vec![0.8, 1.8, 2.8]),
            upper_bound: Array::from_vec(vec![1.2, 2.2, 3.2]),
            confidence_level: 0.95,
            method: "Test".to_string(),
        };

        let y_true = Array::from_vec(vec![1.1, 2.1, 3.1]);

        let metrics = CalibrationMetrics::compute(&uncertainty_result, &y_true, 10).unwrap();

        assert_eq!(metrics.coverage, 1.0);
        assert!((metrics.mean_interval_width - 0.4).abs() < 1e-10);
        assert!((metrics.expected_calibration_error - 0.05).abs() < 1e-10);
    }
}