sklears_model_selection/epistemic_uncertainty/uncertainty_decomposition.rs

use scirs2_core::ndarray::{Array1, Array2};
// use scirs2_core::numeric::Float;

#[derive(Debug, Clone)]
pub enum UncertaintyDecompositionMethod {
    /// Variance-based split of total predictive variance into between-model
    /// (epistemic) and within-model (aleatoric) parts.
    VarianceDecomposition,
    /// Entropy-based split into mutual information (epistemic) and expected
    /// entropy (aleatoric).
    InformationTheoretic,
    /// Decomposition based on a Bayesian treatment of model parameters.
    BayesianDecomposition,
    /// Total uncertainty measured as the entropy of the mean prediction.
    PredictiveEntropy,
    /// Epistemic uncertainty measured as the mutual information between
    /// predictions and model parameters.
    MutualInformation,
}

pub fn decompose_variance_uncertainty(
    predictions: &Array2<f64>,
    mean_prediction: &Array1<f64>,
) -> Result<(Array1<f64>, Array1<f64>), Box<dyn std::error::Error>> {
    // Rows are ensemble members, columns are individual predictions.
    let n_samples = predictions.nrows();
    let n_predictions = predictions.ncols();

    // Total variance of each prediction column across ensemble members
    // (population variance, ddof = 0).
    let total_variance = predictions.var_axis(scirs2_core::ndarray::Axis(0), 0.0);

    let mut epistemic_uncertainty = Array1::<f64>::zeros(n_predictions);
    let mut aleatoric_uncertainty = Array1::<f64>::zeros(n_predictions);

    for i in 0..n_predictions {
        let pred_col = predictions.column(i);
        let mean_val = mean_prediction[i];

        // Spread of the ensemble members around the supplied mean prediction:
        // disagreement between models is treated as epistemic uncertainty.
        let model_variance = pred_col
            .iter()
            .map(|&x| (x - mean_val).powi(2))
            .sum::<f64>()
            / n_samples as f64;
        // The remainder of the total column variance is attributed to the data
        // (aleatoric), clamped at zero to guard against negative round-off.
        let within_model_variance = total_variance[i] - model_variance;

        epistemic_uncertainty[i] = model_variance;
        aleatoric_uncertainty[i] = within_model_variance.max(0.0);
    }

    Ok((epistemic_uncertainty, aleatoric_uncertainty))
}
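
// A minimal, illustrative usage sketch (not part of the original file). The
// function name and numeric values below are made up for demonstration, and it
// assumes `scirs2_core::ndarray` exposes the usual `ndarray` constructors used
// here. Rows are ensemble members, columns are prediction targets, and the
// column mean is used as `mean_prediction`.
#[allow(dead_code)]
fn example_variance_decomposition() -> Result<(), Box<dyn std::error::Error>> {
    use scirs2_core::ndarray::Axis;

    // 3 ensemble members x 2 prediction targets.
    let predictions = Array2::from_shape_vec((3, 2), vec![1.0, 0.2, 1.2, 0.1, 0.8, 0.3])?;
    let mean_prediction = predictions
        .mean_axis(Axis(0))
        .ok_or("empty prediction matrix")?;

    let (epistemic, aleatoric) = decompose_variance_uncertainty(&predictions, &mean_prediction)?;
    println!("epistemic: {epistemic}, aleatoric: {aleatoric}");
    Ok(())
}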

pub fn information_theoretic_decomposition(
    predictions: &Array2<f64>,
    prediction_probs: &Array2<f64>,
) -> Result<(Array1<f64>, Array1<f64>), Box<dyn std::error::Error>> {
    let n_samples = predictions.nrows();
    let n_predictions = predictions.ncols();

    let mut epistemic_uncertainty = Array1::<f64>::zeros(n_predictions);
    let mut aleatoric_uncertainty = Array1::<f64>::zeros(n_predictions);

    for i in 0..n_predictions {
        let prob_col = prediction_probs.column(i);

        // Expected entropy across ensemble members, E[H[p]]: uncertainty that
        // remains even when the model is known, i.e. the aleatoric part.
        let mean_entropy = prob_col
            .iter()
            .map(|&p| if p > 0.0 { -p * p.ln() } else { 0.0 })
            .sum::<f64>()
            / n_samples as f64;

        // Entropy of the mean probability, H[E[p]]: the total predictive uncertainty.
        let entropy_of_mean = {
            let mean_p = prob_col.mean().unwrap_or(0.0);
            if mean_p > 0.0 {
                -mean_p * mean_p.ln()
            } else {
                0.0
            }
        };

        // Epistemic uncertainty is the mutual information between the prediction
        // and the model: total entropy minus expected entropy, clamped at zero.
        epistemic_uncertainty[i] = (entropy_of_mean - mean_entropy).max(0.0);
        aleatoric_uncertainty[i] = mean_entropy;
    }

    Ok((epistemic_uncertainty, aleatoric_uncertainty))
}
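
// A minimal test sketch (added as an illustration, with made-up probability
// values; not part of the original file): after the information-theoretic
// split, the epistemic and aleatoric parts should be non-negative and sum back
// to the entropy of the mean probability.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn information_theoretic_parts_sum_to_total_entropy() -> Result<(), Box<dyn std::error::Error>> {
        // 4 ensemble members x 1 prediction, with disagreeing probabilities.
        let predictions = Array2::from_shape_vec((4, 1), vec![0.9, 0.1, 0.8, 0.2])?;
        let probs = predictions.clone();

        let (epistemic, aleatoric) = information_theoretic_decomposition(&predictions, &probs)?;

        // The mean probability is 0.5, so the total entropy is -0.5 * ln(0.5).
        let mean_p = 0.5_f64;
        let total = -mean_p * mean_p.ln();
        assert!(epistemic[0] >= 0.0 && aleatoric[0] >= 0.0);
        assert!((epistemic[0] + aleatoric[0] - total).abs() < 1e-9);
        Ok(())
    }
}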