use serde::{Deserialize, Serialize};
pub use rv;
pub use statrs;
use statrs::distribution::{Beta, Continuous, ContinuousCDF, Normal};
/// Two-sided confidence level used for confidence and credible intervals.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum ConfidenceLevel {
    /// 90% confidence (significance α = 0.10).
    P90,
    /// 95% confidence (significance α = 0.05).
    P95,
    /// 99% confidence (significance α = 0.01).
    P99,
}
impl ConfidenceLevel {
pub fn alpha(&self) -> f64 {
match self {
ConfidenceLevel::P90 => 0.10,
ConfidenceLevel::P95 => 0.05,
ConfidenceLevel::P99 => 0.01,
}
}
pub fn z_score(&self) -> f64 {
match self {
ConfidenceLevel::P90 => 1.645,
ConfidenceLevel::P95 => 1.96,
ConfidenceLevel::P99 => 2.576,
}
}
}
/// A two-sided interval `[lower, upper]` tagged with the confidence level
/// it was computed for.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct ConfidenceInterval {
    /// Lower bound of the interval.
    pub lower: f64,
    /// Upper bound of the interval.
    pub upper: f64,
    /// Confidence level the bounds correspond to.
    pub level: ConfidenceLevel,
}
impl ConfidenceInterval {
    /// Builds an interval from its bounds and confidence level.
    pub fn new(lower: f64, upper: f64, level: ConfidenceLevel) -> Self {
        Self { lower, upper, level }
    }

    /// Distance between the upper and lower bounds.
    pub fn width(&self) -> f64 {
        self.upper - self.lower
    }

    /// Whether `value` lies within the closed interval `[lower, upper]`.
    pub fn contains(&self, value: f64) -> bool {
        (self.lower..=self.upper).contains(&value)
    }
}
/// Computes a normal-approximation confidence interval for the mean of
/// `samples`, using the sample standard error and the z critical value
/// for `level`.
///
/// With fewer than two samples the sample variance is undefined, so the
/// returned bounds are `NaN` (this matches the previous behavior, where
/// `n == 1` produced `NaN` through an accidental `0.0 / 0.0`).
///
/// NOTE(review): for small `n` a Student-t critical value would be more
/// appropriate than the fixed z-score used here — confirm intended usage.
pub fn confidence_interval(samples: &[f64], level: ConfidenceLevel) -> ConfidenceInterval {
    // Explicit guard: the (n - 1)-denominator variance needs at least two
    // samples; previously len == 1 silently divided by zero.
    if samples.len() < 2 {
        return ConfidenceInterval::new(f64::NAN, f64::NAN, level);
    }
    let n = samples.len() as f64;
    let mean = samples.iter().sum::<f64>() / n;
    // Unbiased (Bessel-corrected) sample variance.
    let variance = samples.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / (n - 1.0);
    let std_err = (variance / n).sqrt();
    let z = level.z_score();
    ConfidenceInterval::new(mean - z * std_err, mean + z * std_err, level)
}
/// A point estimate together with its uncertainty (one standard deviation)
/// and, when available, a confidence interval around it.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UncertainValue {
    /// Central (point) estimate.
    pub value: f64,
    /// Standard deviation of the estimate.
    pub uncertainty: f64,
    /// Interval around `value`; `None` when it could not be computed
    /// (e.g. built from an empty sample slice).
    pub confidence_interval: Option<ConfidenceInterval>,
}
impl UncertainValue {
    /// Builds an `UncertainValue` from the parameters of a normal
    /// distribution, attaching the corresponding 95% interval
    /// (`mean ± 1.96 σ`).
    pub fn from_normal(mean: f64, std_dev: f64) -> Self {
        // Use the shared critical value instead of a hard-coded 1.96 so the
        // constant stays consistent with ConfidenceLevel::z_score.
        let z = ConfidenceLevel::P95.z_score();
        let ci = ConfidenceInterval::new(mean - z * std_dev, mean + z * std_dev, ConfidenceLevel::P95);
        Self {
            value: mean,
            uncertainty: std_dev,
            confidence_interval: Some(ci),
        }
    }

    /// Builds an `UncertainValue` from raw samples: mean, Bessel-corrected
    /// standard deviation, and a 95% confidence interval.
    ///
    /// Empty input yields `NaN` value/uncertainty with no interval. A single
    /// sample yields its value with `NaN` uncertainty — previously that case
    /// reached the same result through an accidental `0.0 / 0.0`; it is now
    /// explicit.
    pub fn from_samples(samples: &[f64]) -> Self {
        if samples.is_empty() {
            return Self {
                value: f64::NAN,
                uncertainty: f64::NAN,
                confidence_interval: None,
            };
        }
        let n = samples.len() as f64;
        let mean = samples.iter().sum::<f64>() / n;
        // Sample std dev is undefined for a single observation.
        let std_dev = if samples.len() < 2 {
            f64::NAN
        } else {
            (samples.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / (n - 1.0)).sqrt()
        };
        let ci = confidence_interval(samples, ConfidenceLevel::P95);
        Self {
            value: mean,
            uncertainty: std_dev,
            confidence_interval: Some(ci),
        }
    }

    /// Ratio of uncertainty to |value|; `INFINITY` when the value is
    /// exactly zero.
    pub fn relative_uncertainty(&self) -> f64 {
        if self.value == 0.0 {
            f64::INFINITY
        } else {
            self.uncertainty / self.value.abs()
        }
    }
}
/// Beta-Bernoulli belief over a success probability.
///
/// The posterior is `Beta(prior_alpha + successes, prior_beta + failures)`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BinaryBelief {
    /// Number of observed successes.
    pub successes: u64,
    /// Number of observed failures.
    pub failures: u64,
    /// Alpha parameter of the Beta prior.
    pub prior_alpha: f64,
    /// Beta parameter of the Beta prior.
    pub prior_beta: f64,
}
impl BinaryBelief {
    /// Uniform `Beta(1, 1)` prior with no observations.
    pub fn uniform_prior() -> Self {
        Self {
            successes: 0,
            failures: 0,
            prior_alpha: 1.0,
            prior_beta: 1.0,
        }
    }

    /// Jeffreys `Beta(0.5, 0.5)` prior with no observations.
    pub fn jeffreys_prior() -> Self {
        Self {
            successes: 0,
            failures: 0,
            prior_alpha: 0.5,
            prior_beta: 0.5,
        }
    }

    /// Prior centered on `prior_probability` with pseudo-observation weight
    /// `strength` (`alpha + beta == strength`).
    ///
    /// NOTE(review): a `prior_probability` of exactly 0.0 or 1.0, or a
    /// non-positive `strength`, produces non-positive Beta parameters and
    /// makes [`credible_interval`](Self::credible_interval) panic — confirm
    /// callers guard against this.
    pub fn with_prior(prior_probability: f64, strength: f64) -> Self {
        let alpha = prior_probability * strength;
        let beta = (1.0 - prior_probability) * strength;
        Self {
            successes: 0,
            failures: 0,
            prior_alpha: alpha,
            prior_beta: beta,
        }
    }

    /// Records a single success or failure.
    pub fn observe(&mut self, success: bool) {
        if success {
            self.successes += 1;
        } else {
            self.failures += 1;
        }
    }

    /// Records a batch of successes and failures at once.
    pub fn observe_batch(&mut self, successes: u64, failures: u64) {
        self.successes += successes;
        self.failures += failures;
    }

    /// Posterior alpha: prior alpha plus observed successes.
    pub fn posterior_alpha(&self) -> f64 {
        self.prior_alpha + self.successes as f64
    }

    /// Posterior beta: prior beta plus observed failures.
    pub fn posterior_beta(&self) -> f64 {
        self.prior_beta + self.failures as f64
    }

    /// Posterior mean `α / (α + β)`.
    pub fn mean(&self) -> f64 {
        let alpha = self.posterior_alpha();
        let beta = self.posterior_beta();
        alpha / (alpha + beta)
    }

    /// Posterior variance `αβ / ((α + β)² (α + β + 1))`.
    pub fn variance(&self) -> f64 {
        let alpha = self.posterior_alpha();
        let beta = self.posterior_beta();
        let sum = alpha + beta;
        (alpha * beta) / (sum * sum * (sum + 1.0))
    }

    /// Posterior standard deviation.
    pub fn std_dev(&self) -> f64 {
        self.variance().sqrt()
    }

    /// Normal approximation of the posterior as an [`UncertainValue`].
    pub fn to_uncertain(&self) -> UncertainValue {
        UncertainValue::from_normal(self.mean(), self.std_dev())
    }

    /// Equal-tailed posterior credible interval at `level`.
    ///
    /// # Panics
    /// Panics if the posterior parameters are not strictly positive and
    /// finite, which only happens with a degenerate prior (see
    /// [`with_prior`](Self::with_prior)).
    pub fn credible_interval(&self, level: ConfidenceLevel) -> ConfidenceInterval {
        let alpha = self.posterior_alpha();
        let beta_param = self.posterior_beta();
        let dist = Beta::new(alpha, beta_param)
            .expect("posterior Beta parameters must be positive and finite");
        let half_alpha = level.alpha() / 2.0;
        let lower = dist.inverse_cdf(half_alpha);
        let upper = dist.inverse_cdf(1.0 - half_alpha);
        ConfidenceInterval::new(lower, upper, level)
    }
}
impl Default for BinaryBelief {
fn default() -> Self {
Self::uniform_prior()
}
}
/// A particle filter over a scalar state with Gaussian process noise.
pub struct ParticleFilter {
    /// Particle states.
    particles: Vec<f64>,
    /// Importance weight per particle (parallel to `particles`).
    weights: Vec<f64>,
    /// Standard deviation of the Gaussian noise added in `predict`.
    process_noise: f64,
}
impl ParticleFilter {
    /// Creates a filter with `num_particles` particles drawn from
    /// `Normal(initial_state, initial_spread)` via inverse-transform
    /// sampling, with uniform initial weights.
    ///
    /// # Panics
    /// Panics if `initial_spread` is not strictly positive and finite.
    pub fn new(
        num_particles: usize,
        initial_state: f64,
        initial_spread: f64,
        process_noise: f64,
    ) -> Self {
        let dist = Normal::new(initial_state, initial_spread)
            .expect("initial_spread must be positive and finite");
        let particles: Vec<f64> = (0..num_particles)
            .map(|_| dist.inverse_cdf(Self::open_uniform()))
            .collect();
        let uniform_weight = 1.0 / num_particles as f64;
        let weights = vec![uniform_weight; num_particles];
        Self {
            particles,
            weights,
            process_noise,
        }
    }

    /// Uniform draw in the open interval (0, 1).
    ///
    /// `rand::random::<f64>()` samples [0, 1); a draw of exactly 0.0 would
    /// make `inverse_cdf` return `-inf`, so clamp the draw away from zero.
    fn open_uniform() -> f64 {
        rand::random::<f64>().max(f64::MIN_POSITIVE)
    }

    /// Propagates every particle by adding zero-mean Gaussian process noise.
    ///
    /// # Panics
    /// Panics if `process_noise` is not strictly positive and finite.
    pub fn predict(&mut self) {
        let noise_dist = Normal::new(0.0, self.process_noise)
            .expect("process_noise must be positive and finite");
        for particle in &mut self.particles {
            *particle += noise_dist.inverse_cdf(Self::open_uniform());
        }
    }

    /// Reweights each particle by the Gaussian likelihood of `observation`
    /// and renormalizes. If every likelihood underflows to zero, the weights
    /// are left as-is rather than divided by zero.
    ///
    /// # Panics
    /// Panics if `observation_noise` is not strictly positive and finite.
    pub fn update(&mut self, observation: f64, observation_noise: f64) {
        let obs_dist = Normal::new(0.0, observation_noise)
            .expect("observation_noise must be positive and finite");
        for (weight, particle) in self.weights.iter_mut().zip(self.particles.iter()) {
            *weight *= obs_dist.pdf(observation - particle);
        }
        let sum: f64 = self.weights.iter().sum();
        if sum > 0.0 {
            for w in &mut self.weights {
                *w /= sum;
            }
        }
    }

    /// Weighted mean of the particles (assumes normalized weights).
    pub fn estimate(&self) -> f64 {
        self.particles
            .iter()
            .zip(self.weights.iter())
            .map(|(p, w)| p * w)
            .sum()
    }

    /// Weighted mean and standard deviation packaged as an
    /// [`UncertainValue`].
    pub fn uncertainty(&self) -> UncertainValue {
        let mean = self.estimate();
        let variance: f64 = self
            .particles
            .iter()
            .zip(self.weights.iter())
            .map(|(p, w)| w * (p - mean).powi(2))
            .sum();
        UncertainValue::from_normal(mean, variance.sqrt())
    }

    /// Multinomial resampling: draws `n` particles with probability
    /// proportional to weight, then resets the weights to uniform.
    pub fn resample(&mut self) {
        let n = self.particles.len();
        if n == 0 {
            // Previously this panicked indexing cumsum[0] on an empty filter.
            return;
        }
        // Cumulative weight distribution (non-decreasing since pdf-derived
        // weights are non-negative).
        let mut cumsum = Vec::with_capacity(n);
        let mut acc = 0.0;
        for &w in &self.weights {
            acc += w;
            cumsum.push(acc);
        }
        let new_particles: Vec<f64> = (0..n)
            .map(|_| {
                let u = Self::open_uniform();
                // Binary search for the first index with cumsum >= u —
                // O(n log n) overall, replacing the previous O(n^2) linear
                // scan. `.min(n - 1)` covers a draw beyond the (possibly
                // un-normalized, < 1.0) total weight.
                let idx = cumsum.partition_point(|&c| c < u).min(n - 1);
                self.particles[idx]
            })
            .collect();
        self.particles = new_particles;
        self.weights = vec![1.0 / n as f64; n];
    }

    /// Effective sample size `1 / Σ wᵢ²`; low values indicate weight
    /// degeneracy and suggest calling `resample`.
    pub fn effective_sample_size(&self) -> f64 {
        let sum_sq: f64 = self.weights.iter().map(|w| w * w).sum();
        if sum_sq > 0.0 {
            1.0 / sum_sq
        } else {
            0.0
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // The z-interval for 0..100 should be a proper interval bracketing the
    // sample mean (49.5); 50.0 lies well inside it.
    #[test]
    fn test_confidence_interval() {
        let samples: Vec<f64> = (0..100).map(|i| i as f64).collect();
        let ci = confidence_interval(&samples, ConfidenceLevel::P95);
        assert!(ci.lower < ci.upper);
        assert!(ci.contains(50.0));
    }

    // Uniform prior + 8 successes / 2 failures gives Beta(9, 3) with
    // posterior mean 9/12 = 0.75; its equal-tailed 95% interval must
    // contain that mean.
    #[test]
    fn test_binary_belief() {
        let mut belief = BinaryBelief::uniform_prior();
        belief.observe_batch(8, 2);
        let mean = belief.mean();
        assert!(mean > 0.7 && mean < 0.9);
        let ci = belief.credible_interval(ConfidenceLevel::P95);
        assert!(ci.contains(mean));
    }

    // Mean of 1..=5 is exactly 3.0 and the sample std dev is positive.
    #[test]
    fn test_uncertain_value() {
        let samples = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let uncertain = UncertainValue::from_samples(&samples);
        assert!((uncertain.value - 3.0).abs() < 0.01);
        assert!(uncertain.uncertainty > 0.0);
    }

    // NOTE(review): stochastic — rand is not seeded. After observing 1.0
    // with prior N(0, 1) the weighted estimate is positive with very high
    // probability, but this assertion is not deterministic.
    #[test]
    fn test_particle_filter() {
        let mut pf = ParticleFilter::new(1000, 0.0, 1.0, 0.1);
        pf.predict();
        pf.update(1.0, 0.5);
        let estimate = pf.estimate();
        assert!(estimate > 0.0);
    }
}