use crate::distributions::traits::Distribution;
use crate::error::{StatsError, StatsResult};
use crate::utils::special_functions::gamma_fn;
/// Two-parameter Weibull distribution.
///
/// `k` is the shape parameter and `lambda` the scale parameter; both must be
/// strictly positive (enforced by [`Weibull::new`]).
#[derive(Debug, Clone, Copy)]
pub struct Weibull {
/// Shape parameter (`k > 0`).
pub k: f64,
/// Scale parameter (`lambda > 0`).
pub lambda: f64,
}
impl Weibull {
    /// Creates a Weibull distribution with shape `k` and scale `lambda`.
    ///
    /// # Errors
    /// Returns `StatsError::InvalidInput` if either parameter is not strictly
    /// positive.
    pub fn new(k: f64, lambda: f64) -> StatsResult<Self> {
        if k <= 0.0 || lambda <= 0.0 {
            return Err(StatsError::InvalidInput {
                message: "Weibull::new: k and lambda must be positive".to_string(),
            });
        }
        Ok(Self { k, lambda })
    }

    /// Fits a Weibull distribution to `data` using a method-of-moments
    /// approximation for the shape (k ~ cv^-1.086) followed by the
    /// closed-form maximum-likelihood scale estimate given that shape.
    ///
    /// # Errors
    /// Returns `StatsError::InvalidInput` if `data` is empty, contains a
    /// non-positive value, or is constant (zero variance) — the moment
    /// estimator is undefined in the constant case (k diverges to infinity).
    pub fn fit(data: &[f64]) -> StatsResult<Self> {
        if data.is_empty() {
            return Err(StatsError::InvalidInput {
                message: "Weibull::fit: data must not be empty".to_string(),
            });
        }
        if data.iter().any(|&x| x <= 0.0) {
            return Err(StatsError::InvalidInput {
                message: "Weibull::fit: all data values must be positive".to_string(),
            });
        }
        let n = data.len() as f64;
        let mean = data.iter().sum::<f64>() / n;
        // Population (biased) variance; adequate for a moment-based fit.
        let variance = data.iter().map(|&x| (x - mean).powi(2)).sum::<f64>() / n;
        if variance == 0.0 {
            // cv = 0 would make cv.powf(-1.086) evaluate to +inf, silently
            // producing a degenerate k = inf that passes new()'s k > 0 check.
            // Reject explicitly instead.
            return Err(StatsError::InvalidInput {
                message: "Weibull::fit: data must not be constant".to_string(),
            });
        }
        let cv = variance.sqrt() / mean;
        // Empirical power-law approximation of the shape from the coefficient
        // of variation; floored to keep k numerically sane.
        let k = cv.powf(-1.086).max(0.01);
        // Given k, the MLE of the scale is ((1/n) * sum(x^k))^(1/k).
        let sum_xk: f64 = data.iter().map(|&x| x.powf(k)).sum::<f64>();
        let lambda = (sum_xk / n).powf(1.0 / k);
        Self::new(k, lambda)
    }
}
impl Distribution for Weibull {
    fn name(&self) -> &str {
        "Weibull"
    }

    fn num_params(&self) -> usize {
        2
    }

    /// Probability density function.
    ///
    /// Handles the x = 0 boundary explicitly: the density diverges for k < 1,
    /// equals 1/lambda for k = 1 (exponential case), and is 0 for k > 1.
    fn pdf(&self, x: f64) -> StatsResult<f64> {
        if x < 0.0 {
            return Ok(0.0);
        }
        if x == 0.0 {
            return Ok(if self.k < 1.0 {
                f64::INFINITY
            } else if self.k == 1.0 {
                1.0 / self.lambda
            } else {
                0.0
            });
        }
        // Exponentiate the log-density to avoid overflow/underflow in the
        // intermediate products.
        Ok(self.logpdf(x)?.exp())
    }

    /// Log-density: ln k - ln lambda + (k - 1) ln(x/lambda) - (x/lambda)^k.
    fn logpdf(&self, x: f64) -> StatsResult<f64> {
        if x <= 0.0 {
            return Ok(f64::NEG_INFINITY);
        }
        let xl = x / self.lambda;
        // Reuse `xl` rather than recomputing x / lambda.
        Ok(self.k.ln() - self.lambda.ln() + (self.k - 1.0) * xl.ln() - xl.powf(self.k))
    }

    /// CDF: 1 - exp(-(x/lambda)^k).
    ///
    /// Computed as -exp_m1(-t) so that tiny probabilities near x = 0 keep
    /// full precision (1.0 - exp(-t) cancels catastrophically for small t).
    fn cdf(&self, x: f64) -> StatsResult<f64> {
        if x <= 0.0 {
            return Ok(0.0);
        }
        let t = (x / self.lambda).powf(self.k);
        Ok(-(-t).exp_m1())
    }

    /// Quantile function: lambda * (-ln(1 - p))^(1/k).
    ///
    /// Uses ln_1p so that ln(1 - p) stays accurate for small p.
    ///
    /// # Errors
    /// Returns `StatsError::InvalidInput` if `p` is outside [0, 1].
    fn inverse_cdf(&self, p: f64) -> StatsResult<f64> {
        if !(0.0..=1.0).contains(&p) {
            return Err(StatsError::InvalidInput {
                message: "Weibull::inverse_cdf: p must be in [0, 1]".to_string(),
            });
        }
        if p == 0.0 {
            return Ok(0.0);
        }
        if p == 1.0 {
            return Ok(f64::INFINITY);
        }
        // -ln(1 - p) computed as -ln_1p(-p) for accuracy near p = 0.
        let t = -(-p).ln_1p();
        Ok(self.lambda * t.powf(1.0 / self.k))
    }

    /// Mean: lambda * Gamma(1 + 1/k).
    fn mean(&self) -> f64 {
        self.lambda * gamma_fn(1.0 + 1.0 / self.k)
    }

    /// Variance: lambda^2 * [Gamma(1 + 2/k) - Gamma(1 + 1/k)^2].
    fn variance(&self) -> f64 {
        let g1 = gamma_fn(1.0 + 1.0 / self.k);
        let g2 = gamma_fn(1.0 + 2.0 / self.k);
        self.lambda * self.lambda * (g2 - g1 * g1)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// With k = 1 the Weibull reduces to an exponential with rate 1/lambda.
    #[test]
    fn test_weibull_exponential_case() {
        let w = Weibull::new(1.0, 2.0).unwrap();
        let expected_cdf = 1.0 - (-1.0_f64).exp();
        assert!((w.mean() - 2.0).abs() < 1e-8);
        assert!((w.cdf(2.0).unwrap() - expected_cdf).abs() < 1e-10);
    }

    /// cdf(inverse_cdf(p)) must round-trip back to p.
    #[test]
    fn test_weibull_inverse_cdf_exact() {
        let w = Weibull::new(2.0, 3.0).unwrap();
        let probs = [0.1, 0.25, 0.5, 0.75, 0.9];
        for &p in probs.iter() {
            let x = w.inverse_cdf(p).unwrap();
            let p_back = w.cdf(x).unwrap();
            assert!((p - p_back).abs() < 1e-10, "p={p}");
        }
    }

    /// Fitting strictly positive data must yield positive parameters.
    #[test]
    fn test_weibull_fit_recovers_scale() {
        let data = vec![1.5, 2.5, 3.5, 2.0, 4.0, 1.8, 3.0, 2.8, 1.2, 3.8];
        let w = Weibull::fit(&data).unwrap();
        assert!(w.k > 0.0);
        assert!(w.lambda > 0.0);
    }

    /// Non-positive parameters are rejected by the constructor.
    #[test]
    fn test_weibull_invalid() {
        assert!(Weibull::new(0.0, 1.0).is_err());
        assert!(Weibull::new(1.0, 0.0).is_err());
    }
}