use crate::special;
/// Error returned when a distribution is constructed with invalid parameters.
#[derive(Debug, Clone, PartialEq)]
pub enum DistributionError {
    /// The supplied parameters violate the distribution's domain constraints;
    /// the payload is a human-readable description of the violation.
    InvalidParameters(String),
}
impl std::fmt::Display for DistributionError {
    /// Formats the error as a human-readable message.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Single-variant enum: destructure with an irrefutable pattern
        // instead of a one-arm `match`.
        let DistributionError::InvalidParameters(msg) = self;
        write!(f, "invalid distribution parameters: {msg}")
    }
}
// Marker impl so DistributionError can be used as a boxed `dyn std::error::Error`.
impl std::error::Error for DistributionError {}
/// Continuous uniform distribution over the interval [min, max].
#[derive(Debug, Clone, PartialEq)]
pub struct Uniform {
    // Lower bound; `Uniform::new` guarantees finite and strictly below `max`.
    min: f64,
    // Upper bound; `Uniform::new` guarantees finite and strictly above `min`.
    max: f64,
}
impl Uniform {
    /// Builds a uniform distribution on [min, max].
    ///
    /// # Errors
    /// Returns `InvalidParameters` unless both bounds are finite and min < max.
    pub fn new(min: f64, max: f64) -> Result<Self, DistributionError> {
        let bounds_ok = min.is_finite() && max.is_finite() && min < max;
        if bounds_ok {
            Ok(Self { min, max })
        } else {
            Err(DistributionError::InvalidParameters(format!(
                "Uniform requires min < max, got min={min}, max={max}"
            )))
        }
    }
    /// Lower bound of the support.
    pub fn min(&self) -> f64 {
        self.min
    }
    /// Upper bound of the support.
    pub fn max(&self) -> f64 {
        self.max
    }
    /// Expected value: the midpoint of the interval.
    pub fn mean(&self) -> f64 {
        0.5 * (self.min + self.max)
    }
    /// Variance: (max − min)² / 12.
    pub fn variance(&self) -> f64 {
        let width = self.max - self.min;
        width * width / 12.0
    }
    /// Cumulative distribution function; the linear ramp clamped to [0, 1]
    /// yields exactly 0 below the support and exactly 1 above it.
    pub fn cdf(&self, x: f64) -> f64 {
        ((x - self.min) / (self.max - self.min)).clamp(0.0, 1.0)
    }
    /// Inverse CDF; `None` when `p` lies outside [0, 1] (including NaN).
    pub fn quantile(&self, p: f64) -> Option<f64> {
        (0.0..=1.0)
            .contains(&p)
            .then(|| self.min + p * (self.max - self.min))
    }
    /// Probability density: constant 1/(max − min) on the support, else 0.
    pub fn pdf(&self, x: f64) -> f64 {
        if (self.min..=self.max).contains(&x) {
            (self.max - self.min).recip()
        } else {
            0.0
        }
    }
}
/// Triangular distribution on [min, max] with the density peaking at `mode`.
#[derive(Debug, Clone, PartialEq)]
pub struct Triangular {
    // Lower bound a; finite and < max (enforced by `new`).
    min: f64,
    // Peak location b; finite, with min ≤ mode ≤ max (enforced by `new`).
    mode: f64,
    // Upper bound c; finite and > min (enforced by `new`).
    max: f64,
}
impl Triangular {
    /// Builds a triangular distribution with support [min, max] and peak at `mode`.
    ///
    /// # Errors
    /// Returns `InvalidParameters` when any value is non-finite, or when the
    /// ordering min ≤ mode ≤ max (with min < max) does not hold.
    pub fn new(min: f64, mode: f64, max: f64) -> Result<Self, DistributionError> {
        if !min.is_finite() || !mode.is_finite() || !max.is_finite() {
            return Err(DistributionError::InvalidParameters(
                "Triangular parameters must be finite".into(),
            ));
        }
        if min > mode || mode > max || min >= max {
            return Err(DistributionError::InvalidParameters(format!(
                "Triangular requires min ≤ mode ≤ max and min < max, got {min}, {mode}, {max}"
            )));
        }
        Ok(Self { min, mode, max })
    }
    /// Lower bound of the support.
    pub fn min(&self) -> f64 {
        self.min
    }
    /// Location of the density's peak.
    pub fn mode(&self) -> f64 {
        self.mode
    }
    /// Upper bound of the support.
    pub fn max(&self) -> f64 {
        self.max
    }
    /// Mean = (min + mode + max) / 3.
    pub fn mean(&self) -> f64 {
        (self.min + self.mode + self.max) / 3.0
    }
    /// Variance = (a² + b² + c² − ab − ac − bc) / 18.
    pub fn variance(&self) -> f64 {
        let (a, b, c) = (self.min, self.mode, self.max);
        (a * a + b * b + c * c - a * b - a * c - b * c) / 18.0
    }
    /// Probability density at `x`.
    ///
    /// Fix: the previous version returned 0 at x == mode when mode == min —
    /// the `.max(f64::MIN_POSITIVE)` guard forced that point into the
    /// ascending branch, dividing 0 by a huge number — but the correct
    /// density there is 2/(max − min).
    pub fn pdf(&self, x: f64) -> f64 {
        let (a, b, c) = (self.min, self.mode, self.max);
        if x < a || x > c {
            0.0
        } else if x <= b && b > a {
            // Ascending edge; `b > a` guarantees a nonzero denominator.
            2.0 * (x - a) / ((c - a) * (b - a))
        } else {
            // Descending edge. Reached with x == b == a only in the degenerate
            // mode-at-min case, where 2(c−x)/((c−a)(c−b)) = 2/(c−a) is exactly
            // the right value. `c - b > 0` holds whenever this branch divides:
            // if b == c, every in-support x is caught by the branch above.
            2.0 * (c - x) / ((c - a) * (c - b))
        }
    }
    /// Cumulative distribution function at `x`.
    pub fn cdf(&self, x: f64) -> f64 {
        let (a, b, c) = (self.min, self.mode, self.max);
        if x <= a {
            0.0
        } else if x <= b {
            // a < x ≤ b implies b > a, so the denominator is nonzero.
            (x - a) * (x - a) / ((c - a) * (b - a))
        } else if x < c {
            // b < x < c implies c > b, so the denominator is nonzero.
            1.0 - (c - x) * (c - x) / ((c - a) * (c - b))
        } else {
            1.0
        }
    }
    /// Inverse CDF; `None` when `p` is outside [0, 1] (including NaN).
    pub fn quantile(&self, p: f64) -> Option<f64> {
        if !(0.0..=1.0).contains(&p) {
            return None;
        }
        let (a, b, c) = (self.min, self.mode, self.max);
        // CDF value at the mode: the split point between the two quadratic pieces.
        let fc = (b - a) / (c - a);
        if p < fc {
            Some(a + ((c - a) * (b - a) * p).sqrt())
        } else {
            Some(c - ((c - a) * (c - b) * (1.0 - p)).sqrt())
        }
    }
}
/// Normal (Gaussian) distribution N(μ, σ²).
#[derive(Debug, Clone, PartialEq)]
pub struct Normal {
    // Location parameter μ; finite (enforced by `new`).
    mu: f64,
    // Scale parameter σ; finite and > 0 (enforced by `new`).
    sigma: f64,
}
impl Normal {
    /// Builds a normal (Gaussian) distribution N(μ, σ²).
    ///
    /// # Errors
    /// Returns `InvalidParameters` unless μ is finite and σ is finite and positive.
    pub fn new(mu: f64, sigma: f64) -> Result<Self, DistributionError> {
        if mu.is_finite() && sigma.is_finite() && sigma > 0.0 {
            Ok(Self { mu, sigma })
        } else {
            Err(DistributionError::InvalidParameters(format!(
                "Normal requires finite μ and σ > 0, got μ={mu}, σ={sigma}"
            )))
        }
    }
    /// Location parameter μ.
    pub fn mu(&self) -> f64 {
        self.mu
    }
    /// Scale parameter σ.
    pub fn sigma(&self) -> f64 {
        self.sigma
    }
    /// Mean of the distribution (equals μ).
    pub fn mean(&self) -> f64 {
        self.mu
    }
    /// Variance σ².
    pub fn variance(&self) -> f64 {
        self.sigma * self.sigma
    }
    /// Standard deviation σ.
    pub fn std_dev(&self) -> f64 {
        self.sigma
    }
    // Maps x to its standard score z = (x − μ)/σ.
    fn standardize(&self, x: f64) -> f64 {
        (x - self.mu) / self.sigma
    }
    /// Density at `x`: the standard-normal density of the z-score, scaled by 1/σ.
    pub fn pdf(&self, x: f64) -> f64 {
        special::standard_normal_pdf(self.standardize(x)) / self.sigma
    }
    /// CDF at `x`: Φ of the z-score.
    pub fn cdf(&self, x: f64) -> f64 {
        special::standard_normal_cdf(self.standardize(x))
    }
    /// Inverse CDF; `None` for p outside the open interval (0, 1).
    pub fn quantile(&self, p: f64) -> Option<f64> {
        if p <= 0.0 || p >= 1.0 {
            return None;
        }
        Some(self.mu + self.sigma * special::inverse_normal_cdf(p))
    }
}
/// Log-normal distribution: X such that ln X ~ N(μ, σ²).
#[derive(Debug, Clone, PartialEq)]
pub struct LogNormal {
    // Location μ of the underlying normal (log scale); finite (enforced by `new`).
    mu: f64,
    // Scale σ of the underlying normal (log scale); finite and > 0 (enforced by `new`).
    sigma: f64,
}
impl LogNormal {
    /// Builds a log-normal distribution with ln X ~ N(μ, σ²).
    ///
    /// # Errors
    /// Returns `InvalidParameters` unless μ is finite and σ is finite and positive.
    pub fn new(mu: f64, sigma: f64) -> Result<Self, DistributionError> {
        if mu.is_finite() && sigma.is_finite() && sigma > 0.0 {
            return Ok(Self { mu, sigma });
        }
        Err(DistributionError::InvalidParameters(format!(
            "LogNormal requires finite μ and σ > 0, got μ={mu}, σ={sigma}"
        )))
    }
    /// Log-scale location parameter μ.
    pub fn mu(&self) -> f64 {
        self.mu
    }
    /// Log-scale shape parameter σ.
    pub fn sigma(&self) -> f64 {
        self.sigma
    }
    /// Mean = exp(μ + σ²/2).
    pub fn mean(&self) -> f64 {
        (self.mu + self.sigma * self.sigma / 2.0).exp()
    }
    /// Variance = (exp(σ²) − 1) · exp(2μ + σ²).
    pub fn variance(&self) -> f64 {
        let s2 = self.sigma * self.sigma;
        (s2.exp() - 1.0) * (2.0 * self.mu + s2).exp()
    }
    /// Density at `x`; zero for x ≤ 0 since the support is (0, ∞).
    pub fn pdf(&self, x: f64) -> f64 {
        if x <= 0.0 {
            return 0.0;
        }
        let z = (x.ln() - self.mu) / self.sigma;
        special::standard_normal_pdf(z) / (x * self.sigma)
    }
    /// CDF at `x`: Φ((ln x − μ)/σ), zero for x ≤ 0.
    pub fn cdf(&self, x: f64) -> f64 {
        if x <= 0.0 {
            return 0.0;
        }
        special::standard_normal_cdf((x.ln() - self.mu) / self.sigma)
    }
    /// Inverse CDF; `None` for p outside the open interval (0, 1).
    pub fn quantile(&self, p: f64) -> Option<f64> {
        if p <= 0.0 || p >= 1.0 {
            return None;
        }
        let z = special::inverse_normal_cdf(p);
        Some((self.mu + self.sigma * z).exp())
    }
}
/// PERT distribution: a Beta distribution rescaled to [min, max], with shape
/// parameters derived from the most-likely value `mode` and a shape factor λ.
#[derive(Debug, Clone, PartialEq)]
pub struct Pert {
    // Lower bound; finite and < max (enforced by the constructors).
    min: f64,
    // Most-likely value; min ≤ mode ≤ max (enforced by the constructors).
    mode: f64,
    // Upper bound; finite and > min (enforced by the constructors).
    max: f64,
    // Derived Beta shape α = 1 + λ(mode − min)/(max − min).
    alpha: f64,
    // Derived Beta shape β = 1 + λ(max − mode)/(max − min).
    beta: f64,
}
impl Pert {
    /// Builds a PERT distribution with the conventional shape λ = 4.
    ///
    /// # Errors
    /// Same conditions as [`Pert::with_shape`].
    pub fn new(min: f64, mode: f64, max: f64) -> Result<Self, DistributionError> {
        Self::with_shape(min, mode, max, 4.0)
    }
    /// Builds a PERT distribution with an explicit shape factor λ.
    ///
    /// # Errors
    /// Returns `InvalidParameters` when any argument is non-finite, when the
    /// ordering min ≤ mode ≤ max (with min < max) is violated, or when λ ≤ 0.
    pub fn with_shape(
        min: f64,
        mode: f64,
        max: f64,
        lambda: f64,
    ) -> Result<Self, DistributionError> {
        let all_finite =
            min.is_finite() && mode.is_finite() && max.is_finite() && lambda.is_finite();
        if !all_finite {
            return Err(DistributionError::InvalidParameters(
                "PERT parameters must be finite".into(),
            ));
        }
        if min > mode || mode > max || min >= max {
            return Err(DistributionError::InvalidParameters(format!(
                "PERT requires min ≤ mode ≤ max and min < max, got {min}, {mode}, {max}"
            )));
        }
        if lambda <= 0.0 {
            return Err(DistributionError::InvalidParameters(format!(
                "PERT λ must be > 0, got {lambda}"
            )));
        }
        // Underlying Beta shape parameters on the rescaled [0, 1] support.
        let range = max - min;
        let alpha = 1.0 + lambda * (mode - min) / range;
        let beta = 1.0 + lambda * (max - mode) / range;
        Ok(Self {
            min,
            mode,
            max,
            alpha,
            beta,
        })
    }
    /// Lower bound of the support.
    pub fn min(&self) -> f64 {
        self.min
    }
    /// Most-likely value supplied at construction.
    pub fn mode(&self) -> f64 {
        self.mode
    }
    /// Upper bound of the support.
    pub fn max(&self) -> f64 {
        self.max
    }
    /// Derived Beta shape parameter α.
    pub fn alpha(&self) -> f64 {
        self.alpha
    }
    /// Derived Beta shape parameter β.
    pub fn beta_param(&self) -> f64 {
        self.beta
    }
    /// Mean = min + α/(α + β) · (max − min).
    pub fn mean(&self) -> f64 {
        let total = self.alpha + self.beta;
        self.min + (self.alpha / total) * (self.max - self.min)
    }
    /// Variance of the rescaled Beta distribution.
    pub fn variance(&self) -> f64 {
        let total = self.alpha + self.beta;
        let width = self.max - self.min;
        (self.alpha * self.beta) / (total * total * (total + 1.0)) * width * width
    }
    /// Standard deviation.
    pub fn std_dev(&self) -> f64 {
        self.variance().sqrt()
    }
    /// CDF via the regularized incomplete beta function on the rescaled support.
    pub fn cdf(&self, x: f64) -> f64 {
        if x <= self.min {
            return 0.0;
        }
        if x >= self.max {
            return 1.0;
        }
        let t = (x - self.min) / (self.max - self.min);
        special::regularized_incomplete_beta(t, self.alpha, self.beta)
    }
    /// Rough quantile via a normal approximation, clamped to the support.
    /// Returns `None` for p outside the open interval (0, 1).
    pub fn quantile_approx(&self, p: f64) -> Option<f64> {
        if p <= 0.0 || p >= 1.0 {
            return None;
        }
        let z = special::inverse_normal_cdf(p);
        Some((self.mean() + z * self.std_dev()).clamp(self.min, self.max))
    }
}
/// Two-parameter Weibull distribution (shape k, scale λ).
#[derive(Debug, Clone, PartialEq)]
pub struct Weibull {
    // Shape parameter k; `new` enforces k > 0 and finite.
    shape: f64,
    // Scale parameter λ; `new` enforces λ > 0 and finite.
    scale: f64,
}
impl Weibull {
    /// Builds a Weibull distribution with the given shape k and scale λ.
    ///
    /// # Errors
    /// Returns `InvalidParameters` unless both are positive and finite.
    pub fn new(shape: f64, scale: f64) -> Result<Self, DistributionError> {
        let valid = shape.is_finite() && scale.is_finite() && shape > 0.0 && scale > 0.0;
        if valid {
            Ok(Self { shape, scale })
        } else {
            Err(DistributionError::InvalidParameters(format!(
                "Weibull requires shape > 0 and scale > 0, got shape={shape}, scale={scale}"
            )))
        }
    }
    /// Shape parameter k.
    pub fn shape(&self) -> f64 {
        self.shape
    }
    /// Scale parameter λ.
    pub fn scale(&self) -> f64 {
        self.scale
    }
    /// Mean = λ · Γ(1 + 1/k).
    pub fn mean(&self) -> f64 {
        self.scale * special::gamma(1.0 + 1.0 / self.shape)
    }
    /// Variance = λ² · (Γ(1 + 2/k) − Γ(1 + 1/k)²).
    pub fn variance(&self) -> f64 {
        let g1 = special::gamma(1.0 + 1.0 / self.shape);
        let g2 = special::gamma(1.0 + 2.0 / self.shape);
        self.scale * self.scale * (g2 - g1 * g1)
    }
    /// Density at `t`; zero for t < 0.
    pub fn pdf(&self, t: f64) -> f64 {
        if t < 0.0 {
            return 0.0;
        }
        if t == 0.0 {
            // Density at the origin depends on the shape regime:
            // k < 1 diverges, k ≈ 1 is the exponential value 1/λ, k > 1 is 0.
            if self.shape < 1.0 {
                return f64::INFINITY;
            }
            if (self.shape - 1.0).abs() < f64::EPSILON {
                return 1.0 / self.scale;
            }
            return 0.0;
        }
        let z = t / self.scale;
        (self.shape / self.scale) * z.powf(self.shape - 1.0) * (-z.powf(self.shape)).exp()
    }
    /// CDF at `t`: 1 − exp(−(t/λ)^k), zero for t ≤ 0.
    pub fn cdf(&self, t: f64) -> f64 {
        if t <= 0.0 {
            return 0.0;
        }
        let z = t / self.scale;
        1.0 - (-z.powf(self.shape)).exp()
    }
    /// Inverse CDF; `None` for p outside [0, 1), since the p = 1 quantile is +∞.
    pub fn quantile(&self, p: f64) -> Option<f64> {
        if !(0.0..1.0).contains(&p) {
            return None;
        }
        if p == 0.0 {
            return Some(0.0);
        }
        let u = -(1.0 - p).ln();
        Some(self.scale * u.powf(1.0 / self.shape))
    }
    /// Instantaneous failure rate h(t) = pdf(t) / reliability(t) = (k/λ)(t/λ)^(k−1).
    pub fn hazard_rate(&self, t: f64) -> f64 {
        if t <= 0.0 {
            // At or below zero, only t == 0 with k ≈ 1 has a finite nonzero rate.
            let exponential_origin =
                t == 0.0 && self.shape >= 1.0 && (self.shape - 1.0).abs() < f64::EPSILON;
            return if exponential_origin {
                1.0 / self.scale
            } else {
                0.0
            };
        }
        let z = t / self.scale;
        (self.shape / self.scale) * z.powf(self.shape - 1.0)
    }
    /// Survival function R(t) = 1 − F(t).
    pub fn reliability(&self, t: f64) -> f64 {
        1.0 - self.cdf(t)
    }
}
/// Exponential distribution parameterized by its rate λ.
// `PartialEq` added for parity with the other distributions in this module.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Exponential {
    // Rate λ; guaranteed positive and finite by `Exponential::new`.
    rate: f64,
}
impl Exponential {
    /// Builds an exponential distribution with rate λ.
    ///
    /// # Errors
    /// Returns `InvalidParameters` unless the rate is positive and finite.
    pub fn new(rate: f64) -> Result<Self, DistributionError> {
        if rate.is_finite() && rate > 0.0 {
            Ok(Self { rate })
        } else {
            Err(DistributionError::InvalidParameters(format!(
                "Exponential rate must be positive and finite, got {rate}"
            )))
        }
    }
    /// Rate parameter λ.
    pub fn rate(&self) -> f64 {
        self.rate
    }
    /// Mean = 1/λ.
    pub fn mean(&self) -> f64 {
        self.rate.recip()
    }
    /// Variance = 1/λ².
    pub fn variance(&self) -> f64 {
        (self.rate * self.rate).recip()
    }
    /// Density λ·exp(−λx) for x ≥ 0, else 0.
    pub fn pdf(&self, x: f64) -> f64 {
        if x < 0.0 {
            return 0.0;
        }
        self.rate * (-self.rate * x).exp()
    }
    /// CDF 1 − exp(−λx) for x ≥ 0, else 0.
    pub fn cdf(&self, x: f64) -> f64 {
        if x < 0.0 {
            return 0.0;
        }
        1.0 - (-self.rate * x).exp()
    }
    /// Inverse CDF; `None` outside [0, 1], and +∞ at exactly p = 1.
    pub fn quantile(&self, p: f64) -> Option<f64> {
        let in_range = (0.0..=1.0).contains(&p);
        if !in_range {
            None
        } else if p == 1.0 {
            Some(f64::INFINITY)
        } else {
            Some(-(1.0 - p).ln() / self.rate)
        }
    }
    /// Constant hazard rate λ (the memoryless property).
    pub fn hazard_rate(&self) -> f64 {
        self.rate
    }
    /// Survival function S(x) = 1 − F(x).
    pub fn survival(&self, x: f64) -> f64 {
        1.0 - self.cdf(x)
    }
}
/// Gamma distribution in the shape/rate parameterization.
// Reformatted from a single crammed line; `PartialEq` added for parity with
// the other distributions in this module.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct GammaDistribution {
    // Shape k; positive and finite (enforced by `new`).
    shape: f64,
    // Rate λ (inverse scale); positive and finite (enforced by `new`).
    rate: f64,
}
impl GammaDistribution {
    /// Builds a Gamma distribution from shape k and rate λ.
    ///
    /// # Errors
    /// Returns `InvalidParameters` unless both are positive and finite.
    pub fn new(shape: f64, rate: f64) -> Result<Self, DistributionError> {
        if !shape.is_finite() || shape <= 0.0 || !rate.is_finite() || rate <= 0.0 {
            return Err(DistributionError::InvalidParameters(format!(
                "Gamma shape and rate must be positive and finite, got shape={shape}, rate={rate}"
            )));
        }
        Ok(Self { shape, rate })
    }
    /// Builds from shape k and scale θ (rate = 1/θ).
    ///
    /// # Errors
    /// Returns `InvalidParameters` when θ is not positive and finite, or when
    /// `new` rejects the derived parameters.
    pub fn from_shape_scale(shape: f64, scale: f64) -> Result<Self, DistributionError> {
        if !scale.is_finite() || scale <= 0.0 {
            return Err(DistributionError::InvalidParameters(format!(
                "Gamma scale must be positive and finite, got {scale}"
            )));
        }
        Self::new(shape, 1.0 / scale)
    }
    /// Shape parameter k.
    pub fn shape(&self) -> f64 {
        self.shape
    }
    /// Rate parameter λ.
    pub fn rate(&self) -> f64 {
        self.rate
    }
    /// Scale parameter θ = 1/λ.
    pub fn scale(&self) -> f64 {
        1.0 / self.rate
    }
    /// Mean = k/λ.
    pub fn mean(&self) -> f64 {
        self.shape / self.rate
    }
    /// Variance = k/λ².
    pub fn variance(&self) -> f64 {
        self.shape / (self.rate * self.rate)
    }
    /// Mode = (k − 1)/λ for k ≥ 1; for k < 1 the density diverges at 0,
    /// so 0 is reported.
    pub fn mode(&self) -> f64 {
        if self.shape >= 1.0 {
            (self.shape - 1.0) / self.rate
        } else {
            0.0
        }
    }
    /// Density at `x`, computed in log space to avoid overflow for large k.
    ///
    /// Fix: at x == 0 with shape == 1 this now returns `rate` — Gamma(1, λ)
    /// is Exponential(λ), whose density at 0 is λ (matching
    /// `Exponential::pdf(0.0)` and `Weibull::pdf(0.0)` in this module) —
    /// where it previously returned 0. The log-space formula cannot be used
    /// at x == 0 because (k − 1)·ln(0) is 0·(−∞) = NaN for k == 1.
    pub fn pdf(&self, x: f64) -> f64 {
        if x < 0.0 {
            return 0.0;
        }
        if x == 0.0 {
            return if self.shape < 1.0 {
                f64::INFINITY
            } else if self.shape == 1.0 {
                self.rate
            } else {
                0.0
            };
        }
        let log_pdf = self.shape * self.rate.ln() - special::ln_gamma(self.shape)
            + (self.shape - 1.0) * x.ln()
            - self.rate * x;
        log_pdf.exp()
    }
    /// CDF via the regularized lower incomplete gamma function P(k, λx).
    pub fn cdf(&self, x: f64) -> f64 {
        if x <= 0.0 {
            return 0.0;
        }
        special::regularized_lower_gamma(self.shape, self.rate * x)
    }
    /// Inverse CDF. Seeds with the Wilson–Hilferty chi-squared approximation,
    /// then refines with safeguarded Newton iterations on the CDF.
    pub fn quantile(&self, p: f64) -> Option<f64> {
        if !(0.0..=1.0).contains(&p) {
            return None;
        }
        if p == 0.0 {
            return Some(0.0);
        }
        if p == 1.0 {
            return Some(f64::INFINITY);
        }
        // Wilson–Hilferty starting point: χ²_p ≈ k(1 − v + z√v)³ with
        // v = 1/(9k), floored at a small positive value so Newton starts
        // inside the support.
        let z = special::inverse_normal_cdf(p);
        let v = 1.0 / (9.0 * self.shape);
        let chi2_approx = self.shape * (1.0 - v + z * v.sqrt()).powi(3).max(0.001);
        let mut x = chi2_approx / self.rate;
        for _ in 0..50 {
            let f = self.cdf(x) - p;
            let fp = self.pdf(x);
            if fp < 1e-300 {
                // Derivative underflowed; a Newton step would blow up.
                break;
            }
            let step = f / fp;
            // Keep the iterate strictly positive (the support is (0, ∞)).
            x = (x - step).max(1e-15);
            if step.abs() < 1e-12 * x {
                break;
            }
        }
        Some(x)
    }
}
/// Beta distribution on [0, 1] with shape parameters α and β.
// `PartialEq` added for consistency with the other distributions here.
#[derive(Debug, Clone, PartialEq)]
pub struct BetaDistribution {
    /// Shape parameter α; `new` requires it to be positive and finite.
    pub alpha: f64,
    /// Shape parameter β; `new` requires it to be positive and finite.
    pub beta: f64,
}
impl BetaDistribution {
    /// Builds a Beta(α, β) distribution on [0, 1].
    ///
    /// # Errors
    /// Returns `InvalidParameters` when either shape is not positive and finite.
    pub fn new(alpha: f64, beta: f64) -> Result<Self, DistributionError> {
        if !(alpha > 0.0 && alpha.is_finite()) {
            return Err(DistributionError::InvalidParameters(format!(
                "alpha must be positive and finite, got {alpha}"
            )));
        }
        if !(beta > 0.0 && beta.is_finite()) {
            return Err(DistributionError::InvalidParameters(format!(
                "beta must be positive and finite, got {beta}"
            )));
        }
        Ok(Self { alpha, beta })
    }
    /// Mean = α/(α + β).
    pub fn mean(&self) -> f64 {
        self.alpha / (self.alpha + self.beta)
    }
    /// Variance = αβ / ((α + β)²(α + β + 1)).
    pub fn variance(&self) -> f64 {
        let total = self.alpha + self.beta;
        self.alpha * self.beta / (total * total * (total + 1.0))
    }
    /// Mode = (α − 1)/(α + β − 2); defined only when both shapes exceed 1.
    pub fn mode(&self) -> Option<f64> {
        let interior_peak = self.alpha > 1.0 && self.beta > 1.0;
        interior_peak.then(|| (self.alpha - 1.0) / (self.alpha + self.beta - 2.0))
    }
    /// Density at `x`, computed in log space; zero outside the open interval (0, 1).
    pub fn pdf(&self, x: f64) -> f64 {
        if x <= 0.0 || x >= 1.0 {
            return 0.0;
        }
        let log_density = (self.alpha - 1.0) * x.ln() + (self.beta - 1.0) * (1.0 - x).ln()
            - special::ln_beta(self.alpha, self.beta);
        log_density.exp()
    }
    /// CDF via the regularized incomplete beta function I_x(α, β).
    pub fn cdf(&self, x: f64) -> f64 {
        if x <= 0.0 {
            return 0.0;
        }
        if x >= 1.0 {
            return 1.0;
        }
        special::regularized_incomplete_beta(x, self.alpha, self.beta)
    }
    /// Inverse CDF by bisection on the CDF over [0, 1];
    /// `None` when `p` lies outside [0, 1].
    pub fn quantile(&self, p: f64) -> Option<f64> {
        if !(0.0..=1.0).contains(&p) {
            return None;
        }
        if p == 0.0 {
            return Some(0.0);
        }
        if (p - 1.0).abs() < 1e-15 {
            return Some(1.0);
        }
        let (mut lower, mut upper) = (0.0_f64, 1.0_f64);
        for _ in 0..100 {
            let midpoint = (lower + upper) / 2.0;
            if upper - lower < 1e-14 {
                break;
            }
            if self.cdf(midpoint) < p {
                lower = midpoint;
            } else {
                upper = midpoint;
            }
        }
        Some((lower + upper) / 2.0)
    }
}
/// Chi-squared distribution with `k` degrees of freedom (k may be fractional).
// `PartialEq` added for consistency with the other distributions here.
#[derive(Debug, Clone, PartialEq)]
pub struct ChiSquared {
    /// Degrees of freedom; `new` requires it to be positive and finite.
    pub k: f64,
}
impl ChiSquared {
pub fn new(k: f64) -> Result<Self, DistributionError> {
if k <= 0.0 || !k.is_finite() {
return Err(DistributionError::InvalidParameters(format!(
"k must be positive and finite, got {k}"
)));
}
Ok(Self { k })
}
pub fn mean(&self) -> f64 {
self.k
}
pub fn variance(&self) -> f64 {
2.0 * self.k
}
pub fn pdf(&self, x: f64) -> f64 {
if x <= 0.0 {
return 0.0;
}
let half_k = self.k / 2.0;
let ln_pdf =
(half_k - 1.0) * x.ln() - x / 2.0 - half_k * 2.0_f64.ln() - special::ln_gamma(half_k);
ln_pdf.exp()
}
pub fn cdf(&self, x: f64) -> f64 {
if x <= 0.0 {
return 0.0;
}
special::regularized_lower_gamma(self.k / 2.0, x / 2.0)
}
pub fn quantile(&self, p: f64) -> Option<f64> {
if !(0.0..=1.0).contains(&p) {
return None;
}
if p == 0.0 {
return Some(0.0);
}
if (p - 1.0).abs() < 1e-15 {
return Some(f64::INFINITY);
}
let gamma = GammaDistribution {
shape: self.k / 2.0,
rate: 0.5,
};
gamma.quantile(p)
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // ----- Uniform -----
    #[test]
    fn test_uniform_basic() {
        let u = Uniform::new(0.0, 10.0).unwrap();
        assert!((u.mean() - 5.0).abs() < 1e-15);
        assert!((u.variance() - 100.0 / 12.0).abs() < 1e-10);
    }
    #[test]
    fn test_uniform_cdf() {
        let u = Uniform::new(0.0, 10.0).unwrap();
        assert_eq!(u.cdf(-1.0), 0.0);
        assert!((u.cdf(5.0) - 0.5).abs() < 1e-15);
        assert_eq!(u.cdf(11.0), 1.0);
    }
    #[test]
    fn test_uniform_quantile() {
        let u = Uniform::new(2.0, 8.0).unwrap();
        assert_eq!(u.quantile(0.0), Some(2.0));
        assert_eq!(u.quantile(1.0), Some(8.0));
        assert!((u.quantile(0.5).unwrap() - 5.0).abs() < 1e-15);
    }
    #[test]
    fn test_uniform_pdf() {
        let u = Uniform::new(0.0, 5.0).unwrap();
        assert!((u.pdf(2.5) - 0.2).abs() < 1e-15);
        assert_eq!(u.pdf(-1.0), 0.0);
    }
    #[test]
    fn test_uniform_invalid() {
        assert!(Uniform::new(5.0, 5.0).is_err());
        assert!(Uniform::new(5.0, 3.0).is_err());
        assert!(Uniform::new(f64::NAN, 5.0).is_err());
    }

    // ----- Triangular -----
    #[test]
    fn test_triangular_mean() {
        let t = Triangular::new(0.0, 3.0, 6.0).unwrap();
        assert!((t.mean() - 3.0).abs() < 1e-15);
    }
    #[test]
    fn test_triangular_symmetric_variance() {
        let t = Triangular::new(0.0, 5.0, 10.0).unwrap();
        let expected = (0.0 + 25.0 + 100.0 - 0.0 - 0.0 - 50.0) / 18.0;
        assert!((t.variance() - expected).abs() < 1e-10);
    }
    #[test]
    fn test_triangular_cdf() {
        let t = Triangular::new(0.0, 5.0, 10.0).unwrap();
        assert!((t.cdf(0.0)).abs() < 1e-15);
        assert!((t.cdf(10.0) - 1.0).abs() < 1e-15);
        assert!((t.cdf(5.0) - 0.5).abs() < 1e-15);
    }
    #[test]
    fn test_triangular_quantile() {
        let t = Triangular::new(0.0, 5.0, 10.0).unwrap();
        assert!((t.quantile(0.0).unwrap() - 0.0).abs() < 1e-15);
        assert!((t.quantile(1.0).unwrap() - 10.0).abs() < 1e-15);
        assert!((t.quantile(0.5).unwrap() - 5.0).abs() < 1e-10);
    }
    #[test]
    fn test_triangular_invalid() {
        assert!(Triangular::new(5.0, 3.0, 10.0).is_err());
        assert!(Triangular::new(0.0, 11.0, 10.0).is_err());
        assert!(Triangular::new(5.0, 5.0, 5.0).is_err());
    }

    // ----- Normal -----
    #[test]
    fn test_normal_standard() {
        let n = Normal::new(0.0, 1.0).unwrap();
        assert!((n.mean()).abs() < 1e-15);
        assert!((n.variance() - 1.0).abs() < 1e-15);
        assert!((n.cdf(0.0) - 0.5).abs() < 1e-7);
    }
    #[test]
    fn test_normal_shifted() {
        let n = Normal::new(10.0, 2.0).unwrap();
        assert!((n.mean() - 10.0).abs() < 1e-15);
        assert!((n.variance() - 4.0).abs() < 1e-15);
        assert!((n.cdf(10.0) - 0.5).abs() < 1e-7);
    }
    #[test]
    fn test_normal_quantile() {
        let n = Normal::new(0.0, 1.0).unwrap();
        assert!((n.quantile(0.5).unwrap()).abs() < 0.01);
        assert!((n.quantile(0.975).unwrap() - 1.96).abs() < 0.01);
    }
    #[test]
    fn test_normal_invalid() {
        assert!(Normal::new(0.0, 0.0).is_err());
        assert!(Normal::new(0.0, -1.0).is_err());
    }

    // ----- LogNormal -----
    #[test]
    fn test_lognormal_mean() {
        let ln = LogNormal::new(0.0, 1.0).unwrap();
        // E[X] = exp(mu + sigma^2 / 2) = exp(0.5) here.
        let expected = (0.5_f64).exp();
        assert!((ln.mean() - expected).abs() < 1e-10);
    }
    #[test]
    fn test_lognormal_cdf() {
        let ln = LogNormal::new(0.0, 1.0).unwrap();
        assert_eq!(ln.cdf(0.0), 0.0);
        assert!((ln.cdf(1.0) - 0.5).abs() < 0.001);
    }
    #[test]
    fn test_lognormal_quantile() {
        let ln = LogNormal::new(0.0, 1.0).unwrap();
        let q50 = ln.quantile(0.5).unwrap();
        assert!((q50 - 1.0).abs() < 0.01);
    }

    // ----- PERT -----
    #[test]
    fn test_pert_mean() {
        let p = Pert::new(1.0, 4.0, 7.0).unwrap();
        assert!((p.mean() - 4.0).abs() < 1e-10);
    }
    #[test]
    fn test_pert_symmetric_variance() {
        let p = Pert::new(0.0, 5.0, 10.0).unwrap();
        let expected = 9.0 / (36.0 * 7.0) * 100.0;
        assert!(
            (p.variance() - expected).abs() < 1e-10,
            "PERT variance: {} vs expected: {}",
            p.variance(),
            expected
        );
    }
    #[test]
    fn test_pert_shape_params() {
        let p = Pert::new(0.0, 5.0, 10.0).unwrap();
        assert!((p.alpha() - 3.0).abs() < 1e-15);
        assert!((p.beta_param() - 3.0).abs() < 1e-15);
    }
    #[test]
    fn test_pert_cdf_bounds() {
        let p = Pert::new(1.0, 4.0, 7.0).unwrap();
        assert_eq!(p.cdf(0.0), 0.0);
        assert_eq!(p.cdf(8.0), 1.0);
        let p_sym = Pert::new(0.0, 5.0, 10.0).unwrap();
        assert!((p_sym.cdf(5.0) - 0.5).abs() < 0.01);
    }
    #[test]
    fn test_pert_cdf_monotonic() {
        let p = Pert::new(0.0, 3.0, 10.0).unwrap();
        let mut prev = 0.0;
        for i in 0..=100 {
            let x = i as f64 * 0.1;
            let c = p.cdf(x);
            assert!(c >= prev - 1e-15, "CDF not monotonic at x={x}");
            prev = c;
        }
    }
    #[test]
    fn test_pert_quantile_approx() {
        let p = Pert::new(0.0, 5.0, 10.0).unwrap();
        let q50 = p.quantile_approx(0.5).unwrap();
        assert!((q50 - 5.0).abs() < 0.5, "median approx: {q50}");
    }
    #[test]
    fn test_pert_invalid() {
        assert!(Pert::new(5.0, 3.0, 10.0).is_err());
        assert!(Pert::new(5.0, 5.0, 5.0).is_err());
    }
    #[test]
    fn test_pert_with_lambda() {
        let p4 = Pert::with_shape(0.0, 5.0, 10.0, 4.0).unwrap();
        let p8 = Pert::with_shape(0.0, 5.0, 10.0, 8.0).unwrap();
        assert!(
            p8.variance() < p4.variance(),
            "higher λ should give lower variance"
        );
    }

    // ----- special-function sanity checks -----
    #[test]
    fn test_regularized_beta_bounds() {
        assert_eq!(special::regularized_incomplete_beta(0.0, 2.0, 3.0), 0.0);
        assert_eq!(special::regularized_incomplete_beta(1.0, 2.0, 3.0), 1.0);
    }
    #[test]
    fn test_regularized_beta_symmetric() {
        let result = special::regularized_incomplete_beta(0.5, 3.0, 3.0);
        assert!(
            (result - 0.5).abs() < 1e-8,
            "I_0.5(3,3) = {result}, expected 0.5"
        );
    }
    #[test]
    fn test_regularized_beta_known_values() {
        // I_x(1,1) is the identity; I_x(1,b) has closed form 1 - (1-x)^b.
        for &x in &[0.1, 0.3, 0.5, 0.7, 0.9] {
            let result = special::regularized_incomplete_beta(x, 1.0, 1.0);
            assert!(
                (result - x).abs() < 1e-10,
                "I_{x}(1,1) = {result}, expected {x}"
            );
        }
        for &x in &[0.1, 0.5, 0.9] {
            let result = special::regularized_incomplete_beta(x, 1.0, 3.0);
            let expected = 1.0 - (1.0 - x).powi(3);
            assert!(
                (result - expected).abs() < 1e-10,
                "I_{x}(1,3) = {result}, expected {expected}"
            );
        }
    }
    #[test]
    fn test_ln_gamma_known() {
        assert!((special::ln_gamma(1.0)).abs() < 1e-10);
        assert!((special::ln_gamma(2.0)).abs() < 1e-10);
        assert!((special::ln_gamma(3.0) - 2.0_f64.ln()).abs() < 1e-10);
        assert!((special::ln_gamma(5.0) - 24.0_f64.ln()).abs() < 1e-10);
        assert!(
            (special::ln_gamma(0.5) - std::f64::consts::PI.sqrt().ln()).abs() < 1e-10,
            "ln Γ(0.5) = {}, expected {}",
            special::ln_gamma(0.5),
            std::f64::consts::PI.sqrt().ln()
        );
    }

    // ----- Weibull -----
    #[test]
    fn test_weibull_exponential_special_case() {
        let w = Weibull::new(1.0, 5.0).unwrap();
        assert!((w.mean() - 5.0).abs() < 1e-10);
        assert!((w.variance() - 25.0).abs() < 1e-8);
    }
    #[test]
    fn test_weibull_rayleigh_special_case() {
        let w = Weibull::new(2.0, 1.0).unwrap();
        let expected_mean = std::f64::consts::PI.sqrt() / 2.0;
        assert!(
            (w.mean() - expected_mean).abs() < 1e-10,
            "Weibull(2,1) mean = {}, expected {}",
            w.mean(),
            expected_mean
        );
    }
    #[test]
    fn test_weibull_cdf_known_values() {
        let w = Weibull::new(2.0, 10.0).unwrap();
        assert!((w.cdf(10.0) - (1.0 - (-1.0_f64).exp())).abs() < 1e-10);
        assert_eq!(w.cdf(0.0), 0.0);
        assert_eq!(w.cdf(-1.0), 0.0);
    }
    #[test]
    fn test_weibull_pdf_basic() {
        let w = Weibull::new(1.0, 1.0).unwrap();
        assert!((w.pdf(0.0) - 1.0).abs() < 1e-10);
        assert!((w.pdf(1.0) - (-1.0_f64).exp()).abs() < 1e-10);
        assert!((w.pdf(2.0) - (-2.0_f64).exp()).abs() < 1e-10);
    }
    #[test]
    fn test_weibull_pdf_negative() {
        let w = Weibull::new(2.0, 5.0).unwrap();
        assert_eq!(w.pdf(-1.0), 0.0);
    }
    #[test]
    fn test_weibull_quantile_roundtrip() {
        let w = Weibull::new(2.5, 100.0).unwrap();
        for &p in &[0.1, 0.25, 0.5, 0.75, 0.9] {
            let t = w.quantile(p).unwrap();
            let p_back = w.cdf(t);
            assert!(
                (p_back - p).abs() < 1e-10,
                "roundtrip: p={p} -> t={t} -> p_back={p_back}"
            );
        }
    }
    #[test]
    fn test_weibull_quantile_edge_cases() {
        let w = Weibull::new(2.0, 10.0).unwrap();
        assert_eq!(w.quantile(0.0), Some(0.0));
        assert_eq!(w.quantile(1.0), None);
        assert_eq!(w.quantile(-0.1), None);
    }
    #[test]
    fn test_weibull_reliability() {
        let w = Weibull::new(2.0, 10.0).unwrap();
        for &t in &[1.0, 5.0, 10.0, 20.0] {
            let r = w.reliability(t);
            let f = w.cdf(t);
            assert!((r + f - 1.0).abs() < 1e-14);
        }
    }
    #[test]
    fn test_weibull_hazard_rate() {
        // Shape 1 gives a constant hazard of 1/scale.
        let w = Weibull::new(1.0, 5.0).unwrap();
        for &t in &[1.0, 5.0, 10.0] {
            assert!((w.hazard_rate(t) - 0.2).abs() < 1e-10);
        }
        let w2 = Weibull::new(2.0, 10.0).unwrap();
        assert!((w2.hazard_rate(5.0) - 2.0 * 5.0 / 100.0).abs() < 1e-10);
    }
    #[test]
    fn test_weibull_invalid() {
        assert!(Weibull::new(0.0, 1.0).is_err());
        assert!(Weibull::new(-1.0, 1.0).is_err());
        assert!(Weibull::new(1.0, 0.0).is_err());
        assert!(Weibull::new(1.0, -1.0).is_err());
        assert!(Weibull::new(f64::NAN, 1.0).is_err());
        assert!(Weibull::new(1.0, f64::INFINITY).is_err());
    }

    // ----- Exponential -----
    #[test]
    fn test_exponential_basic() {
        let e = Exponential::new(0.5).unwrap();
        assert!((e.rate() - 0.5).abs() < 1e-10);
        assert!((e.mean() - 2.0).abs() < 1e-10);
        assert!((e.variance() - 4.0).abs() < 1e-10);
    }
    #[test]
    fn test_exponential_pdf() {
        let e = Exponential::new(1.0).unwrap();
        assert!((e.pdf(0.0) - 1.0).abs() < 1e-10);
        assert!((e.pdf(1.0) - (-1.0_f64).exp()).abs() < 1e-10);
        assert_eq!(e.pdf(-1.0), 0.0);
    }
    #[test]
    fn test_exponential_cdf() {
        let e = Exponential::new(1.0).unwrap();
        assert_eq!(e.cdf(-1.0), 0.0);
        assert_eq!(e.cdf(0.0), 0.0);
        assert!((e.cdf(1.0) - (1.0 - (-1.0_f64).exp())).abs() < 1e-10);
    }
    #[test]
    fn test_exponential_quantile() {
        let e = Exponential::new(2.0).unwrap();
        assert_eq!(e.quantile(0.0), Some(0.0));
        assert_eq!(e.quantile(1.0), Some(f64::INFINITY));
        let q = e.quantile(0.5).unwrap();
        assert!((q - 2.0_f64.ln() / 2.0).abs() < 1e-10);
        assert!(e.quantile(-0.1).is_none());
        assert!(e.quantile(1.1).is_none());
    }
    #[test]
    fn test_exponential_quantile_roundtrip() {
        let e = Exponential::new(3.0).unwrap();
        for &p in &[0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99] {
            let x = e.quantile(p).unwrap();
            let p_back = e.cdf(x);
            assert!((p_back - p).abs() < 1e-10, "p={p}, x={x}, p_back={p_back}");
        }
    }
    #[test]
    fn test_exponential_memoryless() {
        // P(X > s+t | X > s) = P(X > t).
        let e = Exponential::new(1.5).unwrap();
        let s = 2.0;
        let t = 3.0;
        let lhs = e.survival(s + t) / e.survival(s);
        let rhs = e.survival(t);
        assert!((lhs - rhs).abs() < 1e-10);
    }
    #[test]
    fn test_exponential_invalid() {
        assert!(Exponential::new(0.0).is_err());
        assert!(Exponential::new(-1.0).is_err());
        assert!(Exponential::new(f64::NAN).is_err());
        assert!(Exponential::new(f64::INFINITY).is_err());
    }

    // ----- Gamma -----
    #[test]
    fn test_gamma_basic() {
        let g = GammaDistribution::new(2.0, 1.0).unwrap();
        assert!((g.shape() - 2.0).abs() < 1e-10);
        assert!((g.rate() - 1.0).abs() < 1e-10);
        assert!((g.scale() - 1.0).abs() < 1e-10);
        assert!((g.mean() - 2.0).abs() < 1e-10);
        assert!((g.variance() - 2.0).abs() < 1e-10);
        assert!((g.mode() - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_gamma_from_shape_scale() {
        let g = GammaDistribution::from_shape_scale(3.0, 2.0).unwrap();
        assert!((g.shape() - 3.0).abs() < 1e-10);
        assert!((g.rate() - 0.5).abs() < 1e-10);
        assert!((g.mean() - 6.0).abs() < 1e-10);
    }
    #[test]
    fn test_gamma_pdf_integral() {
        // Midpoint-rule integral of the density should be ~1.
        let g = GammaDistribution::new(3.0, 2.0).unwrap();
        let n = 20_000;
        let dt = 20.0 / n as f64;
        let integral: f64 = (0..n)
            .map(|i| {
                let x = (i as f64 + 0.5) * dt;
                g.pdf(x) * dt
            })
            .sum();
        assert!((integral - 1.0).abs() < 0.01, "PDF integral = {integral}");
    }
    #[test]
    fn test_gamma_cdf_known() {
        let g = GammaDistribution::new(1.0, 1.0).unwrap();
        let expected = 1.0 - (-1.0_f64).exp();
        assert!((g.cdf(1.0) - expected).abs() < 1e-10);
    }
    #[test]
    fn test_gamma_cdf_chi2() {
        // Gamma(1, 1/2) coincides with the chi-squared(2) / exponential CDF.
        let g = GammaDistribution::new(1.0, 0.5).unwrap();
        let x = 4.0;
        let expected = 1.0 - (-0.5_f64 * x).exp();
        assert!(
            (g.cdf(x) - expected).abs() < 1e-8,
            "CDF({x}) = {} vs {expected}",
            g.cdf(x)
        );
    }
    #[test]
    fn test_gamma_quantile_roundtrip() {
        let g = GammaDistribution::new(5.0, 2.0).unwrap();
        for &p in &[0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99] {
            let x = g.quantile(p).unwrap();
            let p_back = g.cdf(x);
            assert!((p_back - p).abs() < 1e-6, "p={p}, x={x}, p_back={p_back}");
        }
    }
    #[test]
    fn test_gamma_mode_small_shape() {
        let g = GammaDistribution::new(0.5, 1.0).unwrap();
        assert_eq!(g.mode(), 0.0);
    }
    #[test]
    fn test_gamma_invalid() {
        assert!(GammaDistribution::new(0.0, 1.0).is_err());
        assert!(GammaDistribution::new(-1.0, 1.0).is_err());
        assert!(GammaDistribution::new(1.0, 0.0).is_err());
        assert!(GammaDistribution::new(1.0, -1.0).is_err());
        assert!(GammaDistribution::new(f64::NAN, 1.0).is_err());
    }

    // ----- Beta -----
    #[test]
    fn test_beta_mean_variance() {
        let b = BetaDistribution::new(2.0, 5.0).unwrap();
        assert!((b.mean() - 2.0 / 7.0).abs() < 1e-10);
        let expected_var = 2.0 * 5.0 / (7.0 * 7.0 * 8.0);
        assert!((b.variance() - expected_var).abs() < 1e-10);
    }
    #[test]
    fn test_beta_symmetric() {
        let b = BetaDistribution::new(3.0, 3.0).unwrap();
        assert!((b.mean() - 0.5).abs() < 1e-10);
        assert!((b.mode().unwrap() - 0.5).abs() < 1e-10);
        assert!((b.pdf(0.3) - b.pdf(0.7)).abs() < 1e-10);
    }
    #[test]
    fn test_beta_uniform_special_case() {
        let b = BetaDistribution::new(1.0, 1.0).unwrap();
        assert!((b.mean() - 0.5).abs() < 1e-10);
        assert!((b.cdf(0.5) - 0.5).abs() < 1e-10);
        assert!((b.cdf(0.25) - 0.25).abs() < 1e-10);
    }
    #[test]
    fn test_beta_cdf_boundaries() {
        let b = BetaDistribution::new(2.0, 3.0).unwrap();
        assert_eq!(b.cdf(0.0), 0.0);
        assert_eq!(b.cdf(-1.0), 0.0);
        assert_eq!(b.cdf(1.0), 1.0);
        assert_eq!(b.cdf(2.0), 1.0);
    }
    #[test]
    fn test_beta_pdf_integral() {
        let b = BetaDistribution::new(2.0, 5.0).unwrap();
        let n = 10_000;
        let dt = 1.0 / n as f64;
        let integral: f64 = (0..n)
            .map(|i| {
                let x = (i as f64 + 0.5) * dt;
                b.pdf(x) * dt
            })
            .sum();
        assert!((integral - 1.0).abs() < 0.01, "PDF integral = {integral}");
    }
    #[test]
    fn test_beta_quantile_roundtrip() {
        let b = BetaDistribution::new(2.0, 5.0).unwrap();
        for &p in &[0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99] {
            let x = b.quantile(p).unwrap();
            let p_back = b.cdf(x);
            assert!((p_back - p).abs() < 1e-6, "p={p}, x={x}, p_back={p_back}");
        }
    }
    #[test]
    fn test_beta_mode() {
        let b = BetaDistribution::new(5.0, 3.0).unwrap();
        assert!((b.mode().unwrap() - 2.0 / 3.0).abs() < 1e-10);
    }
    #[test]
    fn test_beta_mode_undefined() {
        let b = BetaDistribution::new(0.5, 0.5).unwrap();
        assert!(b.mode().is_none());
    }
    #[test]
    fn test_beta_invalid() {
        assert!(BetaDistribution::new(0.0, 1.0).is_err());
        assert!(BetaDistribution::new(-1.0, 1.0).is_err());
        assert!(BetaDistribution::new(1.0, 0.0).is_err());
        assert!(BetaDistribution::new(f64::NAN, 1.0).is_err());
    }

    // ----- Chi-squared -----
    #[test]
    fn test_chi2_mean_variance() {
        let chi2 = ChiSquared::new(5.0).unwrap();
        assert!((chi2.mean() - 5.0).abs() < 1e-10);
        assert!((chi2.variance() - 10.0).abs() < 1e-10);
    }
    #[test]
    fn test_chi2_exp_special_case() {
        // chi-squared(2) is Exponential(1/2).
        let chi2 = ChiSquared::new(2.0).unwrap();
        let x = 4.0;
        let expected = 1.0 - (-x / 2.0_f64).exp();
        assert!(
            (chi2.cdf(x) - expected).abs() < 1e-8,
            "CDF({x}) = {} vs {expected}",
            chi2.cdf(x)
        );
    }
    #[test]
    fn test_chi2_cdf_boundaries() {
        let chi2 = ChiSquared::new(3.0).unwrap();
        assert_eq!(chi2.cdf(0.0), 0.0);
        assert_eq!(chi2.cdf(-1.0), 0.0);
        assert!(chi2.cdf(100.0) > 0.999);
    }
    #[test]
    fn test_chi2_pdf_integral() {
        let chi2 = ChiSquared::new(4.0).unwrap();
        let n = 20_000;
        let dt = 30.0 / n as f64;
        let integral: f64 = (0..n)
            .map(|i| {
                let x = (i as f64 + 0.5) * dt;
                chi2.pdf(x) * dt
            })
            .sum();
        assert!((integral - 1.0).abs() < 0.01, "PDF integral = {integral}");
    }
    #[test]
    fn test_chi2_quantile_roundtrip() {
        let chi2 = ChiSquared::new(5.0).unwrap();
        for &p in &[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99] {
            let x = chi2.quantile(p).unwrap();
            let p_back = chi2.cdf(x);
            assert!((p_back - p).abs() < 1e-6, "p={p}, x={x}, p_back={p_back}");
        }
    }
    #[test]
    fn test_chi2_known_quantiles() {
        let chi2 = ChiSquared::new(5.0).unwrap();
        let q05 = chi2.quantile(0.05).unwrap();
        assert!((q05 - 1.1455).abs() < 0.01, "q(0.05) = {q05}");
        let q95 = chi2.quantile(0.95).unwrap();
        assert!((q95 - 11.0705).abs() < 0.01, "q(0.95) = {q95}");
    }
    #[test]
    fn test_chi2_invalid() {
        assert!(ChiSquared::new(0.0).is_err());
        assert!(ChiSquared::new(-1.0).is_err());
        assert!(ChiSquared::new(f64::NAN).is_err());
    }
    #[test]
    fn test_chi2_gamma_consistency() {
        let k = 6.0;
        let chi2 = ChiSquared::new(k).unwrap();
        let gamma = GammaDistribution::new(k / 2.0, 0.5).unwrap();
        let x = 5.0;
        assert!(
            (chi2.cdf(x) - gamma.cdf(x)).abs() < 1e-10,
            "Chi2 and Gamma CDF should match"
        );
    }

    // ----- Additional Weibull sanity checks -----
    #[test]
    fn test_weibull_mean_variance_known() {
        let w = Weibull::new(3.6, 1000.0).unwrap();
        let mean = w.mean();
        assert!(mean > 800.0 && mean < 1000.0, "mean={mean}");
        assert!(w.variance() > 0.0);
    }
    #[test]
    fn test_weibull_pdf_integral_approx() {
        let w = Weibull::new(2.0, 10.0).unwrap();
        let n = 10_000;
        let dt = 50.0 / n as f64;
        let integral: f64 = (0..n)
            .map(|i| {
                let t = (i as f64 + 0.5) * dt;
                w.pdf(t) * dt
            })
            .sum();
        assert!(
            (integral - 1.0).abs() < 0.01,
            "PDF integral = {integral}, expected ≈ 1.0"
        );
    }
}
// Property-based tests (proptest) exercising distribution invariants over
// randomized parameters: CDF values stay in [0, 1], quantile/CDF round-trips
// recover the input probability, variances are positive, and closed-form
// identities (PERT mean formula, CDF + reliability = 1) hold.
#[cfg(test)]
mod proptests {
use super::*;
use proptest::prelude::*;
proptest! {
// Run more cases than proptest's default for broader parameter coverage.
#![proptest_config(ProptestConfig::with_cases(300))]
// A CDF is a probability for ANY x, including x outside [min, max]
// (the strategy deliberately allows x beyond both endpoints).
#[test]
fn uniform_cdf_in_01(
min in -100.0_f64..0.0,
max in 1.0_f64..100.0,
x in -200.0_f64..200.0,
) {
let u = Uniform::new(min, max).unwrap();
let c = u.cdf(x);
prop_assert!((0.0..=1.0).contains(&c));
}
// cdf(quantile(p)) == p across the full closed range [0, 1]. The Uniform
// quantile/CDF pair is linear, so the tolerance can be near machine
// precision.
#[test]
fn uniform_quantile_roundtrip(
min in -100.0_f64..0.0,
max in 1.0_f64..100.0,
p in 0.0_f64..=1.0,
) {
let u = Uniform::new(min, max).unwrap();
let x = u.quantile(p).unwrap();
let p_back = u.cdf(x);
prop_assert!((p_back - p).abs() < 1e-12, "roundtrip: p={p} -> x={x} -> p_back={p_back}");
}
// mode is derived as a fraction of the range, so min <= mode <= max holds
// by construction (mode may coincide with an endpoint, which the
// constructor permits); the CDF must still be a probability for any x.
#[test]
fn triangular_cdf_in_01(
min in -100.0_f64..-1.0,
mode_frac in 0.0_f64..=1.0,
range in 1.0_f64..100.0,
x in -200.0_f64..200.0,
) {
let max = min + range;
let mode = min + mode_frac * range;
let t = Triangular::new(min, mode, max).unwrap();
let c = t.cdf(x);
prop_assert!((0.0..=1.0).contains(&c));
}
// Quantile/CDF round-trip with mode strictly interior (mode_frac in
// (0.01, 0.99)) and p away from the exact endpoints; looser tolerance
// than Uniform because the triangular quantile involves a square root.
#[test]
fn triangular_quantile_roundtrip(
min in -50.0_f64..0.0,
mode_frac in 0.01_f64..0.99,
range in 1.0_f64..50.0,
p in 0.001_f64..0.999,
) {
let max = min + range;
let mode = min + mode_frac * range;
let t = Triangular::new(min, mode, max).unwrap();
let x = t.quantile(p).unwrap();
let p_back = t.cdf(x);
prop_assert!(
(p_back - p).abs() < 1e-8,
"roundtrip: p={p} -> x={x} -> p_back={p_back}"
);
}
// PERT mean must match the classic three-point estimate
// (min + 4*mode + max) / 6 for any valid parameterization.
#[test]
fn pert_mean_formula(
min in -50.0_f64..0.0,
mode_frac in 0.01_f64..0.99,
range in 1.0_f64..50.0,
) {
let max = min + range;
let mode = min + mode_frac * range;
let p = Pert::new(min, mode, max).unwrap();
let expected = (min + 4.0 * mode + max) / 6.0;
prop_assert!(
(p.mean() - expected).abs() < 1e-10,
"PERT mean: {} vs expected: {}",
p.mean(),
expected
);
}
// Variance of a non-degenerate PERT distribution is strictly positive.
#[test]
fn pert_variance_positive(
min in -50.0_f64..0.0,
mode_frac in 0.01_f64..0.99,
range in 1.0_f64..50.0,
) {
let max = min + range;
let mode = min + mode_frac * range;
let p = Pert::new(min, mode, max).unwrap();
prop_assert!(p.variance() > 0.0);
}
// The CDF must be non-decreasing; sampled on a 21-point grid across
// [min, max], with a tiny slack (1e-10) to tolerate float noise.
#[test]
fn pert_cdf_monotonic(
min in -50.0_f64..0.0,
mode_frac in 0.05_f64..0.95,
range in 2.0_f64..50.0,
) {
let max = min + range;
let mode = min + mode_frac * range;
let p = Pert::new(min, mode, max).unwrap();
let mut prev = 0.0;
for i in 0..=20 {
let x = min + (i as f64 / 20.0) * range;
let c = p.cdf(x);
prop_assert!(c >= prev - 1e-10, "CDF not monotonic at x={x}");
prev = c;
}
}
// Weibull CDF is a probability for any t >= 0, across shape values both
// below and above 1 (different hazard regimes).
#[test]
fn weibull_cdf_in_01(
shape in 0.1_f64..10.0,
scale in 0.1_f64..100.0,
t in 0.0_f64..200.0,
) {
let w = Weibull::new(shape, scale).unwrap();
let c = w.cdf(t);
prop_assert!((0.0..=1.0).contains(&c), "CDF({t}) = {c} out of [0,1]");
}
// Quantile/CDF round-trip; p and shape are bounded away from the extremes
// where the inverse transform loses precision.
#[test]
fn weibull_quantile_roundtrip(
shape in 0.5_f64..10.0,
scale in 1.0_f64..100.0,
p in 0.001_f64..0.999,
) {
let w = Weibull::new(shape, scale).unwrap();
let t = w.quantile(p).unwrap();
let p_back = w.cdf(t);
prop_assert!(
(p_back - p).abs() < 1e-8,
"roundtrip: p={p} -> t={t} -> p_back={p_back}"
);
}
// A density can never be negative.
#[test]
fn weibull_pdf_non_negative(
shape in 0.1_f64..10.0,
scale in 0.1_f64..100.0,
t in 0.0_f64..200.0,
) {
let w = Weibull::new(shape, scale).unwrap();
prop_assert!(w.pdf(t) >= 0.0, "PDF({t}) must be >= 0");
}
// Reliability (survival) function is the CDF's complement, so the two
// must sum to exactly 1 up to rounding.
#[test]
fn weibull_reliability_plus_cdf_is_one(
shape in 0.5_f64..5.0,
scale in 1.0_f64..50.0,
t in 0.001_f64..100.0,
) {
let w = Weibull::new(shape, scale).unwrap();
let sum = w.cdf(t) + w.reliability(t);
prop_assert!(
(sum - 1.0).abs() < 1e-12,
"CDF + R should = 1, got {sum}"
);
}
// Weibull variance is strictly positive for all valid shape/scale pairs.
#[test]
fn weibull_variance_positive(
shape in 0.1_f64..10.0,
scale in 0.1_f64..100.0,
) {
let w = Weibull::new(shape, scale).unwrap();
prop_assert!(w.variance() > 0.0, "variance must be positive");
}
// Exponential CDF is a probability for any x >= 0.
#[test]
fn exponential_cdf_in_01(
rate in 0.01_f64..10.0,
x in 0.0_f64..100.0,
) {
let e = Exponential::new(rate).unwrap();
let c = e.cdf(x);
prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
}
// The exponential quantile has a simple closed form, so the round-trip
// tolerance is tighter than for the numerically inverted distributions.
#[test]
fn exponential_quantile_roundtrip(
rate in 0.01_f64..10.0,
p in 0.001_f64..0.999,
) {
let e = Exponential::new(rate).unwrap();
let x = e.quantile(p).unwrap();
let p_back = e.cdf(x);
prop_assert!((p_back - p).abs() < 1e-10);
}
// Gamma CDF (regularized incomplete gamma) is a probability for x > 0.
#[test]
fn gamma_cdf_in_01(
shape in 0.5_f64..10.0,
rate in 0.1_f64..10.0,
x in 0.001_f64..50.0,
) {
let g = GammaDistribution::new(shape, rate).unwrap();
let c = g.cdf(x);
prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
}
// Gamma variance is strictly positive for all valid shape/rate pairs.
#[test]
fn gamma_variance_positive(
shape in 0.1_f64..10.0,
rate in 0.1_f64..10.0,
) {
let g = GammaDistribution::new(shape, rate).unwrap();
prop_assert!(g.variance() > 0.0, "variance must be positive");
}
// Beta CDF (regularized incomplete beta) is a probability on (0, 1).
#[test]
fn beta_cdf_in_01(
alpha in 0.5_f64..10.0,
beta_param in 0.5_f64..10.0,
x in 0.001_f64..0.999,
) {
let b = BetaDistribution::new(alpha, beta_param).unwrap();
let c = b.cdf(x);
prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
}
// Beta quantile is presumably found by numeric inversion of the CDF —
// hence the comparatively loose 1e-5 round-trip tolerance.
#[test]
fn beta_quantile_roundtrip(
alpha in 1.0_f64..10.0,
beta_param in 1.0_f64..10.0,
p in 0.01_f64..0.99,
) {
let b = BetaDistribution::new(alpha, beta_param).unwrap();
let x = b.quantile(p).unwrap();
let p_back = b.cdf(x);
prop_assert!((p_back - p).abs() < 1e-5, "p={p}, p_back={p_back}");
}
// Chi-squared CDF is a probability for x > 0 over a range of degrees of
// freedom, including fractional k.
#[test]
fn chi2_cdf_in_01(
k in 1.0_f64..20.0,
x in 0.001_f64..50.0,
) {
let chi2 = ChiSquared::new(k).unwrap();
let c = chi2.cdf(x);
prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
}
// Chi-squared variance is strictly positive for any k > 0.
#[test]
fn chi2_variance_positive(
k in 0.5_f64..50.0,
) {
let chi2 = ChiSquared::new(k).unwrap();
prop_assert!(chi2.variance() > 0.0, "variance must be positive");
}
}
}