1use crate::special;
27
/// Error returned when a distribution is constructed with invalid parameters.
#[derive(Debug, Clone, PartialEq)]
pub enum DistributionError {
    /// The supplied parameters violate the distribution's constraints
    /// (e.g. non-finite values, an empty range, or a non-positive scale).
    InvalidParameters(String),
}
34
35impl std::fmt::Display for DistributionError {
36 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
37 match self {
38 DistributionError::InvalidParameters(msg) => {
39 write!(f, "invalid distribution parameters: {msg}")
40 }
41 }
42 }
43}
44
// Marker impl: the error carries no source, so the default trait methods suffice.
impl std::error::Error for DistributionError {}
46
/// Continuous uniform distribution on the interval `[min, max]`.
///
/// Invariant (enforced by [`Uniform::new`]): both bounds are finite and
/// `min < max`.
#[derive(Debug, Clone, PartialEq)]
pub struct Uniform {
    // Lower bound of the support.
    min: f64,
    // Upper bound of the support (strictly greater than `min`).
    max: f64,
}
63
64impl Uniform {
65 pub fn new(min: f64, max: f64) -> Result<Self, DistributionError> {
70 if !min.is_finite() || !max.is_finite() || min >= max {
71 return Err(DistributionError::InvalidParameters(format!(
72 "Uniform requires min < max, got min={min}, max={max}"
73 )));
74 }
75 Ok(Self { min, max })
76 }
77
78 pub fn min(&self) -> f64 {
79 self.min
80 }
81
82 pub fn max(&self) -> f64 {
83 self.max
84 }
85
86 pub fn mean(&self) -> f64 {
87 (self.min + self.max) / 2.0
88 }
89
90 pub fn variance(&self) -> f64 {
91 let range = self.max - self.min;
92 range * range / 12.0
93 }
94
95 pub fn cdf(&self, x: f64) -> f64 {
97 if x <= self.min {
98 0.0
99 } else if x >= self.max {
100 1.0
101 } else {
102 (x - self.min) / (self.max - self.min)
103 }
104 }
105
106 pub fn quantile(&self, p: f64) -> Option<f64> {
110 if !(0.0..=1.0).contains(&p) {
111 return None;
112 }
113 Some(self.min + p * (self.max - self.min))
114 }
115
116 pub fn pdf(&self, x: f64) -> f64 {
118 if x >= self.min && x <= self.max {
119 1.0 / (self.max - self.min)
120 } else {
121 0.0
122 }
123 }
124}
125
/// Triangular distribution on `[min, max]` with peak density at `mode`.
///
/// Invariant (enforced by [`Triangular::new`]): all parameters are finite,
/// `min <= mode <= max`, and `min < max`.
#[derive(Debug, Clone, PartialEq)]
pub struct Triangular {
    min: f64,
    mode: f64,
    max: f64,
}
146
147impl Triangular {
148 pub fn new(min: f64, mode: f64, max: f64) -> Result<Self, DistributionError> {
153 if !min.is_finite() || !mode.is_finite() || !max.is_finite() {
154 return Err(DistributionError::InvalidParameters(
155 "Triangular parameters must be finite".into(),
156 ));
157 }
158 if min > mode || mode > max || min >= max {
159 return Err(DistributionError::InvalidParameters(format!(
160 "Triangular requires min ≤ mode ≤ max and min < max, got {min}, {mode}, {max}"
161 )));
162 }
163 Ok(Self { min, mode, max })
164 }
165
166 pub fn min(&self) -> f64 {
167 self.min
168 }
169
170 pub fn mode(&self) -> f64 {
171 self.mode
172 }
173
174 pub fn max(&self) -> f64 {
175 self.max
176 }
177
178 pub fn mean(&self) -> f64 {
180 (self.min + self.mode + self.max) / 3.0
181 }
182
183 pub fn variance(&self) -> f64 {
185 let (a, b, c) = (self.min, self.mode, self.max);
186 (a * a + b * b + c * c - a * b - a * c - b * c) / 18.0
187 }
188
189 pub fn pdf(&self, x: f64) -> f64 {
197 let (a, b, c) = (self.min, self.mode, self.max);
198 if x < a || x > c {
199 0.0
200 } else if x <= b {
201 2.0 * (x - a) / ((c - a) * (b - a).max(f64::MIN_POSITIVE))
202 } else {
203 2.0 * (c - x) / ((c - a) * (c - b).max(f64::MIN_POSITIVE))
204 }
205 }
206
207 pub fn cdf(&self, x: f64) -> f64 {
214 let (a, b, c) = (self.min, self.mode, self.max);
215 if x <= a {
216 0.0
217 } else if x <= b {
218 (x - a) * (x - a) / ((c - a) * (b - a).max(f64::MIN_POSITIVE))
219 } else if x < c {
220 1.0 - (c - x) * (c - x) / ((c - a) * (c - b).max(f64::MIN_POSITIVE))
221 } else {
222 1.0
223 }
224 }
225
226 pub fn quantile(&self, p: f64) -> Option<f64> {
235 if !(0.0..=1.0).contains(&p) {
236 return None;
237 }
238 let (a, b, c) = (self.min, self.mode, self.max);
239 let fc = (b - a) / (c - a); if p < fc {
241 Some(a + ((c - a) * (b - a) * p).sqrt())
242 } else {
243 Some(c - ((c - a) * (c - b) * (1.0 - p)).sqrt())
244 }
245 }
246}
247
/// Normal (Gaussian) distribution with mean `mu` and standard deviation `sigma`.
///
/// Invariant (enforced by [`Normal::new`]): `mu` is finite and `sigma` is
/// finite and strictly positive.
#[derive(Debug, Clone, PartialEq)]
pub struct Normal {
    mu: f64,
    sigma: f64,
}
264
265impl Normal {
266 pub fn new(mu: f64, sigma: f64) -> Result<Self, DistributionError> {
271 if !mu.is_finite() || !sigma.is_finite() || sigma <= 0.0 {
272 return Err(DistributionError::InvalidParameters(format!(
273 "Normal requires finite μ and σ > 0, got μ={mu}, σ={sigma}"
274 )));
275 }
276 Ok(Self { mu, sigma })
277 }
278
279 pub fn mu(&self) -> f64 {
280 self.mu
281 }
282
283 pub fn sigma(&self) -> f64 {
284 self.sigma
285 }
286
287 pub fn mean(&self) -> f64 {
288 self.mu
289 }
290
291 pub fn variance(&self) -> f64 {
292 self.sigma * self.sigma
293 }
294
295 pub fn std_dev(&self) -> f64 {
296 self.sigma
297 }
298
299 pub fn pdf(&self, x: f64) -> f64 {
301 let z = (x - self.mu) / self.sigma;
302 special::standard_normal_pdf(z) / self.sigma
303 }
304
305 pub fn cdf(&self, x: f64) -> f64 {
307 let z = (x - self.mu) / self.sigma;
308 special::standard_normal_cdf(z)
309 }
310
311 pub fn quantile(&self, p: f64) -> Option<f64> {
315 if p <= 0.0 || p >= 1.0 {
316 return None;
317 }
318 Some(self.mu + self.sigma * special::inverse_normal_cdf(p))
319 }
320}
321
/// Log-normal distribution: `ln X ~ N(mu, sigma²)`.
///
/// `mu` and `sigma` parameterize the *logarithm* of the variate, not the
/// variate itself (see `mean`/`variance` for the transformed moments).
#[derive(Debug, Clone, PartialEq)]
pub struct LogNormal {
    mu: f64,
    sigma: f64,
}
341
342impl LogNormal {
343 pub fn new(mu: f64, sigma: f64) -> Result<Self, DistributionError> {
350 if !mu.is_finite() || !sigma.is_finite() || sigma <= 0.0 {
351 return Err(DistributionError::InvalidParameters(format!(
352 "LogNormal requires finite μ and σ > 0, got μ={mu}, σ={sigma}"
353 )));
354 }
355 Ok(Self { mu, sigma })
356 }
357
358 pub fn mu(&self) -> f64 {
359 self.mu
360 }
361
362 pub fn sigma(&self) -> f64 {
363 self.sigma
364 }
365
366 pub fn mean(&self) -> f64 {
368 (self.mu + self.sigma * self.sigma / 2.0).exp()
369 }
370
371 pub fn variance(&self) -> f64 {
373 let s2 = self.sigma * self.sigma;
374 (s2.exp() - 1.0) * (2.0 * self.mu + s2).exp()
375 }
376
377 pub fn pdf(&self, x: f64) -> f64 {
379 if x <= 0.0 {
380 return 0.0;
381 }
382 let ln_x = x.ln();
383 let z = (ln_x - self.mu) / self.sigma;
384 special::standard_normal_pdf(z) / (x * self.sigma)
385 }
386
387 pub fn cdf(&self, x: f64) -> f64 {
389 if x <= 0.0 {
390 return 0.0;
391 }
392 let z = (x.ln() - self.mu) / self.sigma;
393 special::standard_normal_cdf(z)
394 }
395
396 pub fn quantile(&self, p: f64) -> Option<f64> {
400 if p <= 0.0 || p >= 1.0 {
401 return None;
402 }
403 Some((self.mu + self.sigma * special::inverse_normal_cdf(p)).exp())
404 }
405}
406
/// PERT distribution: a beta distribution rescaled to `[min, max]` whose
/// shape parameters are derived from a most-likely `mode` estimate.
#[derive(Debug, Clone, PartialEq)]
pub struct Pert {
    min: f64,
    mode: f64,
    max: f64,
    // Derived beta shape parameters, precomputed in `Pert::with_shape`.
    alpha: f64,
    beta: f64,
}
450
451impl Pert {
452 pub fn new(min: f64, mode: f64, max: f64) -> Result<Self, DistributionError> {
457 Self::with_shape(min, mode, max, 4.0)
458 }
459
460 pub fn with_shape(
470 min: f64,
471 mode: f64,
472 max: f64,
473 lambda: f64,
474 ) -> Result<Self, DistributionError> {
475 if !min.is_finite() || !mode.is_finite() || !max.is_finite() || !lambda.is_finite() {
476 return Err(DistributionError::InvalidParameters(
477 "PERT parameters must be finite".into(),
478 ));
479 }
480 if min > mode || mode > max || min >= max {
481 return Err(DistributionError::InvalidParameters(format!(
482 "PERT requires min ≤ mode ≤ max and min < max, got {min}, {mode}, {max}"
483 )));
484 }
485 if lambda <= 0.0 {
486 return Err(DistributionError::InvalidParameters(format!(
487 "PERT λ must be > 0, got {lambda}"
488 )));
489 }
490
491 let range = max - min;
492 let alpha = 1.0 + lambda * (mode - min) / range;
493 let beta = 1.0 + lambda * (max - mode) / range;
494
495 Ok(Self {
496 min,
497 mode,
498 max,
499 alpha,
500 beta,
501 })
502 }
503
504 pub fn min(&self) -> f64 {
505 self.min
506 }
507
508 pub fn mode(&self) -> f64 {
509 self.mode
510 }
511
512 pub fn max(&self) -> f64 {
513 self.max
514 }
515
516 pub fn alpha(&self) -> f64 {
517 self.alpha
518 }
519
520 pub fn beta_param(&self) -> f64 {
521 self.beta
522 }
523
524 pub fn mean(&self) -> f64 {
528 let ab = self.alpha + self.beta;
529 self.min + (self.alpha / ab) * (self.max - self.min)
531 }
532
533 pub fn variance(&self) -> f64 {
539 let ab = self.alpha + self.beta;
540 let range = self.max - self.min;
541 (self.alpha * self.beta) / (ab * ab * (ab + 1.0)) * range * range
542 }
543
544 pub fn std_dev(&self) -> f64 {
546 self.variance().sqrt()
547 }
548
549 pub fn cdf(&self, x: f64) -> f64 {
554 if x <= self.min {
555 return 0.0;
556 }
557 if x >= self.max {
558 return 1.0;
559 }
560 let t = (x - self.min) / (self.max - self.min);
561 regularized_incomplete_beta(t, self.alpha, self.beta)
562 }
563
564 pub fn quantile_approx(&self, p: f64) -> Option<f64> {
571 if p <= 0.0 || p >= 1.0 {
572 return None;
573 }
574 let z = special::inverse_normal_cdf(p);
575 let result = self.mean() + z * self.std_dev();
576 Some(result.clamp(self.min, self.max))
578 }
579}
580
/// Two-parameter Weibull distribution with shape `k` and scale `λ`.
#[derive(Debug, Clone, PartialEq)]
pub struct Weibull {
    /// Shape parameter `k` (> 0).
    shape: f64,
    /// Scale parameter `λ` (> 0).
    scale: f64,
}
608
609impl Weibull {
610 pub fn new(shape: f64, scale: f64) -> Result<Self, DistributionError> {
615 if !shape.is_finite() || !scale.is_finite() || shape <= 0.0 || scale <= 0.0 {
616 return Err(DistributionError::InvalidParameters(format!(
617 "Weibull requires shape > 0 and scale > 0, got shape={shape}, scale={scale}"
618 )));
619 }
620 Ok(Self { shape, scale })
621 }
622
623 pub fn shape(&self) -> f64 {
625 self.shape
626 }
627
628 pub fn scale(&self) -> f64 {
630 self.scale
631 }
632
633 pub fn mean(&self) -> f64 {
635 self.scale * gamma(1.0 + 1.0 / self.shape)
636 }
637
638 pub fn variance(&self) -> f64 {
640 let g1 = gamma(1.0 + 1.0 / self.shape);
641 let g2 = gamma(1.0 + 2.0 / self.shape);
642 self.scale * self.scale * (g2 - g1 * g1)
643 }
644
645 pub fn pdf(&self, t: f64) -> f64 {
647 if t < 0.0 {
648 return 0.0;
649 }
650 if t == 0.0 {
651 return if self.shape < 1.0 {
652 f64::INFINITY
653 } else if (self.shape - 1.0).abs() < f64::EPSILON {
654 1.0 / self.scale
655 } else {
656 0.0
657 };
658 }
659 let z = t / self.scale;
660 (self.shape / self.scale) * z.powf(self.shape - 1.0) * (-z.powf(self.shape)).exp()
661 }
662
663 pub fn cdf(&self, t: f64) -> f64 {
665 if t <= 0.0 {
666 return 0.0;
667 }
668 let z = t / self.scale;
669 1.0 - (-z.powf(self.shape)).exp()
670 }
671
672 pub fn quantile(&self, p: f64) -> Option<f64> {
676 if !(0.0..1.0).contains(&p) {
677 return None;
678 }
679 if p == 0.0 {
680 return Some(0.0);
681 }
682 Some(self.scale * (-(1.0 - p).ln()).powf(1.0 / self.shape))
683 }
684
685 pub fn hazard_rate(&self, t: f64) -> f64 {
691 if t <= 0.0 {
692 return if t == 0.0 && self.shape >= 1.0 {
693 if (self.shape - 1.0).abs() < f64::EPSILON {
694 1.0 / self.scale
695 } else {
696 0.0
697 }
698 } else {
699 0.0
700 };
701 }
702 let z = t / self.scale;
703 (self.shape / self.scale) * z.powf(self.shape - 1.0)
704 }
705
706 pub fn reliability(&self, t: f64) -> f64 {
708 1.0 - self.cdf(t)
709 }
710}
711
/// Exponential distribution with rate parameter λ.
///
/// Invariant (enforced by [`Exponential::new`]): `rate` is finite and
/// strictly positive.
// `PartialEq` added for consistency with the other distribution types in
// this module (Uniform, Triangular, Normal, ... all derive it).
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Exponential {
    /// Rate parameter λ (> 0); the mean is `1/λ`.
    rate: f64,
}
734
735impl Exponential {
736 pub fn new(rate: f64) -> Result<Self, DistributionError> {
740 if !rate.is_finite() || rate <= 0.0 {
741 return Err(DistributionError::InvalidParameters(format!(
742 "Exponential rate must be positive and finite, got {rate}"
743 )));
744 }
745 Ok(Self { rate })
746 }
747
748 pub fn rate(&self) -> f64 {
750 self.rate
751 }
752
753 pub fn mean(&self) -> f64 {
755 1.0 / self.rate
756 }
757
758 pub fn variance(&self) -> f64 {
760 1.0 / (self.rate * self.rate)
761 }
762
763 pub fn pdf(&self, x: f64) -> f64 {
765 if x < 0.0 {
766 0.0
767 } else {
768 self.rate * (-self.rate * x).exp()
769 }
770 }
771
772 pub fn cdf(&self, x: f64) -> f64 {
774 if x < 0.0 {
775 0.0
776 } else {
777 1.0 - (-self.rate * x).exp()
778 }
779 }
780
781 pub fn quantile(&self, p: f64) -> Option<f64> {
785 if !(0.0..=1.0).contains(&p) {
786 return None;
787 }
788 if p == 1.0 {
789 return Some(f64::INFINITY);
790 }
791 Some(-(1.0 - p).ln() / self.rate)
792 }
793
794 pub fn hazard_rate(&self) -> f64 {
796 self.rate
797 }
798
799 pub fn survival(&self, x: f64) -> f64 {
801 1.0 - self.cdf(x)
802 }
803}
804
/// Gamma distribution parameterized by shape `k` and rate `β` (inverse scale).
///
/// Invariant (enforced by [`GammaDistribution::new`]): both parameters are
/// finite and strictly positive.
// `PartialEq` added for consistency with the other distribution types in
// this module.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct GammaDistribution {
    /// Shape parameter `k` (> 0).
    shape: f64,
    /// Rate parameter `β` (> 0); the scale is `1/β`.
    rate: f64,
}
836
837impl GammaDistribution {
838 pub fn new(shape: f64, rate: f64) -> Result<Self, DistributionError> {
842 if !shape.is_finite() || shape <= 0.0 || !rate.is_finite() || rate <= 0.0 {
843 return Err(DistributionError::InvalidParameters(format!(
844 "Gamma shape and rate must be positive and finite, got shape={shape}, rate={rate}"
845 )));
846 }
847 Ok(Self { shape, rate })
848 }
849
850 pub fn from_shape_scale(shape: f64, scale: f64) -> Result<Self, DistributionError> {
852 if !scale.is_finite() || scale <= 0.0 {
853 return Err(DistributionError::InvalidParameters(format!(
854 "Gamma scale must be positive and finite, got {scale}"
855 )));
856 }
857 Self::new(shape, 1.0 / scale)
858 }
859
860 pub fn shape(&self) -> f64 {
862 self.shape
863 }
864
865 pub fn rate(&self) -> f64 {
867 self.rate
868 }
869
870 pub fn scale(&self) -> f64 {
872 1.0 / self.rate
873 }
874
875 pub fn mean(&self) -> f64 {
877 self.shape / self.rate
878 }
879
880 pub fn variance(&self) -> f64 {
882 self.shape / (self.rate * self.rate)
883 }
884
885 pub fn mode(&self) -> f64 {
887 if self.shape >= 1.0 {
888 (self.shape - 1.0) / self.rate
889 } else {
890 0.0
891 }
892 }
893
894 pub fn pdf(&self, x: f64) -> f64 {
896 if x <= 0.0 {
897 if x == 0.0 && self.shape < 1.0 {
898 return f64::INFINITY;
899 }
900 return 0.0;
901 }
902 let log_pdf = self.shape * self.rate.ln() - ln_gamma(self.shape)
903 + (self.shape - 1.0) * x.ln()
904 - self.rate * x;
905 log_pdf.exp()
906 }
907
908 pub fn cdf(&self, x: f64) -> f64 {
912 if x <= 0.0 {
913 return 0.0;
914 }
915 regularized_lower_gamma(self.shape, self.rate * x)
916 }
917
918 pub fn quantile(&self, p: f64) -> Option<f64> {
922 if !(0.0..=1.0).contains(&p) {
923 return None;
924 }
925 if p == 0.0 {
926 return Some(0.0);
927 }
928 if p == 1.0 {
929 return Some(f64::INFINITY);
930 }
931
932 let z = crate::special::inverse_normal_cdf(p);
934 let v = 1.0 / (9.0 * self.shape);
935 let chi2_approx = self.shape * (1.0 - v + z * v.sqrt()).powi(3).max(0.001);
936 let mut x = chi2_approx / self.rate;
937
938 for _ in 0..50 {
940 let f = self.cdf(x) - p;
941 let fp = self.pdf(x);
942 if fp < 1e-300 {
943 break;
944 }
945 let step = f / fp;
946 x = (x - step).max(1e-15);
947 if step.abs() < 1e-12 * x {
948 break;
949 }
950 }
951
952 Some(x)
953 }
954}
955
956fn regularized_lower_gamma(a: f64, x: f64) -> f64 {
960 if x <= 0.0 {
961 return 0.0;
962 }
963 if x < a + 1.0 {
964 gamma_series(a, x)
966 } else {
967 1.0 - gamma_cf(a, x)
969 }
970}
971
972fn gamma_series(a: f64, x: f64) -> f64 {
974 let mut term = 1.0 / a;
975 let mut sum = term;
976 let mut ap = a;
977
978 for _ in 0..200 {
979 ap += 1.0;
980 term *= x / ap;
981 sum += term;
982 if term.abs() < sum.abs() * 1e-14 {
983 break;
984 }
985 }
986
987 sum * (-x + a * x.ln() - ln_gamma(a)).exp()
988}
989
/// Continued-fraction expansion of `Q(a, x) = 1 − P(a, x)` via the modified
/// Lentz algorithm (effective for `x >= a + 1`).
///
/// `b`, `c`, `d`, `h` follow the standard Lentz bookkeeping: `h` accumulates
/// the running value, while `c` and `d` track ratios of successive
/// numerators/denominators; both are floored in magnitude at 1e-30 to avoid
/// division by zero. The statement order is load-bearing.
fn gamma_cf(a: f64, x: f64) -> f64 {
    let mut b = x + 1.0 - a;
    let mut c = 1.0 / 1e-30;
    let mut d = 1.0 / b;
    let mut h = d;

    for i in 1..=200 {
        // i-th partial numerator of the continued fraction.
        let an = -(i as f64) * (i as f64 - a);
        b += 2.0;
        d = an * d + b;
        if d.abs() < 1e-30 {
            d = 1e-30;
        }
        c = b + an / c;
        if c.abs() < 1e-30 {
            c = 1e-30;
        }
        d = 1.0 / d;
        let delta = d * c;
        h *= delta;
        // Converged once the multiplicative update is within ~machine epsilon.
        if (delta - 1.0).abs() < 1e-14 {
            break;
        }
    }

    // Rescale by exp(−x + a·ln x − ln Γ(a)).
    h * (-x + a * x.ln() - ln_gamma(a)).exp()
}
1018
1019fn gamma(x: f64) -> f64 {
1023 ln_gamma(x).exp()
1024}
1025
1026fn regularized_incomplete_beta(x: f64, a: f64, b: f64) -> f64 {
1040 if x <= 0.0 {
1041 return 0.0;
1042 }
1043 if x >= 1.0 {
1044 return 1.0;
1045 }
1046
1047 if x > (a + 1.0) / (a + b + 2.0) {
1050 return 1.0 - regularized_incomplete_beta(1.0 - x, b, a);
1051 }
1052
1053 let ln_prefix = a * x.ln() + b * (1.0 - x).ln() - ln_beta(a, b);
1054
1055 let cf = beta_cf(x, a, b);
1057
1058 (ln_prefix.exp() / a) * cf
1059}
1060
1061fn ln_beta(a: f64, b: f64) -> f64 {
1063 ln_gamma(a) + ln_gamma(b) - ln_gamma(a + b)
1064}
1065
/// Natural log of the gamma function via the Lanczos approximation
/// (g = 7, 9 coefficients), with the reflection formula for `x < 0.5`.
fn ln_gamma(x: f64) -> f64 {
    #[allow(clippy::excessive_precision)]
    const COEFFICIENTS: [f64; 9] = [
        0.99999999999980993,
        676.5203681218851,
        -1259.1392167224028,
        771.32342877765313,
        -176.61502916214059,
        12.507343278686905,
        -0.13857109526572012,
        9.9843695780195716e-6,
        1.5056327351493116e-7,
    ];
    const G: f64 = 7.0;

    if x < 0.5 {
        // Reflection: Γ(x) Γ(1−x) = π / sin(πx).
        let pi = std::f64::consts::PI;
        return (pi / (pi * x).sin()).ln() - ln_gamma(1.0 - x);
    }

    let shifted = x - 1.0;
    let mut series = COEFFICIENTS[0];
    for k in 1..COEFFICIENTS.len() {
        series += COEFFICIENTS[k] / (shifted + k as f64);
    }

    let t = shifted + G + 0.5;
    0.5 * (2.0 * std::f64::consts::PI).ln() + (shifted + 0.5) * t.ln() - t + series.ln()
}
1104
/// Continued-fraction core of the regularized incomplete beta function
/// (modified Lentz algorithm, even/odd terms folded into one iteration).
///
/// Returns the raw continued-fraction value; the caller multiplies by
/// `x^a (1−x)^b / (a · B(a, b))`.
///
/// Fix: the previous version guarded the Lentz denominators with
/// `.max(TINY)`, which silently replaces any *negative* denominator with
/// +1e-30 — not just near-zero ones — corrupting the fraction whenever a
/// partial denominator is legitimately negative. The guard now clamps by
/// magnitude only, preserving sign, matching `gamma_cf` above and the
/// standard Lentz formulation.
fn beta_cf(x: f64, a: f64, b: f64) -> f64 {
    const MAX_ITER: usize = 200;
    const EPS: f64 = 1e-14;
    const TINY: f64 = 1e-30;

    // Floor a value's magnitude at TINY while preserving its sign bias
    // (values in (−TINY, TINY) collapse to +TINY, as in Numerical Recipes).
    let clamp_tiny = |v: f64| if v.abs() < TINY { TINY } else { v };

    let mut c = 1.0;
    let mut d = 1.0 / clamp_tiny(1.0 - (a + b) * x / (a + 1.0));
    let mut h = d;

    for m in 1..=MAX_ITER {
        let m_f = m as f64;

        // Even-numbered term of the continued fraction.
        let num_even = m_f * (b - m_f) * x / ((a + 2.0 * m_f - 1.0) * (a + 2.0 * m_f));
        d = 1.0 / clamp_tiny(1.0 + num_even * d);
        c = clamp_tiny(1.0 + num_even / c);
        h *= d * c;

        // Odd-numbered term.
        let num_odd = -(a + m_f) * (a + b + m_f) * x / ((a + 2.0 * m_f) * (a + 2.0 * m_f + 1.0));
        d = 1.0 / clamp_tiny(1.0 + num_odd * d);
        c = clamp_tiny(1.0 + num_odd / c);
        let delta = d * c;
        h *= delta;

        // Converged once the multiplicative update is within EPS of 1.
        if (delta - 1.0).abs() < EPS {
            break;
        }
    }

    h
}
1140
/// Beta distribution on `[0, 1]` with shape parameters α and β.
///
/// Invariant (enforced by [`BetaDistribution::new`]): both parameters are
/// finite and strictly positive.
// `PartialEq` added for consistency with the other distribution types in
// this module.
#[derive(Debug, Clone, PartialEq)]
pub struct BetaDistribution {
    /// First shape parameter α (> 0).
    pub alpha: f64,
    /// Second shape parameter β (> 0).
    pub beta: f64,
}
1176
1177impl BetaDistribution {
1178 pub fn new(alpha: f64, beta: f64) -> Result<Self, DistributionError> {
1183 if alpha <= 0.0 || !alpha.is_finite() {
1184 return Err(DistributionError::InvalidParameters(format!(
1185 "alpha must be positive and finite, got {alpha}"
1186 )));
1187 }
1188 if beta <= 0.0 || !beta.is_finite() {
1189 return Err(DistributionError::InvalidParameters(format!(
1190 "beta must be positive and finite, got {beta}"
1191 )));
1192 }
1193 Ok(Self { alpha, beta })
1194 }
1195
1196 pub fn mean(&self) -> f64 {
1198 self.alpha / (self.alpha + self.beta)
1199 }
1200
1201 pub fn variance(&self) -> f64 {
1203 let ab = self.alpha + self.beta;
1204 self.alpha * self.beta / (ab * ab * (ab + 1.0))
1205 }
1206
1207 pub fn mode(&self) -> Option<f64> {
1212 if self.alpha > 1.0 && self.beta > 1.0 {
1213 Some((self.alpha - 1.0) / (self.alpha + self.beta - 2.0))
1214 } else {
1215 None
1216 }
1217 }
1218
1219 pub fn pdf(&self, x: f64) -> f64 {
1221 if x <= 0.0 || x >= 1.0 {
1222 return 0.0;
1223 }
1224 let ln_pdf = (self.alpha - 1.0) * x.ln() + (self.beta - 1.0) * (1.0 - x).ln()
1225 - ln_beta(self.alpha, self.beta);
1226 ln_pdf.exp()
1227 }
1228
1229 pub fn cdf(&self, x: f64) -> f64 {
1231 if x <= 0.0 {
1232 return 0.0;
1233 }
1234 if x >= 1.0 {
1235 return 1.0;
1236 }
1237 regularized_incomplete_beta(x, self.alpha, self.beta)
1238 }
1239
1240 pub fn quantile(&self, p: f64) -> Option<f64> {
1245 if !(0.0..=1.0).contains(&p) {
1246 return None;
1247 }
1248 if p == 0.0 {
1249 return Some(0.0);
1250 }
1251 if (p - 1.0).abs() < 1e-15 {
1252 return Some(1.0);
1253 }
1254
1255 let mut lo = 0.0_f64;
1257 let mut hi = 1.0_f64;
1258
1259 for _ in 0..100 {
1260 let mid = (lo + hi) / 2.0;
1261 if hi - lo < 1e-14 {
1262 break;
1263 }
1264 if self.cdf(mid) < p {
1265 lo = mid;
1266 } else {
1267 hi = mid;
1268 }
1269 }
1270
1271 Some((lo + hi) / 2.0)
1272 }
1273}
1274
/// Chi-squared distribution with `k` degrees of freedom.
///
/// Invariant (enforced by [`ChiSquared::new`]): `k` is finite and strictly
/// positive (non-integer degrees of freedom are allowed).
// `PartialEq` added for consistency with the other distribution types in
// this module.
#[derive(Debug, Clone, PartialEq)]
pub struct ChiSquared {
    /// Degrees of freedom (> 0).
    pub k: f64,
}
1310
1311impl ChiSquared {
1312 pub fn new(k: f64) -> Result<Self, DistributionError> {
1317 if k <= 0.0 || !k.is_finite() {
1318 return Err(DistributionError::InvalidParameters(format!(
1319 "k must be positive and finite, got {k}"
1320 )));
1321 }
1322 Ok(Self { k })
1323 }
1324
1325 pub fn mean(&self) -> f64 {
1327 self.k
1328 }
1329
1330 pub fn variance(&self) -> f64 {
1332 2.0 * self.k
1333 }
1334
1335 pub fn pdf(&self, x: f64) -> f64 {
1337 if x <= 0.0 {
1338 return 0.0;
1339 }
1340 let half_k = self.k / 2.0;
1341 let ln_pdf = (half_k - 1.0) * x.ln() - x / 2.0 - half_k * 2.0_f64.ln() - ln_gamma(half_k);
1342 ln_pdf.exp()
1343 }
1344
1345 pub fn cdf(&self, x: f64) -> f64 {
1347 if x <= 0.0 {
1348 return 0.0;
1349 }
1350 regularized_lower_gamma(self.k / 2.0, x / 2.0)
1351 }
1352
1353 pub fn quantile(&self, p: f64) -> Option<f64> {
1357 if !(0.0..=1.0).contains(&p) {
1358 return None;
1359 }
1360 if p == 0.0 {
1361 return Some(0.0);
1362 }
1363 if (p - 1.0).abs() < 1e-15 {
1364 return Some(f64::INFINITY);
1365 }
1366
1367 let gamma = GammaDistribution {
1369 shape: self.k / 2.0,
1370 rate: 0.5,
1371 };
1372 gamma.quantile(p)
1373 }
1374}
1375
1376#[cfg(test)]
1381mod tests {
1382 use super::*;
1383
1384 #[test]
1387 fn test_uniform_basic() {
1388 let u = Uniform::new(0.0, 10.0).unwrap();
1389 assert!((u.mean() - 5.0).abs() < 1e-15);
1390 assert!((u.variance() - 100.0 / 12.0).abs() < 1e-10);
1391 }
1392
1393 #[test]
1394 fn test_uniform_cdf() {
1395 let u = Uniform::new(0.0, 10.0).unwrap();
1396 assert_eq!(u.cdf(-1.0), 0.0);
1397 assert!((u.cdf(5.0) - 0.5).abs() < 1e-15);
1398 assert_eq!(u.cdf(11.0), 1.0);
1399 }
1400
1401 #[test]
1402 fn test_uniform_quantile() {
1403 let u = Uniform::new(2.0, 8.0).unwrap();
1404 assert_eq!(u.quantile(0.0), Some(2.0));
1405 assert_eq!(u.quantile(1.0), Some(8.0));
1406 assert!((u.quantile(0.5).unwrap() - 5.0).abs() < 1e-15);
1407 }
1408
1409 #[test]
1410 fn test_uniform_pdf() {
1411 let u = Uniform::new(0.0, 5.0).unwrap();
1412 assert!((u.pdf(2.5) - 0.2).abs() < 1e-15);
1413 assert_eq!(u.pdf(-1.0), 0.0);
1414 }
1415
1416 #[test]
1417 fn test_uniform_invalid() {
1418 assert!(Uniform::new(5.0, 5.0).is_err());
1419 assert!(Uniform::new(5.0, 3.0).is_err());
1420 assert!(Uniform::new(f64::NAN, 5.0).is_err());
1421 }
1422
1423 #[test]
1426 fn test_triangular_mean() {
1427 let t = Triangular::new(0.0, 3.0, 6.0).unwrap();
1428 assert!((t.mean() - 3.0).abs() < 1e-15);
1429 }
1430
1431 #[test]
1432 fn test_triangular_symmetric_variance() {
1433 let t = Triangular::new(0.0, 5.0, 10.0).unwrap();
1434 let expected = (0.0 + 25.0 + 100.0 - 0.0 - 0.0 - 50.0) / 18.0;
1436 assert!((t.variance() - expected).abs() < 1e-10);
1437 }
1438
1439 #[test]
1440 fn test_triangular_cdf() {
1441 let t = Triangular::new(0.0, 5.0, 10.0).unwrap();
1442 assert!((t.cdf(0.0)).abs() < 1e-15);
1443 assert!((t.cdf(10.0) - 1.0).abs() < 1e-15);
1444 assert!((t.cdf(5.0) - 0.5).abs() < 1e-15);
1446 }
1447
1448 #[test]
1449 fn test_triangular_quantile() {
1450 let t = Triangular::new(0.0, 5.0, 10.0).unwrap();
1451 assert!((t.quantile(0.0).unwrap() - 0.0).abs() < 1e-15);
1452 assert!((t.quantile(1.0).unwrap() - 10.0).abs() < 1e-15);
1453 assert!((t.quantile(0.5).unwrap() - 5.0).abs() < 1e-10);
1454 }
1455
1456 #[test]
1457 fn test_triangular_invalid() {
1458 assert!(Triangular::new(5.0, 3.0, 10.0).is_err()); assert!(Triangular::new(0.0, 11.0, 10.0).is_err()); assert!(Triangular::new(5.0, 5.0, 5.0).is_err()); }
1462
1463 #[test]
1466 fn test_normal_standard() {
1467 let n = Normal::new(0.0, 1.0).unwrap();
1468 assert!((n.mean()).abs() < 1e-15);
1469 assert!((n.variance() - 1.0).abs() < 1e-15);
1470 assert!((n.cdf(0.0) - 0.5).abs() < 1e-7);
1471 }
1472
1473 #[test]
1474 fn test_normal_shifted() {
1475 let n = Normal::new(10.0, 2.0).unwrap();
1476 assert!((n.mean() - 10.0).abs() < 1e-15);
1477 assert!((n.variance() - 4.0).abs() < 1e-15);
1478 assert!((n.cdf(10.0) - 0.5).abs() < 1e-7);
1479 }
1480
1481 #[test]
1482 fn test_normal_quantile() {
1483 let n = Normal::new(0.0, 1.0).unwrap();
1484 assert!((n.quantile(0.5).unwrap()).abs() < 0.01);
1485 assert!((n.quantile(0.975).unwrap() - 1.96).abs() < 0.01);
1486 }
1487
1488 #[test]
1489 fn test_normal_invalid() {
1490 assert!(Normal::new(0.0, 0.0).is_err());
1491 assert!(Normal::new(0.0, -1.0).is_err());
1492 }
1493
1494 #[test]
1497 fn test_lognormal_mean() {
1498 let ln = LogNormal::new(0.0, 1.0).unwrap();
1499 let expected = (0.5_f64).exp(); assert!((ln.mean() - expected).abs() < 1e-10);
1501 }
1502
1503 #[test]
1504 fn test_lognormal_cdf() {
1505 let ln = LogNormal::new(0.0, 1.0).unwrap();
1506 assert_eq!(ln.cdf(0.0), 0.0);
1507 assert!((ln.cdf(1.0) - 0.5).abs() < 0.001);
1509 }
1510
1511 #[test]
1512 fn test_lognormal_quantile() {
1513 let ln = LogNormal::new(0.0, 1.0).unwrap();
1514 let q50 = ln.quantile(0.5).unwrap();
1516 assert!((q50 - 1.0).abs() < 0.01);
1517 }
1518
1519 #[test]
1522 fn test_pert_mean() {
1523 let p = Pert::new(1.0, 4.0, 7.0).unwrap();
1524 assert!((p.mean() - 4.0).abs() < 1e-10);
1526 }
1527
1528 #[test]
1529 fn test_pert_symmetric_variance() {
1530 let p = Pert::new(0.0, 5.0, 10.0).unwrap();
1531 let expected = 9.0 / (36.0 * 7.0) * 100.0;
1534 assert!(
1535 (p.variance() - expected).abs() < 1e-10,
1536 "PERT variance: {} vs expected: {}",
1537 p.variance(),
1538 expected
1539 );
1540 }
1541
1542 #[test]
1543 fn test_pert_shape_params() {
1544 let p = Pert::new(0.0, 5.0, 10.0).unwrap();
1545 assert!((p.alpha() - 3.0).abs() < 1e-15);
1548 assert!((p.beta_param() - 3.0).abs() < 1e-15);
1549 }
1550
1551 #[test]
1552 fn test_pert_cdf_bounds() {
1553 let p = Pert::new(1.0, 4.0, 7.0).unwrap();
1554 assert_eq!(p.cdf(0.0), 0.0);
1555 assert_eq!(p.cdf(8.0), 1.0);
1556 let p_sym = Pert::new(0.0, 5.0, 10.0).unwrap();
1558 assert!((p_sym.cdf(5.0) - 0.5).abs() < 0.01);
1559 }
1560
1561 #[test]
1562 fn test_pert_cdf_monotonic() {
1563 let p = Pert::new(0.0, 3.0, 10.0).unwrap();
1564 let mut prev = 0.0;
1565 for i in 0..=100 {
1566 let x = i as f64 * 0.1;
1567 let c = p.cdf(x);
1568 assert!(c >= prev - 1e-15, "CDF not monotonic at x={x}");
1569 prev = c;
1570 }
1571 }
1572
1573 #[test]
1574 fn test_pert_quantile_approx() {
1575 let p = Pert::new(0.0, 5.0, 10.0).unwrap();
1576 let q50 = p.quantile_approx(0.5).unwrap();
1577 assert!((q50 - 5.0).abs() < 0.5, "median approx: {q50}");
1578 }
1579
1580 #[test]
1581 fn test_pert_invalid() {
1582 assert!(Pert::new(5.0, 3.0, 10.0).is_err());
1583 assert!(Pert::new(5.0, 5.0, 5.0).is_err());
1584 }
1585
1586 #[test]
1587 fn test_pert_with_lambda() {
1588 let p4 = Pert::with_shape(0.0, 5.0, 10.0, 4.0).unwrap();
1590 let p8 = Pert::with_shape(0.0, 5.0, 10.0, 8.0).unwrap();
1591 assert!(
1592 p8.variance() < p4.variance(),
1593 "higher λ should give lower variance"
1594 );
1595 }
1596
1597 #[test]
1600 fn test_regularized_beta_bounds() {
1601 assert_eq!(regularized_incomplete_beta(0.0, 2.0, 3.0), 0.0);
1602 assert_eq!(regularized_incomplete_beta(1.0, 2.0, 3.0), 1.0);
1603 }
1604
1605 #[test]
1606 fn test_regularized_beta_symmetric() {
1607 let result = regularized_incomplete_beta(0.5, 3.0, 3.0);
1609 assert!(
1610 (result - 0.5).abs() < 1e-8,
1611 "I_0.5(3,3) = {result}, expected 0.5"
1612 );
1613 }
1614
1615 #[test]
1616 fn test_regularized_beta_known_values() {
1617 for &x in &[0.1, 0.3, 0.5, 0.7, 0.9] {
1619 let result = regularized_incomplete_beta(x, 1.0, 1.0);
1620 assert!(
1621 (result - x).abs() < 1e-10,
1622 "I_{x}(1,1) = {result}, expected {x}"
1623 );
1624 }
1625
1626 for &x in &[0.1, 0.5, 0.9] {
1628 let result = regularized_incomplete_beta(x, 1.0, 3.0);
1629 let expected = 1.0 - (1.0 - x).powi(3);
1630 assert!(
1631 (result - expected).abs() < 1e-10,
1632 "I_{x}(1,3) = {result}, expected {expected}"
1633 );
1634 }
1635 }
1636
1637 #[test]
1640 fn test_ln_gamma_known() {
1641 assert!((ln_gamma(1.0)).abs() < 1e-10);
1643 assert!((ln_gamma(2.0)).abs() < 1e-10);
1645 assert!((ln_gamma(3.0) - 2.0_f64.ln()).abs() < 1e-10);
1647 assert!((ln_gamma(5.0) - 24.0_f64.ln()).abs() < 1e-10);
1649 assert!(
1651 (ln_gamma(0.5) - std::f64::consts::PI.sqrt().ln()).abs() < 1e-10,
1652 "ln Γ(0.5) = {}, expected {}",
1653 ln_gamma(0.5),
1654 std::f64::consts::PI.sqrt().ln()
1655 );
1656 }
1657
1658 #[test]
1661 fn test_weibull_exponential_special_case() {
1662 let w = Weibull::new(1.0, 5.0).unwrap();
1664 assert!((w.mean() - 5.0).abs() < 1e-10);
1665 assert!((w.variance() - 25.0).abs() < 1e-8);
1666 }
1667
1668 #[test]
1669 fn test_weibull_rayleigh_special_case() {
1670 let w = Weibull::new(2.0, 1.0).unwrap();
1672 let expected_mean = std::f64::consts::PI.sqrt() / 2.0;
1673 assert!(
1674 (w.mean() - expected_mean).abs() < 1e-10,
1675 "Weibull(2,1) mean = {}, expected {}",
1676 w.mean(),
1677 expected_mean
1678 );
1679 }
1680
1681 #[test]
1682 fn test_weibull_cdf_known_values() {
1683 let w = Weibull::new(2.0, 10.0).unwrap();
1685 assert!((w.cdf(10.0) - (1.0 - (-1.0_f64).exp())).abs() < 1e-10);
1687 assert_eq!(w.cdf(0.0), 0.0);
1689 assert_eq!(w.cdf(-1.0), 0.0);
1691 }
1692
1693 #[test]
1694 fn test_weibull_pdf_basic() {
1695 let w = Weibull::new(1.0, 1.0).unwrap();
1697 assert!((w.pdf(0.0) - 1.0).abs() < 1e-10);
1698 assert!((w.pdf(1.0) - (-1.0_f64).exp()).abs() < 1e-10);
1699 assert!((w.pdf(2.0) - (-2.0_f64).exp()).abs() < 1e-10);
1700 }
1701
1702 #[test]
1703 fn test_weibull_pdf_negative() {
1704 let w = Weibull::new(2.0, 5.0).unwrap();
1705 assert_eq!(w.pdf(-1.0), 0.0);
1706 }
1707
1708 #[test]
1709 fn test_weibull_quantile_roundtrip() {
1710 let w = Weibull::new(2.5, 100.0).unwrap();
1711 for &p in &[0.1, 0.25, 0.5, 0.75, 0.9] {
1712 let t = w.quantile(p).unwrap();
1713 let p_back = w.cdf(t);
1714 assert!(
1715 (p_back - p).abs() < 1e-10,
1716 "roundtrip: p={p} -> t={t} -> p_back={p_back}"
1717 );
1718 }
1719 }
1720
1721 #[test]
1722 fn test_weibull_quantile_edge_cases() {
1723 let w = Weibull::new(2.0, 10.0).unwrap();
1724 assert_eq!(w.quantile(0.0), Some(0.0));
1725 assert_eq!(w.quantile(1.0), None);
1726 assert_eq!(w.quantile(-0.1), None);
1727 }
1728
1729 #[test]
1730 fn test_weibull_reliability() {
1731 let w = Weibull::new(2.0, 10.0).unwrap();
1732 for &t in &[1.0, 5.0, 10.0, 20.0] {
1734 let r = w.reliability(t);
1735 let f = w.cdf(t);
1736 assert!((r + f - 1.0).abs() < 1e-14);
1737 }
1738 }
1739
1740 #[test]
1741 fn test_weibull_hazard_rate() {
1742 let w = Weibull::new(1.0, 5.0).unwrap();
1744 for &t in &[1.0, 5.0, 10.0] {
1745 assert!((w.hazard_rate(t) - 0.2).abs() < 1e-10);
1746 }
1747 let w2 = Weibull::new(2.0, 10.0).unwrap();
1749 assert!((w2.hazard_rate(5.0) - 2.0 * 5.0 / 100.0).abs() < 1e-10);
1750 }
1751
1752 #[test]
1753 fn test_weibull_invalid() {
1754 assert!(Weibull::new(0.0, 1.0).is_err());
1755 assert!(Weibull::new(-1.0, 1.0).is_err());
1756 assert!(Weibull::new(1.0, 0.0).is_err());
1757 assert!(Weibull::new(1.0, -1.0).is_err());
1758 assert!(Weibull::new(f64::NAN, 1.0).is_err());
1759 assert!(Weibull::new(1.0, f64::INFINITY).is_err());
1760 }
1761
1762 #[test]
1765 fn test_exponential_basic() {
1766 let e = Exponential::new(0.5).unwrap();
1767 assert!((e.rate() - 0.5).abs() < 1e-10);
1768 assert!((e.mean() - 2.0).abs() < 1e-10);
1769 assert!((e.variance() - 4.0).abs() < 1e-10);
1770 }
1771
1772 #[test]
1773 fn test_exponential_pdf() {
1774 let e = Exponential::new(1.0).unwrap();
1775 assert!((e.pdf(0.0) - 1.0).abs() < 1e-10);
1776 assert!((e.pdf(1.0) - (-1.0_f64).exp()).abs() < 1e-10);
1777 assert_eq!(e.pdf(-1.0), 0.0);
1778 }
1779
1780 #[test]
1781 fn test_exponential_cdf() {
1782 let e = Exponential::new(1.0).unwrap();
1783 assert_eq!(e.cdf(-1.0), 0.0);
1784 assert_eq!(e.cdf(0.0), 0.0);
1785 assert!((e.cdf(1.0) - (1.0 - (-1.0_f64).exp())).abs() < 1e-10);
1786 }
1787
1788 #[test]
1789 fn test_exponential_quantile() {
1790 let e = Exponential::new(2.0).unwrap();
1791 assert_eq!(e.quantile(0.0), Some(0.0));
1792 assert_eq!(e.quantile(1.0), Some(f64::INFINITY));
1793 let q = e.quantile(0.5).unwrap();
1794 assert!((q - 2.0_f64.ln() / 2.0).abs() < 1e-10);
1796 assert!(e.quantile(-0.1).is_none());
1797 assert!(e.quantile(1.1).is_none());
1798 }
1799
1800 #[test]
1801 fn test_exponential_quantile_roundtrip() {
1802 let e = Exponential::new(3.0).unwrap();
1803 for &p in &[0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99] {
1804 let x = e.quantile(p).unwrap();
1805 let p_back = e.cdf(x);
1806 assert!((p_back - p).abs() < 1e-10, "p={p}, x={x}, p_back={p_back}");
1807 }
1808 }
1809
1810 #[test]
1811 fn test_exponential_memoryless() {
1812 let e = Exponential::new(1.5).unwrap();
1814 let s = 2.0;
1815 let t = 3.0;
1816 let lhs = e.survival(s + t) / e.survival(s);
1817 let rhs = e.survival(t);
1818 assert!((lhs - rhs).abs() < 1e-10);
1819 }
1820
1821 #[test]
1822 fn test_exponential_invalid() {
1823 assert!(Exponential::new(0.0).is_err());
1824 assert!(Exponential::new(-1.0).is_err());
1825 assert!(Exponential::new(f64::NAN).is_err());
1826 assert!(Exponential::new(f64::INFINITY).is_err());
1827 }
1828
1829 #[test]
1832 fn test_gamma_basic() {
1833 let g = GammaDistribution::new(2.0, 1.0).unwrap();
1834 assert!((g.shape() - 2.0).abs() < 1e-10);
1835 assert!((g.rate() - 1.0).abs() < 1e-10);
1836 assert!((g.scale() - 1.0).abs() < 1e-10);
1837 assert!((g.mean() - 2.0).abs() < 1e-10);
1838 assert!((g.variance() - 2.0).abs() < 1e-10);
1839 assert!((g.mode() - 1.0).abs() < 1e-10);
1840 }
1841
1842 #[test]
1843 fn test_gamma_from_shape_scale() {
1844 let g = GammaDistribution::from_shape_scale(3.0, 2.0).unwrap();
1845 assert!((g.shape() - 3.0).abs() < 1e-10);
1846 assert!((g.rate() - 0.5).abs() < 1e-10);
1847 assert!((g.mean() - 6.0).abs() < 1e-10); }
1849
1850 #[test]
1851 fn test_gamma_pdf_integral() {
1852 let g = GammaDistribution::new(3.0, 2.0).unwrap();
1854 let n = 20_000;
1855 let dt = 20.0 / n as f64;
1856 let integral: f64 = (0..n)
1857 .map(|i| {
1858 let x = (i as f64 + 0.5) * dt;
1859 g.pdf(x) * dt
1860 })
1861 .sum();
1862 assert!((integral - 1.0).abs() < 0.01, "PDF integral = {integral}");
1863 }
1864
1865 #[test]
1866 fn test_gamma_cdf_known() {
1867 let g = GammaDistribution::new(1.0, 1.0).unwrap();
1869 let expected = 1.0 - (-1.0_f64).exp();
1870 assert!((g.cdf(1.0) - expected).abs() < 1e-10);
1871 }
1872
1873 #[test]
1874 fn test_gamma_cdf_chi2() {
1875 let g = GammaDistribution::new(1.0, 0.5).unwrap(); let x = 4.0;
1879 let expected = 1.0 - (-0.5_f64 * x).exp();
1880 assert!(
1881 (g.cdf(x) - expected).abs() < 1e-8,
1882 "CDF({x}) = {} vs {expected}",
1883 g.cdf(x)
1884 );
1885 }
1886
1887 #[test]
1888 fn test_gamma_quantile_roundtrip() {
1889 let g = GammaDistribution::new(5.0, 2.0).unwrap();
1890 for &p in &[0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99] {
1891 let x = g.quantile(p).unwrap();
1892 let p_back = g.cdf(x);
1893 assert!((p_back - p).abs() < 1e-6, "p={p}, x={x}, p_back={p_back}");
1894 }
1895 }
1896
1897 #[test]
1898 fn test_gamma_mode_small_shape() {
1899 let g = GammaDistribution::new(0.5, 1.0).unwrap();
1900 assert_eq!(g.mode(), 0.0); }
1902
1903 #[test]
1904 fn test_gamma_invalid() {
1905 assert!(GammaDistribution::new(0.0, 1.0).is_err());
1906 assert!(GammaDistribution::new(-1.0, 1.0).is_err());
1907 assert!(GammaDistribution::new(1.0, 0.0).is_err());
1908 assert!(GammaDistribution::new(1.0, -1.0).is_err());
1909 assert!(GammaDistribution::new(f64::NAN, 1.0).is_err());
1910 }
1911
1912 #[test]
1915 fn test_beta_mean_variance() {
1916 let b = BetaDistribution::new(2.0, 5.0).unwrap();
1917 assert!((b.mean() - 2.0 / 7.0).abs() < 1e-10);
1919 let expected_var = 2.0 * 5.0 / (7.0 * 7.0 * 8.0);
1921 assert!((b.variance() - expected_var).abs() < 1e-10);
1922 }
1923
1924 #[test]
1925 fn test_beta_symmetric() {
1926 let b = BetaDistribution::new(3.0, 3.0).unwrap();
1928 assert!((b.mean() - 0.5).abs() < 1e-10);
1929 assert!((b.mode().unwrap() - 0.5).abs() < 1e-10);
1930 assert!((b.pdf(0.3) - b.pdf(0.7)).abs() < 1e-10);
1932 }
1933
1934 #[test]
1935 fn test_beta_uniform_special_case() {
1936 let b = BetaDistribution::new(1.0, 1.0).unwrap();
1938 assert!((b.mean() - 0.5).abs() < 1e-10);
1939 assert!((b.cdf(0.5) - 0.5).abs() < 1e-10);
1940 assert!((b.cdf(0.25) - 0.25).abs() < 1e-10);
1941 }
1942
1943 #[test]
1944 fn test_beta_cdf_boundaries() {
1945 let b = BetaDistribution::new(2.0, 3.0).unwrap();
1946 assert_eq!(b.cdf(0.0), 0.0);
1947 assert_eq!(b.cdf(-1.0), 0.0);
1948 assert_eq!(b.cdf(1.0), 1.0);
1949 assert_eq!(b.cdf(2.0), 1.0);
1950 }
1951
1952 #[test]
1953 fn test_beta_pdf_integral() {
1954 let b = BetaDistribution::new(2.0, 5.0).unwrap();
1955 let n = 10_000;
1956 let dt = 1.0 / n as f64;
1957 let integral: f64 = (0..n)
1958 .map(|i| {
1959 let x = (i as f64 + 0.5) * dt;
1960 b.pdf(x) * dt
1961 })
1962 .sum();
1963 assert!((integral - 1.0).abs() < 0.01, "PDF integral = {integral}");
1964 }
1965
1966 #[test]
1967 fn test_beta_quantile_roundtrip() {
1968 let b = BetaDistribution::new(2.0, 5.0).unwrap();
1969 for &p in &[0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99] {
1970 let x = b.quantile(p).unwrap();
1971 let p_back = b.cdf(x);
1972 assert!((p_back - p).abs() < 1e-6, "p={p}, x={x}, p_back={p_back}");
1973 }
1974 }
1975
1976 #[test]
1977 fn test_beta_mode() {
1978 let b = BetaDistribution::new(5.0, 3.0).unwrap();
1979 assert!((b.mode().unwrap() - 2.0 / 3.0).abs() < 1e-10);
1981 }
1982
1983 #[test]
1984 fn test_beta_mode_undefined() {
1985 let b = BetaDistribution::new(0.5, 0.5).unwrap();
1987 assert!(b.mode().is_none());
1988 }
1989
1990 #[test]
1991 fn test_beta_invalid() {
1992 assert!(BetaDistribution::new(0.0, 1.0).is_err());
1993 assert!(BetaDistribution::new(-1.0, 1.0).is_err());
1994 assert!(BetaDistribution::new(1.0, 0.0).is_err());
1995 assert!(BetaDistribution::new(f64::NAN, 1.0).is_err());
1996 }
1997
1998 #[test]
2001 fn test_chi2_mean_variance() {
2002 let chi2 = ChiSquared::new(5.0).unwrap();
2003 assert!((chi2.mean() - 5.0).abs() < 1e-10);
2004 assert!((chi2.variance() - 10.0).abs() < 1e-10);
2005 }
2006
2007 #[test]
2008 fn test_chi2_exp_special_case() {
2009 let chi2 = ChiSquared::new(2.0).unwrap();
2011 let x = 4.0;
2012 let expected = 1.0 - (-x / 2.0_f64).exp();
2013 assert!(
2014 (chi2.cdf(x) - expected).abs() < 1e-8,
2015 "CDF({x}) = {} vs {expected}",
2016 chi2.cdf(x)
2017 );
2018 }
2019
2020 #[test]
2021 fn test_chi2_cdf_boundaries() {
2022 let chi2 = ChiSquared::new(3.0).unwrap();
2023 assert_eq!(chi2.cdf(0.0), 0.0);
2024 assert_eq!(chi2.cdf(-1.0), 0.0);
2025 assert!(chi2.cdf(100.0) > 0.999);
2027 }
2028
2029 #[test]
2030 fn test_chi2_pdf_integral() {
2031 let chi2 = ChiSquared::new(4.0).unwrap();
2032 let n = 20_000;
2033 let dt = 30.0 / n as f64;
2034 let integral: f64 = (0..n)
2035 .map(|i| {
2036 let x = (i as f64 + 0.5) * dt;
2037 chi2.pdf(x) * dt
2038 })
2039 .sum();
2040 assert!((integral - 1.0).abs() < 0.01, "PDF integral = {integral}");
2041 }
2042
2043 #[test]
2044 fn test_chi2_quantile_roundtrip() {
2045 let chi2 = ChiSquared::new(5.0).unwrap();
2046 for &p in &[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99] {
2047 let x = chi2.quantile(p).unwrap();
2048 let p_back = chi2.cdf(x);
2049 assert!((p_back - p).abs() < 1e-6, "p={p}, x={x}, p_back={p_back}");
2050 }
2051 }
2052
2053 #[test]
2054 fn test_chi2_known_quantiles() {
2055 let chi2 = ChiSquared::new(5.0).unwrap();
2058 let q05 = chi2.quantile(0.05).unwrap();
2059 assert!((q05 - 1.1455).abs() < 0.01, "q(0.05) = {q05}");
2060 let q95 = chi2.quantile(0.95).unwrap();
2061 assert!((q95 - 11.0705).abs() < 0.01, "q(0.95) = {q95}");
2062 }
2063
2064 #[test]
2065 fn test_chi2_invalid() {
2066 assert!(ChiSquared::new(0.0).is_err());
2067 assert!(ChiSquared::new(-1.0).is_err());
2068 assert!(ChiSquared::new(f64::NAN).is_err());
2069 }
2070
2071 #[test]
2072 fn test_chi2_gamma_consistency() {
2073 let k = 6.0;
2075 let chi2 = ChiSquared::new(k).unwrap();
2076 let gamma = GammaDistribution::new(k / 2.0, 0.5).unwrap();
2077
2078 let x = 5.0;
2079 assert!(
2080 (chi2.cdf(x) - gamma.cdf(x)).abs() < 1e-10,
2081 "Chi2 and Gamma CDF should match"
2082 );
2083 }
2084
2085 #[test]
2086 fn test_weibull_mean_variance_known() {
2087 let w = Weibull::new(3.6, 1000.0).unwrap();
2090 let mean = w.mean();
2091 assert!(mean > 800.0 && mean < 1000.0, "mean={mean}");
2092 assert!(w.variance() > 0.0);
2093 }
2094
2095 #[test]
2096 fn test_weibull_pdf_integral_approx() {
2097 let w = Weibull::new(2.0, 10.0).unwrap();
2099 let n = 10_000;
2100 let dt = 50.0 / n as f64;
2101 let integral: f64 = (0..n)
2102 .map(|i| {
2103 let t = (i as f64 + 0.5) * dt;
2104 w.pdf(t) * dt
2105 })
2106 .sum();
2107 assert!(
2108 (integral - 1.0).abs() < 0.01,
2109 "PDF integral = {integral}, expected ≈ 1.0"
2110 );
2111 }
2112}
2113
// Property-based tests: each property is checked against 300 randomly
// generated parameter sets via the `proptest!` macro. Strategy ranges are
// chosen so that constructor preconditions (e.g. min < max, positive
// shape/rate) always hold, making the `unwrap()` calls safe.
#[cfg(test)]
mod proptests {
    use super::*;
    use proptest::prelude::*;

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(300))]

        // A CDF is a probability, so it must lie in [0, 1] even for x
        // far outside the [min, max] support.
        #[test]
        fn uniform_cdf_in_01(
            min in -100.0_f64..0.0,
            max in 1.0_f64..100.0,
            x in -200.0_f64..200.0,
        ) {
            let u = Uniform::new(min, max).unwrap();
            let c = u.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c));
        }

        // cdf(quantile(p)) == p; the uniform quantile is exact arithmetic,
        // hence the tight 1e-12 tolerance.
        #[test]
        fn uniform_quantile_roundtrip(
            min in -100.0_f64..0.0,
            max in 1.0_f64..100.0,
            p in 0.0_f64..=1.0,
        ) {
            let u = Uniform::new(min, max).unwrap();
            let x = u.quantile(p).unwrap();
            let p_back = u.cdf(x);
            prop_assert!((p_back - p).abs() < 1e-12, "roundtrip: p={p} -> x={x} -> p_back={p_back}");
        }

        // mode is parameterized as a fraction of the range so that
        // min <= mode <= max holds by construction.
        #[test]
        fn triangular_cdf_in_01(
            min in -100.0_f64..-1.0,
            mode_frac in 0.0_f64..=1.0,
            range in 1.0_f64..100.0,
            x in -200.0_f64..200.0,
        ) {
            let max = min + range;
            let mode = min + mode_frac * range;
            let t = Triangular::new(min, mode, max).unwrap();
            let c = t.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c));
        }

        // p and mode_frac avoid the exact endpoints, where the piecewise
        // quantile/cdf branches meet.
        #[test]
        fn triangular_quantile_roundtrip(
            min in -50.0_f64..0.0,
            mode_frac in 0.01_f64..0.99,
            range in 1.0_f64..50.0,
            p in 0.001_f64..0.999,
        ) {
            let max = min + range;
            let mode = min + mode_frac * range;
            let t = Triangular::new(min, mode, max).unwrap();
            let x = t.quantile(p).unwrap();
            let p_back = t.cdf(x);
            prop_assert!(
                (p_back - p).abs() < 1e-8,
                "roundtrip: p={p} -> x={x} -> p_back={p_back}"
            );
        }

        // Classic PERT mean formula: (min + 4·mode + max) / 6.
        #[test]
        fn pert_mean_formula(
            min in -50.0_f64..0.0,
            mode_frac in 0.01_f64..0.99,
            range in 1.0_f64..50.0,
        ) {
            let max = min + range;
            let mode = min + mode_frac * range;
            let p = Pert::new(min, mode, max).unwrap();
            let expected = (min + 4.0 * mode + max) / 6.0;
            prop_assert!(
                (p.mean() - expected).abs() < 1e-10,
                "PERT mean: {} vs expected: {}",
                p.mean(),
                expected
            );
        }

        // Any non-degenerate PERT distribution must have positive spread.
        #[test]
        fn pert_variance_positive(
            min in -50.0_f64..0.0,
            mode_frac in 0.01_f64..0.99,
            range in 1.0_f64..50.0,
        ) {
            let max = min + range;
            let mode = min + mode_frac * range;
            let p = Pert::new(min, mode, max).unwrap();
            prop_assert!(p.variance() > 0.0);
        }

        // The CDF sampled at 21 evenly spaced points across the support
        // must be non-decreasing (1e-10 slack for floating-point noise).
        #[test]
        fn pert_cdf_monotonic(
            min in -50.0_f64..0.0,
            mode_frac in 0.05_f64..0.95,
            range in 2.0_f64..50.0,
        ) {
            let max = min + range;
            let mode = min + mode_frac * range;
            let p = Pert::new(min, mode, max).unwrap();
            let mut prev = 0.0;
            for i in 0..=20 {
                let x = min + (i as f64 / 20.0) * range;
                let c = p.cdf(x);
                prop_assert!(c >= prev - 1e-10, "CDF not monotonic at x={x}");
                prev = c;
            }
        }

        // Weibull CDF is a probability for any positive shape/scale.
        #[test]
        fn weibull_cdf_in_01(
            shape in 0.1_f64..10.0,
            scale in 0.1_f64..100.0,
            t in 0.0_f64..200.0,
        ) {
            let w = Weibull::new(shape, scale).unwrap();
            let c = w.cdf(t);
            prop_assert!((0.0..=1.0).contains(&c), "CDF({t}) = {c} out of [0,1]");
        }

        // shape >= 0.5 here: very small shapes concentrate mass near zero
        // and would stress the numeric round-trip tolerance.
        #[test]
        fn weibull_quantile_roundtrip(
            shape in 0.5_f64..10.0,
            scale in 1.0_f64..100.0,
            p in 0.001_f64..0.999,
        ) {
            let w = Weibull::new(shape, scale).unwrap();
            let t = w.quantile(p).unwrap();
            let p_back = w.cdf(t);
            prop_assert!(
                (p_back - p).abs() < 1e-8,
                "roundtrip: p={p} -> t={t} -> p_back={p_back}"
            );
        }

        // A density can never be negative.
        #[test]
        fn weibull_pdf_non_negative(
            shape in 0.1_f64..10.0,
            scale in 0.1_f64..100.0,
            t in 0.0_f64..200.0,
        ) {
            let w = Weibull::new(shape, scale).unwrap();
            prop_assert!(w.pdf(t) >= 0.0, "PDF({t}) must be >= 0");
        }

        // Reliability (survival) is the complement of the CDF.
        #[test]
        fn weibull_reliability_plus_cdf_is_one(
            shape in 0.5_f64..5.0,
            scale in 1.0_f64..50.0,
            t in 0.001_f64..100.0,
        ) {
            let w = Weibull::new(shape, scale).unwrap();
            let sum = w.cdf(t) + w.reliability(t);
            prop_assert!(
                (sum - 1.0).abs() < 1e-12,
                "CDF + R should = 1, got {sum}"
            );
        }

        #[test]
        fn weibull_variance_positive(
            shape in 0.1_f64..10.0,
            scale in 0.1_f64..100.0,
        ) {
            let w = Weibull::new(shape, scale).unwrap();
            prop_assert!(w.variance() > 0.0, "variance must be positive");
        }

        // Exponential CDF is a probability for any positive rate.
        #[test]
        fn exponential_cdf_in_01(
            rate in 0.01_f64..10.0,
            x in 0.0_f64..100.0,
        ) {
            let e = Exponential::new(rate).unwrap();
            let c = e.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
        }

        // Closed-form quantile, so a tight 1e-10 tolerance is expected.
        #[test]
        fn exponential_quantile_roundtrip(
            rate in 0.01_f64..10.0,
            p in 0.001_f64..0.999,
        ) {
            let e = Exponential::new(rate).unwrap();
            let x = e.quantile(p).unwrap();
            let p_back = e.cdf(x);
            prop_assert!((p_back - p).abs() < 1e-10);
        }

        // Gamma CDF is a probability across a broad parameter box.
        #[test]
        fn gamma_cdf_in_01(
            shape in 0.5_f64..10.0,
            rate in 0.1_f64..10.0,
            x in 0.001_f64..50.0,
        ) {
            let g = GammaDistribution::new(shape, rate).unwrap();
            let c = g.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
        }

        #[test]
        fn gamma_variance_positive(
            shape in 0.1_f64..10.0,
            rate in 0.1_f64..10.0,
        ) {
            let g = GammaDistribution::new(shape, rate).unwrap();
            prop_assert!(g.variance() > 0.0, "variance must be positive");
        }

        // Beta CDF stays in [0, 1] on the open interior of the support.
        #[test]
        fn beta_cdf_in_01(
            alpha in 0.5_f64..10.0,
            beta_param in 0.5_f64..10.0,
            x in 0.001_f64..0.999,
        ) {
            let b = BetaDistribution::new(alpha, beta_param).unwrap();
            let c = b.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
        }

        // Looser 1e-5 tolerance: the incomplete-beta inverse is iterative.
        #[test]
        fn beta_quantile_roundtrip(
            alpha in 1.0_f64..10.0,
            beta_param in 1.0_f64..10.0,
            p in 0.01_f64..0.99,
        ) {
            let b = BetaDistribution::new(alpha, beta_param).unwrap();
            let x = b.quantile(p).unwrap();
            let p_back = b.cdf(x);
            prop_assert!((p_back - p).abs() < 1e-5, "p={p}, p_back={p_back}");
        }

        // χ² CDF is a probability for any positive degrees of freedom.
        #[test]
        fn chi2_cdf_in_01(
            k in 1.0_f64..20.0,
            x in 0.001_f64..50.0,
        ) {
            let chi2 = ChiSquared::new(k).unwrap();
            let c = chi2.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
        }

        #[test]
        fn chi2_variance_positive(
            k in 0.5_f64..50.0,
        ) {
            let chi2 = ChiSquared::new(k).unwrap();
            prop_assert!(chi2.variance() > 0.0, "variance must be positive");
        }
    }
}