1use crate::special;
27
/// Error raised when a distribution is constructed with invalid parameters.
#[derive(Debug, Clone, PartialEq)]
pub enum DistributionError {
    /// The supplied parameters violate the distribution's constraints.
    InvalidParameters(String),
}

impl std::fmt::Display for DistributionError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Single-variant enum: an irrefutable let binds the message directly.
        let DistributionError::InvalidParameters(msg) = self;
        write!(f, "invalid distribution parameters: {msg}")
    }
}

impl std::error::Error for DistributionError {}
46
47#[derive(Debug, Clone, PartialEq)]
59pub struct Uniform {
60 min: f64,
61 max: f64,
62}
63
64impl Uniform {
65 pub fn new(min: f64, max: f64) -> Result<Self, DistributionError> {
70 if !min.is_finite() || !max.is_finite() || min >= max {
71 return Err(DistributionError::InvalidParameters(format!(
72 "Uniform requires min < max, got min={min}, max={max}"
73 )));
74 }
75 Ok(Self { min, max })
76 }
77
78 pub fn min(&self) -> f64 {
79 self.min
80 }
81
82 pub fn max(&self) -> f64 {
83 self.max
84 }
85
86 pub fn mean(&self) -> f64 {
87 (self.min + self.max) / 2.0
88 }
89
90 pub fn variance(&self) -> f64 {
91 let range = self.max - self.min;
92 range * range / 12.0
93 }
94
95 pub fn cdf(&self, x: f64) -> f64 {
97 if x <= self.min {
98 0.0
99 } else if x >= self.max {
100 1.0
101 } else {
102 (x - self.min) / (self.max - self.min)
103 }
104 }
105
106 pub fn quantile(&self, p: f64) -> Option<f64> {
110 if !(0.0..=1.0).contains(&p) {
111 return None;
112 }
113 Some(self.min + p * (self.max - self.min))
114 }
115
116 pub fn pdf(&self, x: f64) -> f64 {
118 if x >= self.min && x <= self.max {
119 1.0 / (self.max - self.min)
120 } else {
121 0.0
122 }
123 }
124}
125
126#[derive(Debug, Clone, PartialEq)]
141pub struct Triangular {
142 min: f64,
143 mode: f64,
144 max: f64,
145}
146
147impl Triangular {
148 pub fn new(min: f64, mode: f64, max: f64) -> Result<Self, DistributionError> {
153 if !min.is_finite() || !mode.is_finite() || !max.is_finite() {
154 return Err(DistributionError::InvalidParameters(
155 "Triangular parameters must be finite".into(),
156 ));
157 }
158 if min > mode || mode > max || min >= max {
159 return Err(DistributionError::InvalidParameters(format!(
160 "Triangular requires min ≤ mode ≤ max and min < max, got {min}, {mode}, {max}"
161 )));
162 }
163 Ok(Self { min, mode, max })
164 }
165
166 pub fn min(&self) -> f64 {
167 self.min
168 }
169
170 pub fn mode(&self) -> f64 {
171 self.mode
172 }
173
174 pub fn max(&self) -> f64 {
175 self.max
176 }
177
178 pub fn mean(&self) -> f64 {
180 (self.min + self.mode + self.max) / 3.0
181 }
182
183 pub fn variance(&self) -> f64 {
185 let (a, b, c) = (self.min, self.mode, self.max);
186 (a * a + b * b + c * c - a * b - a * c - b * c) / 18.0
187 }
188
189 pub fn pdf(&self, x: f64) -> f64 {
197 let (a, b, c) = (self.min, self.mode, self.max);
198 if x < a || x > c {
199 0.0
200 } else if x <= b {
201 2.0 * (x - a) / ((c - a) * (b - a).max(f64::MIN_POSITIVE))
202 } else {
203 2.0 * (c - x) / ((c - a) * (c - b).max(f64::MIN_POSITIVE))
204 }
205 }
206
207 pub fn cdf(&self, x: f64) -> f64 {
214 let (a, b, c) = (self.min, self.mode, self.max);
215 if x <= a {
216 0.0
217 } else if x <= b {
218 (x - a) * (x - a) / ((c - a) * (b - a).max(f64::MIN_POSITIVE))
219 } else if x < c {
220 1.0 - (c - x) * (c - x) / ((c - a) * (c - b).max(f64::MIN_POSITIVE))
221 } else {
222 1.0
223 }
224 }
225
226 pub fn quantile(&self, p: f64) -> Option<f64> {
235 if !(0.0..=1.0).contains(&p) {
236 return None;
237 }
238 let (a, b, c) = (self.min, self.mode, self.max);
239 let fc = (b - a) / (c - a); if p < fc {
241 Some(a + ((c - a) * (b - a) * p).sqrt())
242 } else {
243 Some(c - ((c - a) * (c - b) * (1.0 - p)).sqrt())
244 }
245 }
246}
247
248#[derive(Debug, Clone, PartialEq)]
260pub struct Normal {
261 mu: f64,
262 sigma: f64,
263}
264
265impl Normal {
266 pub fn new(mu: f64, sigma: f64) -> Result<Self, DistributionError> {
271 if !mu.is_finite() || !sigma.is_finite() || sigma <= 0.0 {
272 return Err(DistributionError::InvalidParameters(format!(
273 "Normal requires finite μ and σ > 0, got μ={mu}, σ={sigma}"
274 )));
275 }
276 Ok(Self { mu, sigma })
277 }
278
279 pub fn mu(&self) -> f64 {
280 self.mu
281 }
282
283 pub fn sigma(&self) -> f64 {
284 self.sigma
285 }
286
287 pub fn mean(&self) -> f64 {
288 self.mu
289 }
290
291 pub fn variance(&self) -> f64 {
292 self.sigma * self.sigma
293 }
294
295 pub fn std_dev(&self) -> f64 {
296 self.sigma
297 }
298
299 pub fn pdf(&self, x: f64) -> f64 {
301 let z = (x - self.mu) / self.sigma;
302 special::standard_normal_pdf(z) / self.sigma
303 }
304
305 pub fn cdf(&self, x: f64) -> f64 {
307 let z = (x - self.mu) / self.sigma;
308 special::standard_normal_cdf(z)
309 }
310
311 pub fn quantile(&self, p: f64) -> Option<f64> {
315 if p <= 0.0 || p >= 1.0 {
316 return None;
317 }
318 Some(self.mu + self.sigma * special::inverse_normal_cdf(p))
319 }
320}
321
322#[derive(Debug, Clone, PartialEq)]
337pub struct LogNormal {
338 mu: f64,
339 sigma: f64,
340}
341
342impl LogNormal {
343 pub fn new(mu: f64, sigma: f64) -> Result<Self, DistributionError> {
350 if !mu.is_finite() || !sigma.is_finite() || sigma <= 0.0 {
351 return Err(DistributionError::InvalidParameters(format!(
352 "LogNormal requires finite μ and σ > 0, got μ={mu}, σ={sigma}"
353 )));
354 }
355 Ok(Self { mu, sigma })
356 }
357
358 pub fn mu(&self) -> f64 {
359 self.mu
360 }
361
362 pub fn sigma(&self) -> f64 {
363 self.sigma
364 }
365
366 pub fn mean(&self) -> f64 {
368 (self.mu + self.sigma * self.sigma / 2.0).exp()
369 }
370
371 pub fn variance(&self) -> f64 {
373 let s2 = self.sigma * self.sigma;
374 (s2.exp() - 1.0) * (2.0 * self.mu + s2).exp()
375 }
376
377 pub fn pdf(&self, x: f64) -> f64 {
379 if x <= 0.0 {
380 return 0.0;
381 }
382 let ln_x = x.ln();
383 let z = (ln_x - self.mu) / self.sigma;
384 special::standard_normal_pdf(z) / (x * self.sigma)
385 }
386
387 pub fn cdf(&self, x: f64) -> f64 {
389 if x <= 0.0 {
390 return 0.0;
391 }
392 let z = (x.ln() - self.mu) / self.sigma;
393 special::standard_normal_cdf(z)
394 }
395
396 pub fn quantile(&self, p: f64) -> Option<f64> {
400 if p <= 0.0 || p >= 1.0 {
401 return None;
402 }
403 Some((self.mu + self.sigma * special::inverse_normal_cdf(p)).exp())
404 }
405}
406
407#[derive(Debug, Clone, PartialEq)]
443pub struct Pert {
444 min: f64,
445 mode: f64,
446 max: f64,
447 alpha: f64,
448 beta: f64,
449}
450
451impl Pert {
452 pub fn new(min: f64, mode: f64, max: f64) -> Result<Self, DistributionError> {
457 Self::with_shape(min, mode, max, 4.0)
458 }
459
460 pub fn with_shape(
470 min: f64,
471 mode: f64,
472 max: f64,
473 lambda: f64,
474 ) -> Result<Self, DistributionError> {
475 if !min.is_finite() || !mode.is_finite() || !max.is_finite() || !lambda.is_finite() {
476 return Err(DistributionError::InvalidParameters(
477 "PERT parameters must be finite".into(),
478 ));
479 }
480 if min > mode || mode > max || min >= max {
481 return Err(DistributionError::InvalidParameters(format!(
482 "PERT requires min ≤ mode ≤ max and min < max, got {min}, {mode}, {max}"
483 )));
484 }
485 if lambda <= 0.0 {
486 return Err(DistributionError::InvalidParameters(format!(
487 "PERT λ must be > 0, got {lambda}"
488 )));
489 }
490
491 let range = max - min;
492 let alpha = 1.0 + lambda * (mode - min) / range;
493 let beta = 1.0 + lambda * (max - mode) / range;
494
495 Ok(Self {
496 min,
497 mode,
498 max,
499 alpha,
500 beta,
501 })
502 }
503
504 pub fn min(&self) -> f64 {
505 self.min
506 }
507
508 pub fn mode(&self) -> f64 {
509 self.mode
510 }
511
512 pub fn max(&self) -> f64 {
513 self.max
514 }
515
516 pub fn alpha(&self) -> f64 {
517 self.alpha
518 }
519
520 pub fn beta_param(&self) -> f64 {
521 self.beta
522 }
523
524 pub fn mean(&self) -> f64 {
528 let ab = self.alpha + self.beta;
529 self.min + (self.alpha / ab) * (self.max - self.min)
531 }
532
533 pub fn variance(&self) -> f64 {
539 let ab = self.alpha + self.beta;
540 let range = self.max - self.min;
541 (self.alpha * self.beta) / (ab * ab * (ab + 1.0)) * range * range
542 }
543
544 pub fn std_dev(&self) -> f64 {
546 self.variance().sqrt()
547 }
548
549 pub fn cdf(&self, x: f64) -> f64 {
554 if x <= self.min {
555 return 0.0;
556 }
557 if x >= self.max {
558 return 1.0;
559 }
560 let t = (x - self.min) / (self.max - self.min);
561 special::regularized_incomplete_beta(t, self.alpha, self.beta)
562 }
563
564 pub fn quantile_approx(&self, p: f64) -> Option<f64> {
571 if p <= 0.0 || p >= 1.0 {
572 return None;
573 }
574 let z = special::inverse_normal_cdf(p);
575 let result = self.mean() + z * self.std_dev();
576 Some(result.clamp(self.min, self.max))
578 }
579}
580
581#[derive(Debug, Clone, PartialEq)]
604pub struct Weibull {
605 shape: f64, scale: f64, }
608
609impl Weibull {
610 pub fn new(shape: f64, scale: f64) -> Result<Self, DistributionError> {
615 if !shape.is_finite() || !scale.is_finite() || shape <= 0.0 || scale <= 0.0 {
616 return Err(DistributionError::InvalidParameters(format!(
617 "Weibull requires shape > 0 and scale > 0, got shape={shape}, scale={scale}"
618 )));
619 }
620 Ok(Self { shape, scale })
621 }
622
623 pub fn shape(&self) -> f64 {
625 self.shape
626 }
627
628 pub fn scale(&self) -> f64 {
630 self.scale
631 }
632
633 pub fn mean(&self) -> f64 {
635 self.scale * special::gamma(1.0 + 1.0 / self.shape)
636 }
637
638 pub fn variance(&self) -> f64 {
640 let g1 = special::gamma(1.0 + 1.0 / self.shape);
641 let g2 = special::gamma(1.0 + 2.0 / self.shape);
642 self.scale * self.scale * (g2 - g1 * g1)
643 }
644
645 pub fn pdf(&self, t: f64) -> f64 {
647 if t < 0.0 {
648 return 0.0;
649 }
650 if t == 0.0 {
651 return if self.shape < 1.0 {
652 f64::INFINITY
653 } else if (self.shape - 1.0).abs() < f64::EPSILON {
654 1.0 / self.scale
655 } else {
656 0.0
657 };
658 }
659 let z = t / self.scale;
660 (self.shape / self.scale) * z.powf(self.shape - 1.0) * (-z.powf(self.shape)).exp()
661 }
662
663 pub fn cdf(&self, t: f64) -> f64 {
665 if t <= 0.0 {
666 return 0.0;
667 }
668 let z = t / self.scale;
669 1.0 - (-z.powf(self.shape)).exp()
670 }
671
672 pub fn quantile(&self, p: f64) -> Option<f64> {
676 if !(0.0..1.0).contains(&p) {
677 return None;
678 }
679 if p == 0.0 {
680 return Some(0.0);
681 }
682 Some(self.scale * (-(1.0 - p).ln()).powf(1.0 / self.shape))
683 }
684
685 pub fn hazard_rate(&self, t: f64) -> f64 {
691 if t <= 0.0 {
692 return if t == 0.0 && self.shape >= 1.0 {
693 if (self.shape - 1.0).abs() < f64::EPSILON {
694 1.0 / self.scale
695 } else {
696 0.0
697 }
698 } else {
699 0.0
700 };
701 }
702 let z = t / self.scale;
703 (self.shape / self.scale) * z.powf(self.shape - 1.0)
704 }
705
706 pub fn reliability(&self, t: f64) -> f64 {
708 1.0 - self.cdf(t)
709 }
710}
711
712#[derive(Debug, Clone, Copy)]
731pub struct Exponential {
732 rate: f64,
733}
734
735impl Exponential {
736 pub fn new(rate: f64) -> Result<Self, DistributionError> {
740 if !rate.is_finite() || rate <= 0.0 {
741 return Err(DistributionError::InvalidParameters(format!(
742 "Exponential rate must be positive and finite, got {rate}"
743 )));
744 }
745 Ok(Self { rate })
746 }
747
748 pub fn rate(&self) -> f64 {
750 self.rate
751 }
752
753 pub fn mean(&self) -> f64 {
755 1.0 / self.rate
756 }
757
758 pub fn variance(&self) -> f64 {
760 1.0 / (self.rate * self.rate)
761 }
762
763 pub fn pdf(&self, x: f64) -> f64 {
765 if x < 0.0 {
766 0.0
767 } else {
768 self.rate * (-self.rate * x).exp()
769 }
770 }
771
772 pub fn cdf(&self, x: f64) -> f64 {
774 if x < 0.0 {
775 0.0
776 } else {
777 1.0 - (-self.rate * x).exp()
778 }
779 }
780
781 pub fn quantile(&self, p: f64) -> Option<f64> {
785 if !(0.0..=1.0).contains(&p) {
786 return None;
787 }
788 if p == 1.0 {
789 return Some(f64::INFINITY);
790 }
791 Some(-(1.0 - p).ln() / self.rate)
792 }
793
794 pub fn hazard_rate(&self) -> f64 {
796 self.rate
797 }
798
799 pub fn survival(&self, x: f64) -> f64 {
801 1.0 - self.cdf(x)
802 }
803}
804
805#[derive(Debug, Clone, Copy)]
832pub struct GammaDistribution {
833 shape: f64, rate: f64, }
836
837impl GammaDistribution {
838 pub fn new(shape: f64, rate: f64) -> Result<Self, DistributionError> {
842 if !shape.is_finite() || shape <= 0.0 || !rate.is_finite() || rate <= 0.0 {
843 return Err(DistributionError::InvalidParameters(format!(
844 "Gamma shape and rate must be positive and finite, got shape={shape}, rate={rate}"
845 )));
846 }
847 Ok(Self { shape, rate })
848 }
849
850 pub fn from_shape_scale(shape: f64, scale: f64) -> Result<Self, DistributionError> {
852 if !scale.is_finite() || scale <= 0.0 {
853 return Err(DistributionError::InvalidParameters(format!(
854 "Gamma scale must be positive and finite, got {scale}"
855 )));
856 }
857 Self::new(shape, 1.0 / scale)
858 }
859
860 pub fn shape(&self) -> f64 {
862 self.shape
863 }
864
865 pub fn rate(&self) -> f64 {
867 self.rate
868 }
869
870 pub fn scale(&self) -> f64 {
872 1.0 / self.rate
873 }
874
875 pub fn mean(&self) -> f64 {
877 self.shape / self.rate
878 }
879
880 pub fn variance(&self) -> f64 {
882 self.shape / (self.rate * self.rate)
883 }
884
885 pub fn mode(&self) -> f64 {
887 if self.shape >= 1.0 {
888 (self.shape - 1.0) / self.rate
889 } else {
890 0.0
891 }
892 }
893
894 pub fn pdf(&self, x: f64) -> f64 {
896 if x <= 0.0 {
897 if x == 0.0 && self.shape < 1.0 {
898 return f64::INFINITY;
899 }
900 return 0.0;
901 }
902 let log_pdf = self.shape * self.rate.ln() - special::ln_gamma(self.shape)
903 + (self.shape - 1.0) * x.ln()
904 - self.rate * x;
905 log_pdf.exp()
906 }
907
908 pub fn cdf(&self, x: f64) -> f64 {
912 if x <= 0.0 {
913 return 0.0;
914 }
915 special::regularized_lower_gamma(self.shape, self.rate * x)
916 }
917
918 pub fn quantile(&self, p: f64) -> Option<f64> {
922 if !(0.0..=1.0).contains(&p) {
923 return None;
924 }
925 if p == 0.0 {
926 return Some(0.0);
927 }
928 if p == 1.0 {
929 return Some(f64::INFINITY);
930 }
931
932 let z = crate::special::inverse_normal_cdf(p);
934 let v = 1.0 / (9.0 * self.shape);
935 let chi2_approx = self.shape * (1.0 - v + z * v.sqrt()).powi(3).max(0.001);
936 let mut x = chi2_approx / self.rate;
937
938 for _ in 0..50 {
940 let f = self.cdf(x) - p;
941 let fp = self.pdf(x);
942 if fp < 1e-300 {
943 break;
944 }
945 let step = f / fp;
946 x = (x - step).max(1e-15);
947 if step.abs() < 1e-12 * x {
948 break;
949 }
950 }
951
952 Some(x)
953 }
954}
955
956#[derive(Debug, Clone)]
985pub struct BetaDistribution {
986 pub alpha: f64,
988 pub beta: f64,
990}
991
992impl BetaDistribution {
993 pub fn new(alpha: f64, beta: f64) -> Result<Self, DistributionError> {
998 if alpha <= 0.0 || !alpha.is_finite() {
999 return Err(DistributionError::InvalidParameters(format!(
1000 "alpha must be positive and finite, got {alpha}"
1001 )));
1002 }
1003 if beta <= 0.0 || !beta.is_finite() {
1004 return Err(DistributionError::InvalidParameters(format!(
1005 "beta must be positive and finite, got {beta}"
1006 )));
1007 }
1008 Ok(Self { alpha, beta })
1009 }
1010
1011 pub fn mean(&self) -> f64 {
1013 self.alpha / (self.alpha + self.beta)
1014 }
1015
1016 pub fn variance(&self) -> f64 {
1018 let ab = self.alpha + self.beta;
1019 self.alpha * self.beta / (ab * ab * (ab + 1.0))
1020 }
1021
1022 pub fn mode(&self) -> Option<f64> {
1027 if self.alpha > 1.0 && self.beta > 1.0 {
1028 Some((self.alpha - 1.0) / (self.alpha + self.beta - 2.0))
1029 } else {
1030 None
1031 }
1032 }
1033
1034 pub fn pdf(&self, x: f64) -> f64 {
1036 if x <= 0.0 || x >= 1.0 {
1037 return 0.0;
1038 }
1039 let ln_pdf = (self.alpha - 1.0) * x.ln() + (self.beta - 1.0) * (1.0 - x).ln()
1040 - special::ln_beta(self.alpha, self.beta);
1041 ln_pdf.exp()
1042 }
1043
1044 pub fn cdf(&self, x: f64) -> f64 {
1046 if x <= 0.0 {
1047 return 0.0;
1048 }
1049 if x >= 1.0 {
1050 return 1.0;
1051 }
1052 special::regularized_incomplete_beta(x, self.alpha, self.beta)
1053 }
1054
1055 pub fn quantile(&self, p: f64) -> Option<f64> {
1060 if !(0.0..=1.0).contains(&p) {
1061 return None;
1062 }
1063 if p == 0.0 {
1064 return Some(0.0);
1065 }
1066 if (p - 1.0).abs() < 1e-15 {
1067 return Some(1.0);
1068 }
1069
1070 let mut lo = 0.0_f64;
1072 let mut hi = 1.0_f64;
1073
1074 for _ in 0..100 {
1075 let mid = (lo + hi) / 2.0;
1076 if hi - lo < 1e-14 {
1077 break;
1078 }
1079 if self.cdf(mid) < p {
1080 lo = mid;
1081 } else {
1082 hi = mid;
1083 }
1084 }
1085
1086 Some((lo + hi) / 2.0)
1087 }
1088}
1089
1090#[derive(Debug, Clone)]
1121pub struct ChiSquared {
1122 pub k: f64,
1124}
1125
1126impl ChiSquared {
1127 pub fn new(k: f64) -> Result<Self, DistributionError> {
1132 if k <= 0.0 || !k.is_finite() {
1133 return Err(DistributionError::InvalidParameters(format!(
1134 "k must be positive and finite, got {k}"
1135 )));
1136 }
1137 Ok(Self { k })
1138 }
1139
1140 pub fn mean(&self) -> f64 {
1142 self.k
1143 }
1144
1145 pub fn variance(&self) -> f64 {
1147 2.0 * self.k
1148 }
1149
1150 pub fn pdf(&self, x: f64) -> f64 {
1152 if x <= 0.0 {
1153 return 0.0;
1154 }
1155 let half_k = self.k / 2.0;
1156 let ln_pdf =
1157 (half_k - 1.0) * x.ln() - x / 2.0 - half_k * 2.0_f64.ln() - special::ln_gamma(half_k);
1158 ln_pdf.exp()
1159 }
1160
1161 pub fn cdf(&self, x: f64) -> f64 {
1163 if x <= 0.0 {
1164 return 0.0;
1165 }
1166 special::regularized_lower_gamma(self.k / 2.0, x / 2.0)
1167 }
1168
1169 pub fn quantile(&self, p: f64) -> Option<f64> {
1173 if !(0.0..=1.0).contains(&p) {
1174 return None;
1175 }
1176 if p == 0.0 {
1177 return Some(0.0);
1178 }
1179 if (p - 1.0).abs() < 1e-15 {
1180 return Some(f64::INFINITY);
1181 }
1182
1183 let gamma = GammaDistribution {
1185 shape: self.k / 2.0,
1186 rate: 0.5,
1187 };
1188 gamma.quantile(p)
1189 }
1190}
1191
1192#[cfg(test)]
1197mod tests {
1198 use super::*;
1199
    // --- Uniform ---

    #[test]
    fn test_uniform_basic() {
        // Mean is the midpoint; variance is range²/12.
        let u = Uniform::new(0.0, 10.0).unwrap();
        assert!((u.mean() - 5.0).abs() < 1e-15);
        assert!((u.variance() - 100.0 / 12.0).abs() < 1e-10);
    }

    #[test]
    fn test_uniform_cdf() {
        // CDF is 0 below the support and 1 above it.
        let u = Uniform::new(0.0, 10.0).unwrap();
        assert_eq!(u.cdf(-1.0), 0.0);
        assert!((u.cdf(5.0) - 0.5).abs() < 1e-15);
        assert_eq!(u.cdf(11.0), 1.0);
    }

    #[test]
    fn test_uniform_quantile() {
        // Quantile is the linear interpolation between the bounds.
        let u = Uniform::new(2.0, 8.0).unwrap();
        assert_eq!(u.quantile(0.0), Some(2.0));
        assert_eq!(u.quantile(1.0), Some(8.0));
        assert!((u.quantile(0.5).unwrap() - 5.0).abs() < 1e-15);
    }

    #[test]
    fn test_uniform_pdf() {
        // Density is 1/range inside the support, 0 outside.
        let u = Uniform::new(0.0, 5.0).unwrap();
        assert!((u.pdf(2.5) - 0.2).abs() < 1e-15);
        assert_eq!(u.pdf(-1.0), 0.0);
    }

    #[test]
    fn test_uniform_invalid() {
        // Degenerate, inverted, and non-finite bounds are all rejected.
        assert!(Uniform::new(5.0, 5.0).is_err());
        assert!(Uniform::new(5.0, 3.0).is_err());
        assert!(Uniform::new(f64::NAN, 5.0).is_err());
    }
1238
    // --- Triangular ---

    #[test]
    fn test_triangular_mean() {
        // Mean is the average of min, mode, and max.
        let t = Triangular::new(0.0, 3.0, 6.0).unwrap();
        assert!((t.mean() - 3.0).abs() < 1e-15);
    }

    #[test]
    fn test_triangular_symmetric_variance() {
        let t = Triangular::new(0.0, 5.0, 10.0).unwrap();
        // (a² + b² + c² − ab − ac − bc) / 18 with a=0, b=5, c=10.
        let expected = (0.0 + 25.0 + 100.0 - 0.0 - 0.0 - 50.0) / 18.0;
        assert!((t.variance() - expected).abs() < 1e-10);
    }

    #[test]
    fn test_triangular_cdf() {
        let t = Triangular::new(0.0, 5.0, 10.0).unwrap();
        assert!((t.cdf(0.0)).abs() < 1e-15);
        assert!((t.cdf(10.0) - 1.0).abs() < 1e-15);
        // Symmetric case: the mode is also the median.
        assert!((t.cdf(5.0) - 0.5).abs() < 1e-15);
    }

    #[test]
    fn test_triangular_quantile() {
        let t = Triangular::new(0.0, 5.0, 10.0).unwrap();
        assert!((t.quantile(0.0).unwrap() - 0.0).abs() < 1e-15);
        assert!((t.quantile(1.0).unwrap() - 10.0).abs() < 1e-15);
        assert!((t.quantile(0.5).unwrap() - 5.0).abs() < 1e-10);
    }

    #[test]
    fn test_triangular_invalid() {
        // mode below min, mode above max, and a degenerate support.
        assert!(Triangular::new(5.0, 3.0, 10.0).is_err());
        assert!(Triangular::new(0.0, 11.0, 10.0).is_err());
        assert!(Triangular::new(5.0, 5.0, 5.0).is_err());
    }
1278
    // --- Normal ---

    #[test]
    fn test_normal_standard() {
        let n = Normal::new(0.0, 1.0).unwrap();
        assert!((n.mean()).abs() < 1e-15);
        assert!((n.variance() - 1.0).abs() < 1e-15);
        // CDF at the mean is exactly one half.
        assert!((n.cdf(0.0) - 0.5).abs() < 1e-7);
    }

    #[test]
    fn test_normal_shifted() {
        let n = Normal::new(10.0, 2.0).unwrap();
        assert!((n.mean() - 10.0).abs() < 1e-15);
        assert!((n.variance() - 4.0).abs() < 1e-15);
        assert!((n.cdf(10.0) - 0.5).abs() < 1e-7);
    }

    #[test]
    fn test_normal_quantile() {
        let n = Normal::new(0.0, 1.0).unwrap();
        assert!((n.quantile(0.5).unwrap()).abs() < 0.01);
        // 97.5th percentile of the standard normal ≈ 1.96.
        assert!((n.quantile(0.975).unwrap() - 1.96).abs() < 0.01);
    }

    #[test]
    fn test_normal_invalid() {
        // σ must be strictly positive.
        assert!(Normal::new(0.0, 0.0).is_err());
        assert!(Normal::new(0.0, -1.0).is_err());
    }
1309
    // --- LogNormal ---

    #[test]
    fn test_lognormal_mean() {
        let ln = LogNormal::new(0.0, 1.0).unwrap();
        // E[X] = exp(μ + σ²/2) = exp(0.5) here.
        let expected = (0.5_f64).exp();
        assert!((ln.mean() - expected).abs() < 1e-10);
    }

    #[test]
    fn test_lognormal_cdf() {
        let ln = LogNormal::new(0.0, 1.0).unwrap();
        assert_eq!(ln.cdf(0.0), 0.0);
        // Median is exp(μ) = 1 for μ = 0.
        assert!((ln.cdf(1.0) - 0.5).abs() < 0.001);
    }

    #[test]
    fn test_lognormal_quantile() {
        let ln = LogNormal::new(0.0, 1.0).unwrap();
        let q50 = ln.quantile(0.5).unwrap();
        assert!((q50 - 1.0).abs() < 0.01);
    }
1334
    // --- PERT ---

    #[test]
    fn test_pert_mean() {
        // Classic PERT mean: (min + 4·mode + max) / 6 = 4 here.
        let p = Pert::new(1.0, 4.0, 7.0).unwrap();
        assert!((p.mean() - 4.0).abs() < 1e-10);
    }

    #[test]
    fn test_pert_symmetric_variance() {
        let p = Pert::new(0.0, 5.0, 10.0).unwrap();
        // α = β = 3 ⇒ Var = αβ/((α+β)²(α+β+1)) · range².
        let expected = 9.0 / (36.0 * 7.0) * 100.0;
        assert!(
            (p.variance() - expected).abs() < 1e-10,
            "PERT variance: {} vs expected: {}",
            p.variance(),
            expected
        );
    }

    #[test]
    fn test_pert_shape_params() {
        // Symmetric mode with λ = 4 yields α = β = 3.
        let p = Pert::new(0.0, 5.0, 10.0).unwrap();
        assert!((p.alpha() - 3.0).abs() < 1e-15);
        assert!((p.beta_param() - 3.0).abs() < 1e-15);
    }

    #[test]
    fn test_pert_cdf_bounds() {
        let p = Pert::new(1.0, 4.0, 7.0).unwrap();
        assert_eq!(p.cdf(0.0), 0.0);
        assert_eq!(p.cdf(8.0), 1.0);
        // Symmetric case: the mode is the median.
        let p_sym = Pert::new(0.0, 5.0, 10.0).unwrap();
        assert!((p_sym.cdf(5.0) - 0.5).abs() < 0.01);
    }

    #[test]
    fn test_pert_cdf_monotonic() {
        // Sweep the support and check the CDF never decreases.
        let p = Pert::new(0.0, 3.0, 10.0).unwrap();
        let mut prev = 0.0;
        for i in 0..=100 {
            let x = i as f64 * 0.1;
            let c = p.cdf(x);
            assert!(c >= prev - 1e-15, "CDF not monotonic at x={x}");
            prev = c;
        }
    }

    #[test]
    fn test_pert_quantile_approx() {
        // The normal approximation should land near the true median.
        let p = Pert::new(0.0, 5.0, 10.0).unwrap();
        let q50 = p.quantile_approx(0.5).unwrap();
        assert!((q50 - 5.0).abs() < 0.5, "median approx: {q50}");
    }

    #[test]
    fn test_pert_invalid() {
        assert!(Pert::new(5.0, 3.0, 10.0).is_err());
        assert!(Pert::new(5.0, 5.0, 5.0).is_err());
    }

    #[test]
    fn test_pert_with_lambda() {
        let p4 = Pert::with_shape(0.0, 5.0, 10.0, 4.0).unwrap();
        let p8 = Pert::with_shape(0.0, 5.0, 10.0, 8.0).unwrap();
        assert!(
            p8.variance() < p4.variance(),
            "higher λ should give lower variance"
        );
    }
1412
    // --- Special functions ---

    #[test]
    fn test_regularized_beta_bounds() {
        // I_0(a,b) = 0 and I_1(a,b) = 1 exactly.
        assert_eq!(special::regularized_incomplete_beta(0.0, 2.0, 3.0), 0.0);
        assert_eq!(special::regularized_incomplete_beta(1.0, 2.0, 3.0), 1.0);
    }

    #[test]
    fn test_regularized_beta_symmetric() {
        // Equal shape parameters make the distribution symmetric about 0.5.
        let result = special::regularized_incomplete_beta(0.5, 3.0, 3.0);
        assert!(
            (result - 0.5).abs() < 1e-8,
            "I_0.5(3,3) = {result}, expected 0.5"
        );
    }

    #[test]
    fn test_regularized_beta_known_values() {
        // I_x(1,1) is the identity (uniform CDF).
        for &x in &[0.1, 0.3, 0.5, 0.7, 0.9] {
            let result = special::regularized_incomplete_beta(x, 1.0, 1.0);
            assert!(
                (result - x).abs() < 1e-10,
                "I_{x}(1,1) = {result}, expected {x}"
            );
        }

        // I_x(1,3) = 1 − (1 − x)³ in closed form.
        for &x in &[0.1, 0.5, 0.9] {
            let result = special::regularized_incomplete_beta(x, 1.0, 3.0);
            let expected = 1.0 - (1.0 - x).powi(3);
            assert!(
                (result - expected).abs() < 1e-10,
                "I_{x}(1,3) = {result}, expected {expected}"
            );
        }
    }

    #[test]
    fn test_ln_gamma_known() {
        // Γ(n) = (n−1)! at integer arguments.
        assert!((special::ln_gamma(1.0)).abs() < 1e-10);
        assert!((special::ln_gamma(2.0)).abs() < 1e-10);
        assert!((special::ln_gamma(3.0) - 2.0_f64.ln()).abs() < 1e-10);
        assert!((special::ln_gamma(5.0) - 24.0_f64.ln()).abs() < 1e-10);
        // Γ(1/2) = √π.
        assert!(
            (special::ln_gamma(0.5) - std::f64::consts::PI.sqrt().ln()).abs() < 1e-10,
            "ln Γ(0.5) = {}, expected {}",
            special::ln_gamma(0.5),
            std::f64::consts::PI.sqrt().ln()
        );
    }
1473
    // --- Weibull ---

    #[test]
    fn test_weibull_exponential_special_case() {
        // shape = 1 reduces to Exponential(1/scale).
        let w = Weibull::new(1.0, 5.0).unwrap();
        assert!((w.mean() - 5.0).abs() < 1e-10);
        assert!((w.variance() - 25.0).abs() < 1e-8);
    }

    #[test]
    fn test_weibull_rayleigh_special_case() {
        // shape = 2 is the Rayleigh distribution; mean = scale·√π/2.
        let w = Weibull::new(2.0, 1.0).unwrap();
        let expected_mean = std::f64::consts::PI.sqrt() / 2.0;
        assert!(
            (w.mean() - expected_mean).abs() < 1e-10,
            "Weibull(2,1) mean = {}, expected {}",
            w.mean(),
            expected_mean
        );
    }

    #[test]
    fn test_weibull_cdf_known_values() {
        let w = Weibull::new(2.0, 10.0).unwrap();
        // F(scale) = 1 − e⁻¹ for any shape.
        assert!((w.cdf(10.0) - (1.0 - (-1.0_f64).exp())).abs() < 1e-10);
        assert_eq!(w.cdf(0.0), 0.0);
        assert_eq!(w.cdf(-1.0), 0.0);
    }

    #[test]
    fn test_weibull_pdf_basic() {
        // shape = scale = 1: pdf(t) = e⁻ᵗ.
        let w = Weibull::new(1.0, 1.0).unwrap();
        assert!((w.pdf(0.0) - 1.0).abs() < 1e-10);
        assert!((w.pdf(1.0) - (-1.0_f64).exp()).abs() < 1e-10);
        assert!((w.pdf(2.0) - (-2.0_f64).exp()).abs() < 1e-10);
    }

    #[test]
    fn test_weibull_pdf_negative() {
        let w = Weibull::new(2.0, 5.0).unwrap();
        assert_eq!(w.pdf(-1.0), 0.0);
    }

    #[test]
    fn test_weibull_quantile_roundtrip() {
        // cdf(quantile(p)) should recover p.
        let w = Weibull::new(2.5, 100.0).unwrap();
        for &p in &[0.1, 0.25, 0.5, 0.75, 0.9] {
            let t = w.quantile(p).unwrap();
            let p_back = w.cdf(t);
            assert!(
                (p_back - p).abs() < 1e-10,
                "roundtrip: p={p} -> t={t} -> p_back={p_back}"
            );
        }
    }

    #[test]
    fn test_weibull_quantile_edge_cases() {
        // p = 1 is rejected (the quantile range is half-open).
        let w = Weibull::new(2.0, 10.0).unwrap();
        assert_eq!(w.quantile(0.0), Some(0.0));
        assert_eq!(w.quantile(1.0), None);
        assert_eq!(w.quantile(-0.1), None);
    }

    #[test]
    fn test_weibull_reliability() {
        // R(t) + F(t) = 1 by definition.
        let w = Weibull::new(2.0, 10.0).unwrap();
        for &t in &[1.0, 5.0, 10.0, 20.0] {
            let r = w.reliability(t);
            let f = w.cdf(t);
            assert!((r + f - 1.0).abs() < 1e-14);
        }
    }

    #[test]
    fn test_weibull_hazard_rate() {
        // shape = 1: constant hazard 1/scale.
        let w = Weibull::new(1.0, 5.0).unwrap();
        for &t in &[1.0, 5.0, 10.0] {
            assert!((w.hazard_rate(t) - 0.2).abs() < 1e-10);
        }
        // shape = 2: hazard grows linearly in t.
        let w2 = Weibull::new(2.0, 10.0).unwrap();
        assert!((w2.hazard_rate(5.0) - 2.0 * 5.0 / 100.0).abs() < 1e-10);
    }

    #[test]
    fn test_weibull_invalid() {
        assert!(Weibull::new(0.0, 1.0).is_err());
        assert!(Weibull::new(-1.0, 1.0).is_err());
        assert!(Weibull::new(1.0, 0.0).is_err());
        assert!(Weibull::new(1.0, -1.0).is_err());
        assert!(Weibull::new(f64::NAN, 1.0).is_err());
        assert!(Weibull::new(1.0, f64::INFINITY).is_err());
    }
1577
    // --- Exponential ---

    #[test]
    fn test_exponential_basic() {
        // mean = 1/λ, variance = 1/λ².
        let e = Exponential::new(0.5).unwrap();
        assert!((e.rate() - 0.5).abs() < 1e-10);
        assert!((e.mean() - 2.0).abs() < 1e-10);
        assert!((e.variance() - 4.0).abs() < 1e-10);
    }

    #[test]
    fn test_exponential_pdf() {
        let e = Exponential::new(1.0).unwrap();
        assert!((e.pdf(0.0) - 1.0).abs() < 1e-10);
        assert!((e.pdf(1.0) - (-1.0_f64).exp()).abs() < 1e-10);
        assert_eq!(e.pdf(-1.0), 0.0);
    }

    #[test]
    fn test_exponential_cdf() {
        let e = Exponential::new(1.0).unwrap();
        assert_eq!(e.cdf(-1.0), 0.0);
        assert_eq!(e.cdf(0.0), 0.0);
        assert!((e.cdf(1.0) - (1.0 - (-1.0_f64).exp())).abs() < 1e-10);
    }

    #[test]
    fn test_exponential_quantile() {
        let e = Exponential::new(2.0).unwrap();
        assert_eq!(e.quantile(0.0), Some(0.0));
        assert_eq!(e.quantile(1.0), Some(f64::INFINITY));
        // Median = ln 2 / λ.
        let q = e.quantile(0.5).unwrap();
        assert!((q - 2.0_f64.ln() / 2.0).abs() < 1e-10);
        assert!(e.quantile(-0.1).is_none());
        assert!(e.quantile(1.1).is_none());
    }

    #[test]
    fn test_exponential_quantile_roundtrip() {
        let e = Exponential::new(3.0).unwrap();
        for &p in &[0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99] {
            let x = e.quantile(p).unwrap();
            let p_back = e.cdf(x);
            assert!((p_back - p).abs() < 1e-10, "p={p}, x={x}, p_back={p_back}");
        }
    }

    #[test]
    fn test_exponential_memoryless() {
        // P(X > s+t | X > s) = P(X > t).
        let e = Exponential::new(1.5).unwrap();
        let s = 2.0;
        let t = 3.0;
        let lhs = e.survival(s + t) / e.survival(s);
        let rhs = e.survival(t);
        assert!((lhs - rhs).abs() < 1e-10);
    }

    #[test]
    fn test_exponential_invalid() {
        assert!(Exponential::new(0.0).is_err());
        assert!(Exponential::new(-1.0).is_err());
        assert!(Exponential::new(f64::NAN).is_err());
        assert!(Exponential::new(f64::INFINITY).is_err());
    }
1644
    // --- Gamma ---

    #[test]
    fn test_gamma_basic() {
        let g = GammaDistribution::new(2.0, 1.0).unwrap();
        assert!((g.shape() - 2.0).abs() < 1e-10);
        assert!((g.rate() - 1.0).abs() < 1e-10);
        assert!((g.scale() - 1.0).abs() < 1e-10);
        assert!((g.mean() - 2.0).abs() < 1e-10);
        assert!((g.variance() - 2.0).abs() < 1e-10);
        assert!((g.mode() - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_gamma_from_shape_scale() {
        // scale = 2 ⇒ rate = 1/2; mean = shape·scale.
        let g = GammaDistribution::from_shape_scale(3.0, 2.0).unwrap();
        assert!((g.shape() - 3.0).abs() < 1e-10);
        assert!((g.rate() - 0.5).abs() < 1e-10);
        assert!((g.mean() - 6.0).abs() < 1e-10);
    }

    #[test]
    fn test_gamma_pdf_integral() {
        // Midpoint-rule integral of the density should be ≈ 1.
        let g = GammaDistribution::new(3.0, 2.0).unwrap();
        let n = 20_000;
        let dt = 20.0 / n as f64;
        let integral: f64 = (0..n)
            .map(|i| {
                let x = (i as f64 + 0.5) * dt;
                g.pdf(x) * dt
            })
            .sum();
        assert!((integral - 1.0).abs() < 0.01, "PDF integral = {integral}");
    }

    #[test]
    fn test_gamma_cdf_known() {
        // Gamma(1, λ) is Exponential(λ).
        let g = GammaDistribution::new(1.0, 1.0).unwrap();
        let expected = 1.0 - (-1.0_f64).exp();
        assert!((g.cdf(1.0) - expected).abs() < 1e-10);
    }

    #[test]
    fn test_gamma_cdf_chi2() {
        // Gamma(k/2, 1/2) is χ²(k); here k = 2 so the CDF is exponential.
        let g = GammaDistribution::new(1.0, 0.5).unwrap();
        let x = 4.0;
        let expected = 1.0 - (-0.5_f64 * x).exp();
        assert!(
            (g.cdf(x) - expected).abs() < 1e-8,
            "CDF({x}) = {} vs {expected}",
            g.cdf(x)
        );
    }

    #[test]
    fn test_gamma_quantile_roundtrip() {
        let g = GammaDistribution::new(5.0, 2.0).unwrap();
        for &p in &[0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99] {
            let x = g.quantile(p).unwrap();
            let p_back = g.cdf(x);
            assert!((p_back - p).abs() < 1e-6, "p={p}, x={x}, p_back={p_back}");
        }
    }

    #[test]
    fn test_gamma_mode_small_shape() {
        // Mode is pinned to 0 when shape < 1.
        let g = GammaDistribution::new(0.5, 1.0).unwrap();
        assert_eq!(g.mode(), 0.0);
    }

    #[test]
    fn test_gamma_invalid() {
        assert!(GammaDistribution::new(0.0, 1.0).is_err());
        assert!(GammaDistribution::new(-1.0, 1.0).is_err());
        assert!(GammaDistribution::new(1.0, 0.0).is_err());
        assert!(GammaDistribution::new(1.0, -1.0).is_err());
        assert!(GammaDistribution::new(f64::NAN, 1.0).is_err());
    }
1727
    // --- Beta ---

    #[test]
    fn test_beta_mean_variance() {
        let b = BetaDistribution::new(2.0, 5.0).unwrap();
        assert!((b.mean() - 2.0 / 7.0).abs() < 1e-10);
        // Var = αβ / ((α+β)²(α+β+1)).
        let expected_var = 2.0 * 5.0 / (7.0 * 7.0 * 8.0);
        assert!((b.variance() - expected_var).abs() < 1e-10);
    }

    #[test]
    fn test_beta_symmetric() {
        // α = β: symmetric about 0.5.
        let b = BetaDistribution::new(3.0, 3.0).unwrap();
        assert!((b.mean() - 0.5).abs() < 1e-10);
        assert!((b.mode().unwrap() - 0.5).abs() < 1e-10);
        assert!((b.pdf(0.3) - b.pdf(0.7)).abs() < 1e-10);
    }

    #[test]
    fn test_beta_uniform_special_case() {
        // Beta(1,1) is Uniform(0,1).
        let b = BetaDistribution::new(1.0, 1.0).unwrap();
        assert!((b.mean() - 0.5).abs() < 1e-10);
        assert!((b.cdf(0.5) - 0.5).abs() < 1e-10);
        assert!((b.cdf(0.25) - 0.25).abs() < 1e-10);
    }

    #[test]
    fn test_beta_cdf_boundaries() {
        let b = BetaDistribution::new(2.0, 3.0).unwrap();
        assert_eq!(b.cdf(0.0), 0.0);
        assert_eq!(b.cdf(-1.0), 0.0);
        assert_eq!(b.cdf(1.0), 1.0);
        assert_eq!(b.cdf(2.0), 1.0);
    }

    #[test]
    fn test_beta_pdf_integral() {
        // Midpoint-rule integral of the density should be ≈ 1.
        let b = BetaDistribution::new(2.0, 5.0).unwrap();
        let n = 10_000;
        let dt = 1.0 / n as f64;
        let integral: f64 = (0..n)
            .map(|i| {
                let x = (i as f64 + 0.5) * dt;
                b.pdf(x) * dt
            })
            .sum();
        assert!((integral - 1.0).abs() < 0.01, "PDF integral = {integral}");
    }

    #[test]
    fn test_beta_quantile_roundtrip() {
        let b = BetaDistribution::new(2.0, 5.0).unwrap();
        for &p in &[0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99] {
            let x = b.quantile(p).unwrap();
            let p_back = b.cdf(x);
            assert!((p_back - p).abs() < 1e-6, "p={p}, x={x}, p_back={p_back}");
        }
    }

    #[test]
    fn test_beta_mode() {
        // Mode = (α−1)/(α+β−2) for α, β > 1.
        let b = BetaDistribution::new(5.0, 3.0).unwrap();
        assert!((b.mode().unwrap() - 2.0 / 3.0).abs() < 1e-10);
    }

    #[test]
    fn test_beta_mode_undefined() {
        // No interior mode when α ≤ 1 or β ≤ 1.
        let b = BetaDistribution::new(0.5, 0.5).unwrap();
        assert!(b.mode().is_none());
    }
1805
1806 #[test]
1807 fn test_beta_invalid() {
1808 assert!(BetaDistribution::new(0.0, 1.0).is_err());
1809 assert!(BetaDistribution::new(-1.0, 1.0).is_err());
1810 assert!(BetaDistribution::new(1.0, 0.0).is_err());
1811 assert!(BetaDistribution::new(f64::NAN, 1.0).is_err());
1812 }
1813
1814 #[test]
1817 fn test_chi2_mean_variance() {
1818 let chi2 = ChiSquared::new(5.0).unwrap();
1819 assert!((chi2.mean() - 5.0).abs() < 1e-10);
1820 assert!((chi2.variance() - 10.0).abs() < 1e-10);
1821 }
1822
1823 #[test]
1824 fn test_chi2_exp_special_case() {
1825 let chi2 = ChiSquared::new(2.0).unwrap();
1827 let x = 4.0;
1828 let expected = 1.0 - (-x / 2.0_f64).exp();
1829 assert!(
1830 (chi2.cdf(x) - expected).abs() < 1e-8,
1831 "CDF({x}) = {} vs {expected}",
1832 chi2.cdf(x)
1833 );
1834 }
1835
1836 #[test]
1837 fn test_chi2_cdf_boundaries() {
1838 let chi2 = ChiSquared::new(3.0).unwrap();
1839 assert_eq!(chi2.cdf(0.0), 0.0);
1840 assert_eq!(chi2.cdf(-1.0), 0.0);
1841 assert!(chi2.cdf(100.0) > 0.999);
1843 }
1844
1845 #[test]
1846 fn test_chi2_pdf_integral() {
1847 let chi2 = ChiSquared::new(4.0).unwrap();
1848 let n = 20_000;
1849 let dt = 30.0 / n as f64;
1850 let integral: f64 = (0..n)
1851 .map(|i| {
1852 let x = (i as f64 + 0.5) * dt;
1853 chi2.pdf(x) * dt
1854 })
1855 .sum();
1856 assert!((integral - 1.0).abs() < 0.01, "PDF integral = {integral}");
1857 }
1858
1859 #[test]
1860 fn test_chi2_quantile_roundtrip() {
1861 let chi2 = ChiSquared::new(5.0).unwrap();
1862 for &p in &[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99] {
1863 let x = chi2.quantile(p).unwrap();
1864 let p_back = chi2.cdf(x);
1865 assert!((p_back - p).abs() < 1e-6, "p={p}, x={x}, p_back={p_back}");
1866 }
1867 }
1868
1869 #[test]
1870 fn test_chi2_known_quantiles() {
1871 let chi2 = ChiSquared::new(5.0).unwrap();
1874 let q05 = chi2.quantile(0.05).unwrap();
1875 assert!((q05 - 1.1455).abs() < 0.01, "q(0.05) = {q05}");
1876 let q95 = chi2.quantile(0.95).unwrap();
1877 assert!((q95 - 11.0705).abs() < 0.01, "q(0.95) = {q95}");
1878 }
1879
1880 #[test]
1881 fn test_chi2_invalid() {
1882 assert!(ChiSquared::new(0.0).is_err());
1883 assert!(ChiSquared::new(-1.0).is_err());
1884 assert!(ChiSquared::new(f64::NAN).is_err());
1885 }
1886
1887 #[test]
1888 fn test_chi2_gamma_consistency() {
1889 let k = 6.0;
1891 let chi2 = ChiSquared::new(k).unwrap();
1892 let gamma = GammaDistribution::new(k / 2.0, 0.5).unwrap();
1893
1894 let x = 5.0;
1895 assert!(
1896 (chi2.cdf(x) - gamma.cdf(x)).abs() < 1e-10,
1897 "Chi2 and Gamma CDF should match"
1898 );
1899 }
1900
1901 #[test]
1902 fn test_weibull_mean_variance_known() {
1903 let w = Weibull::new(3.6, 1000.0).unwrap();
1906 let mean = w.mean();
1907 assert!(mean > 800.0 && mean < 1000.0, "mean={mean}");
1908 assert!(w.variance() > 0.0);
1909 }
1910
1911 #[test]
1912 fn test_weibull_pdf_integral_approx() {
1913 let w = Weibull::new(2.0, 10.0).unwrap();
1915 let n = 10_000;
1916 let dt = 50.0 / n as f64;
1917 let integral: f64 = (0..n)
1918 .map(|i| {
1919 let t = (i as f64 + 0.5) * dt;
1920 w.pdf(t) * dt
1921 })
1922 .sum();
1923 assert!(
1924 (integral - 1.0).abs() < 0.01,
1925 "PDF integral = {integral}, expected ≈ 1.0"
1926 );
1927 }
1928}
1929
// Property-based tests: each property is checked against 300 randomly
// generated parameter sets per run. Code inside `proptest!` must stay within
// the macro's supported grammar, so only comments are added here.
#[cfg(test)]
mod proptests {
    use super::*;
    use proptest::prelude::*;

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(300))]

        // Uniform: CDF values always lie in [0, 1], even for x outside the
        // support (min < 0 < 1 <= max by construction of the strategies).
        #[test]
        fn uniform_cdf_in_01(
            min in -100.0_f64..0.0,
            max in 1.0_f64..100.0,
            x in -200.0_f64..200.0,
        ) {
            let u = Uniform::new(min, max).unwrap();
            let c = u.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c));
        }

        // Uniform: cdf(quantile(p)) recovers p to near machine precision
        // (both functions are affine on the support).
        #[test]
        fn uniform_quantile_roundtrip(
            min in -100.0_f64..0.0,
            max in 1.0_f64..100.0,
            p in 0.0_f64..=1.0,
        ) {
            let u = Uniform::new(min, max).unwrap();
            let x = u.quantile(p).unwrap();
            let p_back = u.cdf(x);
            prop_assert!((p_back - p).abs() < 1e-12, "roundtrip: p={p} -> x={x} -> p_back={p_back}");
        }

        // Triangular: CDF stays in [0, 1]. The mode is generated as a
        // fraction of the range so min <= mode <= max always holds.
        #[test]
        fn triangular_cdf_in_01(
            min in -100.0_f64..-1.0,
            mode_frac in 0.0_f64..=1.0,
            range in 1.0_f64..100.0,
            x in -200.0_f64..200.0,
        ) {
            let max = min + range;
            let mode = min + mode_frac * range;
            let t = Triangular::new(min, mode, max).unwrap();
            let c = t.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c));
        }

        // Triangular: quantile/CDF round-trip. mode_frac and p are kept
        // strictly interior to avoid degenerate edge cases.
        #[test]
        fn triangular_quantile_roundtrip(
            min in -50.0_f64..0.0,
            mode_frac in 0.01_f64..0.99,
            range in 1.0_f64..50.0,
            p in 0.001_f64..0.999,
        ) {
            let max = min + range;
            let mode = min + mode_frac * range;
            let t = Triangular::new(min, mode, max).unwrap();
            let x = t.quantile(p).unwrap();
            let p_back = t.cdf(x);
            prop_assert!(
                (p_back - p).abs() < 1e-8,
                "roundtrip: p={p} -> x={x} -> p_back={p_back}"
            );
        }

        // PERT: the mean must follow the classic (min + 4*mode + max) / 6
        // three-point-estimate formula.
        #[test]
        fn pert_mean_formula(
            min in -50.0_f64..0.0,
            mode_frac in 0.01_f64..0.99,
            range in 1.0_f64..50.0,
        ) {
            let max = min + range;
            let mode = min + mode_frac * range;
            let p = Pert::new(min, mode, max).unwrap();
            let expected = (min + 4.0 * mode + max) / 6.0;
            prop_assert!(
                (p.mean() - expected).abs() < 1e-10,
                "PERT mean: {} vs expected: {}",
                p.mean(),
                expected
            );
        }

        // PERT: variance is strictly positive for any non-degenerate range.
        #[test]
        fn pert_variance_positive(
            min in -50.0_f64..0.0,
            mode_frac in 0.01_f64..0.99,
            range in 1.0_f64..50.0,
        ) {
            let max = min + range;
            let mode = min + mode_frac * range;
            let p = Pert::new(min, mode, max).unwrap();
            prop_assert!(p.variance() > 0.0);
        }

        // PERT: the CDF is (weakly) monotonic, sampled on a 21-point grid
        // across the support; 1e-10 slack tolerates floating-point noise.
        #[test]
        fn pert_cdf_monotonic(
            min in -50.0_f64..0.0,
            mode_frac in 0.05_f64..0.95,
            range in 2.0_f64..50.0,
        ) {
            let max = min + range;
            let mode = min + mode_frac * range;
            let p = Pert::new(min, mode, max).unwrap();
            let mut prev = 0.0;
            for i in 0..=20 {
                let x = min + (i as f64 / 20.0) * range;
                let c = p.cdf(x);
                prop_assert!(c >= prev - 1e-10, "CDF not monotonic at x={x}");
                prev = c;
            }
        }

        // Weibull: CDF values always lie in [0, 1] for t >= 0.
        #[test]
        fn weibull_cdf_in_01(
            shape in 0.1_f64..10.0,
            scale in 0.1_f64..100.0,
            t in 0.0_f64..200.0,
        ) {
            let w = Weibull::new(shape, scale).unwrap();
            let c = w.cdf(t);
            prop_assert!((0.0..=1.0).contains(&c), "CDF({t}) = {c} out of [0,1]");
        }

        // Weibull: quantile/CDF round-trip on interior probabilities.
        #[test]
        fn weibull_quantile_roundtrip(
            shape in 0.5_f64..10.0,
            scale in 1.0_f64..100.0,
            p in 0.001_f64..0.999,
        ) {
            let w = Weibull::new(shape, scale).unwrap();
            let t = w.quantile(p).unwrap();
            let p_back = w.cdf(t);
            prop_assert!(
                (p_back - p).abs() < 1e-8,
                "roundtrip: p={p} -> t={t} -> p_back={p_back}"
            );
        }

        // Weibull: the PDF is never negative on t >= 0.
        #[test]
        fn weibull_pdf_non_negative(
            shape in 0.1_f64..10.0,
            scale in 0.1_f64..100.0,
            t in 0.0_f64..200.0,
        ) {
            let w = Weibull::new(shape, scale).unwrap();
            prop_assert!(w.pdf(t) >= 0.0, "PDF({t}) must be >= 0");
        }

        // Weibull: reliability (survival) and CDF are complements,
        // R(t) + F(t) = 1, to near machine precision.
        #[test]
        fn weibull_reliability_plus_cdf_is_one(
            shape in 0.5_f64..5.0,
            scale in 1.0_f64..50.0,
            t in 0.001_f64..100.0,
        ) {
            let w = Weibull::new(shape, scale).unwrap();
            let sum = w.cdf(t) + w.reliability(t);
            prop_assert!(
                (sum - 1.0).abs() < 1e-12,
                "CDF + R should = 1, got {sum}"
            );
        }

        // Weibull: variance is strictly positive for valid parameters.
        #[test]
        fn weibull_variance_positive(
            shape in 0.1_f64..10.0,
            scale in 0.1_f64..100.0,
        ) {
            let w = Weibull::new(shape, scale).unwrap();
            prop_assert!(w.variance() > 0.0, "variance must be positive");
        }

        // Exponential: CDF values always lie in [0, 1] for x >= 0.
        #[test]
        fn exponential_cdf_in_01(
            rate in 0.01_f64..10.0,
            x in 0.0_f64..100.0,
        ) {
            let e = Exponential::new(rate).unwrap();
            let c = e.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
        }

        // Exponential: quantile/CDF round-trip on interior probabilities.
        #[test]
        fn exponential_quantile_roundtrip(
            rate in 0.01_f64..10.0,
            p in 0.001_f64..0.999,
        ) {
            let e = Exponential::new(rate).unwrap();
            let x = e.quantile(p).unwrap();
            let p_back = e.cdf(x);
            prop_assert!((p_back - p).abs() < 1e-10);
        }

        // Gamma: CDF values always lie in [0, 1] on the positive axis.
        #[test]
        fn gamma_cdf_in_01(
            shape in 0.5_f64..10.0,
            rate in 0.1_f64..10.0,
            x in 0.001_f64..50.0,
        ) {
            let g = GammaDistribution::new(shape, rate).unwrap();
            let c = g.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
        }

        // Gamma: variance is strictly positive for valid parameters.
        #[test]
        fn gamma_variance_positive(
            shape in 0.1_f64..10.0,
            rate in 0.1_f64..10.0,
        ) {
            let g = GammaDistribution::new(shape, rate).unwrap();
            prop_assert!(g.variance() > 0.0, "variance must be positive");
        }

        // Beta: CDF values always lie in [0, 1] on the open unit interval.
        #[test]
        fn beta_cdf_in_01(
            alpha in 0.5_f64..10.0,
            beta_param in 0.5_f64..10.0,
            x in 0.001_f64..0.999,
        ) {
            let b = BetaDistribution::new(alpha, beta_param).unwrap();
            let c = b.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
        }

        // Beta: quantile/CDF round-trip; looser 1e-5 tolerance reflects the
        // numerical inversion involved.
        #[test]
        fn beta_quantile_roundtrip(
            alpha in 1.0_f64..10.0,
            beta_param in 1.0_f64..10.0,
            p in 0.01_f64..0.99,
        ) {
            let b = BetaDistribution::new(alpha, beta_param).unwrap();
            let x = b.quantile(p).unwrap();
            let p_back = b.cdf(x);
            prop_assert!((p_back - p).abs() < 1e-5, "p={p}, p_back={p_back}");
        }

        // Chi-squared: CDF values always lie in [0, 1] on the positive axis.
        #[test]
        fn chi2_cdf_in_01(
            k in 1.0_f64..20.0,
            x in 0.001_f64..50.0,
        ) {
            let chi2 = ChiSquared::new(k).unwrap();
            let c = chi2.cdf(x);
            prop_assert!((0.0..=1.0).contains(&c), "CDF({x}) = {c}");
        }

        // Chi-squared: variance is strictly positive for valid dof.
        #[test]
        fn chi2_variance_positive(
            k in 0.5_f64..50.0,
        ) {
            let chi2 = ChiSquared::new(k).unwrap();
            prop_assert!(chi2.variance() > 0.0, "variance must be positive");
        }
    }
}