//! Normal (Gaussian) distribution family for `ngboost_rs` (ngboost_rs/dist/normal.rs).

1use crate::dist::{Distribution, DistributionMethods, RegressionDistn};
2use crate::scores::{CRPScore, LogScore, Scorable};
3use ndarray::{array, Array1, Array2, Array3};
4use rand::prelude::*;
5use statrs::distribution::{Continuous, ContinuousCDF, Normal as NormalDist};
6use statrs::statistics::Statistics;
7
/// Minimum scale (standard deviation) to avoid numerical issues
/// such as division by a vanishing variance in the score gradients.
const MIN_SCALE: f64 = 1e-6;
/// Maximum scale to prevent overflow in variance calculations.
const MAX_SCALE: f64 = 1e6;
12
/// The Normal (Gaussian) distribution.
///
/// Internally parameterized as `[loc, log(scale)]` (one row per observation);
/// `from_params` exponentiates and clamps the second column.
#[derive(Debug, Clone)]
pub struct Normal {
    /// The mean of the distribution (loc).
    pub loc: Array1<f64>,
    /// The standard deviation of the distribution (scale),
    /// clamped to [MIN_SCALE, MAX_SCALE] at construction.
    pub scale: Array1<f64>,
    /// The variance of the distribution (scale squared).
    pub var: Array1<f64>,
    /// The raw parameters `[loc, log(scale)]`, stored as a 2D array.
    _params: Array2<f64>,
}
25
26impl Distribution for Normal {
27    fn from_params(params: &Array2<f64>) -> Self {
28        let loc = params.column(0).to_owned();
29        // Clamp scale to [MIN_SCALE, MAX_SCALE] for numerical stability
30        let scale = params
31            .column(1)
32            .mapv(|p| f64::exp(p).clamp(MIN_SCALE, MAX_SCALE));
33        let var = &scale * &scale;
34        Normal {
35            loc,
36            scale,
37            var,
38            _params: params.clone(),
39        }
40    }
41
42    fn fit(y: &Array1<f64>) -> Array1<f64> {
43        let mean = y.mean();
44        let std_dev = if y.len() <= 1 {
45            1.0 // Fallback when we can't compute std dev (matches scipy behavior)
46        } else {
47            y.std(0.0)
48        };
49        // The parameters are loc and log(scale)
50        // Handle edge case where std_dev is 0 or very small - match scipy's robust behavior
51        let safe_std_dev = if std_dev <= 0.0 { 1.0 } else { std_dev };
52        array![mean, safe_std_dev.ln()]
53    }
54
55    fn n_params(&self) -> usize {
56        2
57    }
58
59    fn predict(&self) -> Array1<f64> {
60        self.loc.clone()
61    }
62
63    fn params(&self) -> &Array2<f64> {
64        &self._params
65    }
66}
67
// Marker impl: `Normal` can be used for regression targets.
impl RegressionDistn for Normal {}
69
70impl DistributionMethods for Normal {
71    fn mean(&self) -> Array1<f64> {
72        self.loc.clone()
73    }
74
75    fn variance(&self) -> Array1<f64> {
76        self.var.clone()
77    }
78
79    fn std(&self) -> Array1<f64> {
80        self.scale.clone()
81    }
82
83    fn pdf(&self, y: &Array1<f64>) -> Array1<f64> {
84        let mut result = Array1::zeros(y.len());
85        for i in 0..y.len() {
86            if let Ok(d) = NormalDist::new(self.loc[i], self.scale[i]) {
87                result[i] = d.pdf(y[i]);
88            }
89        }
90        result
91    }
92
93    fn logpdf(&self, y: &Array1<f64>) -> Array1<f64> {
94        let mut result = Array1::zeros(y.len());
95        for i in 0..y.len() {
96            if let Ok(d) = NormalDist::new(self.loc[i], self.scale[i]) {
97                result[i] = d.ln_pdf(y[i]);
98            }
99        }
100        result
101    }
102
103    fn cdf(&self, y: &Array1<f64>) -> Array1<f64> {
104        let mut result = Array1::zeros(y.len());
105        for i in 0..y.len() {
106            if let Ok(d) = NormalDist::new(self.loc[i], self.scale[i]) {
107                result[i] = d.cdf(y[i]);
108            }
109        }
110        result
111    }
112
113    fn ppf(&self, q: &Array1<f64>) -> Array1<f64> {
114        let mut result = Array1::zeros(q.len());
115        for i in 0..q.len() {
116            if let Ok(d) = NormalDist::new(self.loc[i], self.scale[i]) {
117                result[i] = d.inverse_cdf(q[i]);
118            }
119        }
120        result
121    }
122
123    fn sample(&self, n_samples: usize) -> Array2<f64> {
124        let n_obs = self.loc.len();
125        let mut samples = Array2::zeros((n_samples, n_obs));
126        let mut rng = rand::rng();
127
128        for i in 0..n_obs {
129            if let Ok(d) = NormalDist::new(self.loc[i], self.scale[i]) {
130                for s in 0..n_samples {
131                    let u: f64 = rng.random();
132                    samples[[s, i]] = d.inverse_cdf(u);
133                }
134            }
135        }
136        samples
137    }
138
139    fn mode(&self) -> Array1<f64> {
140        // For Normal, mode = mean
141        self.loc.clone()
142    }
143}
144
145impl Scorable<LogScore> for Normal {
146    fn score(&self, y: &Array1<f64>) -> Array1<f64> {
147        // -logpdf(y) with enhanced numerical stability and uncertainty handling
148        let mut scores = Array1::zeros(y.len());
149        for (i, &y_i) in y.iter().enumerate() {
150            // Handle edge cases to avoid NaN/Inf
151            let safe_loc = if self.loc[i].is_finite() {
152                self.loc[i]
153            } else {
154                0.0
155            };
156            let safe_scale = if self.scale[i] >= MIN_SCALE && self.scale[i].is_finite() {
157                self.scale[i]
158            } else {
159                1.0
160            };
161
162            // Use the original scale for normal operation
163            if let Ok(d) = NormalDist::new(safe_loc, safe_scale) {
164                let pdf = d.ln_pdf(y_i);
165                scores[i] = if pdf.is_finite() { -pdf } else { f64::MAX };
166            } else {
167                scores[i] = f64::MAX;
168            }
169        }
170        scores
171    }
172
173    fn d_score(&self, y: &Array1<f64>) -> Array2<f64> {
174        // Derivative wrt loc and log(scale)
175        let n_obs = y.len();
176        let mut d_params = Array2::zeros((n_obs, 2));
177
178        let err = &self.loc - y;
179
180        // d/d(loc)
181        d_params.column_mut(0).assign(&(&err / &self.var));
182
183        // d/d(log(scale))
184        let term2 = (&err * &err) / &self.var;
185        d_params.column_mut(1).assign(&(1.0 - term2));
186
187        d_params
188    }
189
190    fn metric(&self) -> Array3<f64> {
191        // Fisher Information Matrix
192        let n_obs = self.loc.len();
193        let mut fi = Array3::zeros((n_obs, 2, 2));
194
195        for i in 0..n_obs {
196            fi[[i, 0, 0]] = 1.0 / self.var[i];
197            fi[[i, 1, 1]] = 2.0;
198        }
199
200        fi
201    }
202}
203
204impl Scorable<CRPScore> for Normal {
205    fn score(&self, y: &Array1<f64>) -> Array1<f64> {
206        // CRPS for Normal distribution
207        let std_normal = NormalDist::new(0.0, 1.0).unwrap();
208        let sqrt_pi = std::f64::consts::PI.sqrt();
209
210        let mut scores = Array1::zeros(y.len());
211        for i in 0..y.len() {
212            let z = (y[i] - self.loc[i]) / self.scale[i];
213            let pdf_z = std_normal.pdf(z);
214            let cdf_z = std_normal.cdf(z);
215            scores[i] = self.scale[i] * (z * (2.0 * cdf_z - 1.0) + 2.0 * pdf_z - 1.0 / sqrt_pi);
216        }
217        scores
218    }
219
220    fn d_score(&self, y: &Array1<f64>) -> Array2<f64> {
221        let std_normal = NormalDist::new(0.0, 1.0).unwrap();
222        let n_obs = y.len();
223        let mut d_params = Array2::zeros((n_obs, 2));
224
225        for i in 0..n_obs {
226            let z = (y[i] - self.loc[i]) / self.scale[i];
227            let cdf_z = std_normal.cdf(z);
228
229            // d/d(loc)
230            d_params[[i, 0]] = -(2.0 * cdf_z - 1.0);
231
232            // d/d(log(scale)) - need to compute score first
233            let pdf_z = std_normal.pdf(z);
234            let sqrt_pi = std::f64::consts::PI.sqrt();
235            let score_i = self.scale[i] * (z * (2.0 * cdf_z - 1.0) + 2.0 * pdf_z - 1.0 / sqrt_pi);
236            d_params[[i, 1]] = score_i + (y[i] - self.loc[i]) * d_params[[i, 0]];
237        }
238
239        d_params
240    }
241
242    fn metric(&self) -> Array3<f64> {
243        // CRPS metric for Normal
244        let sqrt_pi = std::f64::consts::PI.sqrt();
245        let n_obs = self.loc.len();
246        let mut fi = Array3::zeros((n_obs, 2, 2));
247
248        for i in 0..n_obs {
249            fi[[i, 0, 0]] = 2.0;
250            fi[[i, 1, 1]] = self.var[i];
251        }
252
253        // Scale by 1/(2*sqrt(pi))
254        fi.mapv_inplace(|x| x / (2.0 * sqrt_pi));
255        fi
256    }
257}
258
259// ============================================================================
260// NormalFixedVar - Normal distribution with fixed variance = 1
261// ============================================================================
262
/// Normal distribution with variance fixed at 1.
///
/// Has one parameter: loc (mean). `from_params` pins `scale` and `var` to 1.
#[derive(Debug, Clone)]
pub struct NormalFixedVar {
    /// The location parameter (mean).
    pub loc: Array1<f64>,
    /// The scale parameter (fixed at 1.0 by `from_params`).
    pub scale: Array1<f64>,
    /// The variance (fixed at 1.0 by `from_params`).
    pub var: Array1<f64>,
    /// The raw parameters (single column: loc).
    _params: Array2<f64>,
}
277
278impl Distribution for NormalFixedVar {
279    fn from_params(params: &Array2<f64>) -> Self {
280        let loc = params.column(0).to_owned();
281        let n = loc.len();
282        let scale = Array1::ones(n);
283        let var = Array1::ones(n);
284        NormalFixedVar {
285            loc,
286            scale,
287            var,
288            _params: params.clone(),
289        }
290    }
291
292    fn fit(y: &Array1<f64>) -> Array1<f64> {
293        let mean = y.mean();
294        array![mean]
295    }
296
297    fn n_params(&self) -> usize {
298        1
299    }
300
301    fn predict(&self) -> Array1<f64> {
302        self.loc.clone()
303    }
304
305    fn params(&self) -> &Array2<f64> {
306        &self._params
307    }
308}
309
// Marker impl: `NormalFixedVar` can be used for regression targets.
impl RegressionDistn for NormalFixedVar {}
311
312impl DistributionMethods for NormalFixedVar {
313    fn mean(&self) -> Array1<f64> {
314        self.loc.clone()
315    }
316
317    fn variance(&self) -> Array1<f64> {
318        self.var.clone()
319    }
320
321    fn std(&self) -> Array1<f64> {
322        self.scale.clone()
323    }
324
325    fn pdf(&self, y: &Array1<f64>) -> Array1<f64> {
326        let mut result = Array1::zeros(y.len());
327        for i in 0..y.len() {
328            let d = NormalDist::new(self.loc[i], self.scale[i]).unwrap();
329            result[i] = d.pdf(y[i]);
330        }
331        result
332    }
333
334    fn cdf(&self, y: &Array1<f64>) -> Array1<f64> {
335        let mut result = Array1::zeros(y.len());
336        for i in 0..y.len() {
337            let d = NormalDist::new(self.loc[i], self.scale[i]).unwrap();
338            result[i] = d.cdf(y[i]);
339        }
340        result
341    }
342
343    fn ppf(&self, q: &Array1<f64>) -> Array1<f64> {
344        let mut result = Array1::zeros(q.len());
345        for i in 0..q.len() {
346            let d = NormalDist::new(self.loc[i], self.scale[i]).unwrap();
347            result[i] = d.inverse_cdf(q[i]);
348        }
349        result
350    }
351
352    fn sample(&self, n_samples: usize) -> Array2<f64> {
353        let n_obs = self.loc.len();
354        let mut samples = Array2::zeros((n_samples, n_obs));
355        let mut rng = rand::rng();
356
357        for i in 0..n_obs {
358            let d = NormalDist::new(self.loc[i], self.scale[i]).unwrap();
359            for s in 0..n_samples {
360                let u: f64 = rng.random();
361                samples[[s, i]] = d.inverse_cdf(u);
362            }
363        }
364        samples
365    }
366}
367
368impl Scorable<LogScore> for NormalFixedVar {
369    fn score(&self, y: &Array1<f64>) -> Array1<f64> {
370        let mut scores = Array1::zeros(y.len());
371        for (i, &y_i) in y.iter().enumerate() {
372            let d = NormalDist::new(self.loc[i], self.scale[i]).unwrap();
373            scores[i] = -d.ln_pdf(y_i);
374        }
375        scores
376    }
377
378    fn d_score(&self, y: &Array1<f64>) -> Array2<f64> {
379        let n_obs = y.len();
380        let mut d_params = Array2::zeros((n_obs, 1));
381
382        for i in 0..n_obs {
383            // d/d(loc) = (loc - y) / var
384            d_params[[i, 0]] = (self.loc[i] - y[i]) / self.var[i];
385        }
386
387        d_params
388    }
389
390    fn metric(&self) -> Array3<f64> {
391        let n_obs = self.loc.len();
392        let mut fi = Array3::zeros((n_obs, 1, 1));
393
394        for i in 0..n_obs {
395            fi[[i, 0, 0]] = 1.0 / self.var[i] + 1e-5;
396        }
397
398        fi
399    }
400}
401
402impl Scorable<CRPScore> for NormalFixedVar {
403    fn score(&self, y: &Array1<f64>) -> Array1<f64> {
404        let std_normal = NormalDist::new(0.0, 1.0).unwrap();
405        let sqrt_pi = std::f64::consts::PI.sqrt();
406
407        let mut scores = Array1::zeros(y.len());
408        for i in 0..y.len() {
409            let z = (y[i] - self.loc[i]) / self.scale[i];
410            let pdf_z = std_normal.pdf(z);
411            let cdf_z = std_normal.cdf(z);
412            scores[i] = self.scale[i] * (z * (2.0 * cdf_z - 1.0) + 2.0 * pdf_z - 1.0 / sqrt_pi);
413        }
414        scores
415    }
416
417    fn d_score(&self, y: &Array1<f64>) -> Array2<f64> {
418        let std_normal = NormalDist::new(0.0, 1.0).unwrap();
419        let n_obs = y.len();
420        let mut d_params = Array2::zeros((n_obs, 1));
421
422        for i in 0..n_obs {
423            let z = (y[i] - self.loc[i]) / self.scale[i];
424            let cdf_z = std_normal.cdf(z);
425            d_params[[i, 0]] = -(2.0 * cdf_z - 1.0);
426        }
427
428        d_params
429    }
430
431    fn metric(&self) -> Array3<f64> {
432        let sqrt_pi = std::f64::consts::PI.sqrt();
433        let n_obs = self.loc.len();
434        let mut fi = Array3::zeros((n_obs, 1, 1));
435
436        for i in 0..n_obs {
437            fi[[i, 0, 0]] = 2.0 / (2.0 * sqrt_pi);
438        }
439
440        fi
441    }
442}
443
444// ============================================================================
445// NormalFixedMean - Normal distribution with fixed mean = 0
446// ============================================================================
447
/// Normal distribution with mean fixed at 0.
///
/// Has one parameter: log(scale); `from_params` exponentiates it.
#[derive(Debug, Clone)]
pub struct NormalFixedMean {
    /// The location parameter (fixed at 0.0 by `from_params`).
    pub loc: Array1<f64>,
    /// The scale parameter (exp of the raw parameter).
    pub scale: Array1<f64>,
    /// The variance (scale squared).
    pub var: Array1<f64>,
    /// The raw parameters (single column: log(scale)).
    _params: Array2<f64>,
}
462
463impl Distribution for NormalFixedMean {
464    fn from_params(params: &Array2<f64>) -> Self {
465        let scale = params.column(0).mapv(f64::exp);
466        let var = &scale * &scale;
467        let n = scale.len();
468        let loc = Array1::zeros(n);
469        NormalFixedMean {
470            loc,
471            scale,
472            var,
473            _params: params.clone(),
474        }
475    }
476
477    fn fit(y: &Array1<f64>) -> Array1<f64> {
478        let std_dev = y.std(0.0).max(1e-6);
479        array![std_dev.ln()]
480    }
481
482    fn n_params(&self) -> usize {
483        1
484    }
485
486    fn predict(&self) -> Array1<f64> {
487        self.loc.clone()
488    }
489
490    fn params(&self) -> &Array2<f64> {
491        &self._params
492    }
493}
494
// Marker impl: `NormalFixedMean` can be used for regression targets.
impl RegressionDistn for NormalFixedMean {}
496
497impl DistributionMethods for NormalFixedMean {
498    fn mean(&self) -> Array1<f64> {
499        self.loc.clone()
500    }
501
502    fn variance(&self) -> Array1<f64> {
503        self.var.clone()
504    }
505
506    fn std(&self) -> Array1<f64> {
507        self.scale.clone()
508    }
509
510    fn pdf(&self, y: &Array1<f64>) -> Array1<f64> {
511        let mut result = Array1::zeros(y.len());
512        for i in 0..y.len() {
513            let d = NormalDist::new(self.loc[i], self.scale[i]).unwrap();
514            result[i] = d.pdf(y[i]);
515        }
516        result
517    }
518
519    fn cdf(&self, y: &Array1<f64>) -> Array1<f64> {
520        let mut result = Array1::zeros(y.len());
521        for i in 0..y.len() {
522            let d = NormalDist::new(self.loc[i], self.scale[i]).unwrap();
523            result[i] = d.cdf(y[i]);
524        }
525        result
526    }
527
528    fn ppf(&self, q: &Array1<f64>) -> Array1<f64> {
529        let mut result = Array1::zeros(q.len());
530        for i in 0..q.len() {
531            let d = NormalDist::new(self.loc[i], self.scale[i]).unwrap();
532            result[i] = d.inverse_cdf(q[i]);
533        }
534        result
535    }
536
537    fn sample(&self, n_samples: usize) -> Array2<f64> {
538        let n_obs = self.loc.len();
539        let mut samples = Array2::zeros((n_samples, n_obs));
540        let mut rng = rand::rng();
541
542        for i in 0..n_obs {
543            let d = NormalDist::new(self.loc[i], self.scale[i]).unwrap();
544            for s in 0..n_samples {
545                let u: f64 = rng.random();
546                samples[[s, i]] = d.inverse_cdf(u);
547            }
548        }
549        samples
550    }
551}
552
553impl Scorable<LogScore> for NormalFixedMean {
554    fn score(&self, y: &Array1<f64>) -> Array1<f64> {
555        let mut scores = Array1::zeros(y.len());
556        for (i, &y_i) in y.iter().enumerate() {
557            let d = NormalDist::new(self.loc[i], self.scale[i]).unwrap();
558            scores[i] = -d.ln_pdf(y_i);
559        }
560        scores
561    }
562
563    fn d_score(&self, y: &Array1<f64>) -> Array2<f64> {
564        let n_obs = y.len();
565        let mut d_params = Array2::zeros((n_obs, 1));
566
567        for i in 0..n_obs {
568            let err = self.loc[i] - y[i];
569            // d/d(log(scale)) = 1 - (loc - y)^2 / var
570            d_params[[i, 0]] = 1.0 - (err * err) / self.var[i];
571        }
572
573        d_params
574    }
575
576    fn metric(&self) -> Array3<f64> {
577        let n_obs = self.loc.len();
578        let mut fi = Array3::zeros((n_obs, 1, 1));
579
580        for i in 0..n_obs {
581            fi[[i, 0, 0]] = 2.0;
582        }
583
584        fi
585    }
586}
587
588impl Scorable<CRPScore> for NormalFixedMean {
589    fn score(&self, y: &Array1<f64>) -> Array1<f64> {
590        let std_normal = NormalDist::new(0.0, 1.0).unwrap();
591        let sqrt_pi = std::f64::consts::PI.sqrt();
592
593        let mut scores = Array1::zeros(y.len());
594        for i in 0..y.len() {
595            let z = (y[i] - self.loc[i]) / self.scale[i];
596            let pdf_z = std_normal.pdf(z);
597            let cdf_z = std_normal.cdf(z);
598            scores[i] = self.scale[i] * (z * (2.0 * cdf_z - 1.0) + 2.0 * pdf_z - 1.0 / sqrt_pi);
599        }
600        scores
601    }
602
603    fn d_score(&self, y: &Array1<f64>) -> Array2<f64> {
604        let std_normal = NormalDist::new(0.0, 1.0).unwrap();
605        let sqrt_pi = std::f64::consts::PI.sqrt();
606        let n_obs = y.len();
607        let mut d_params = Array2::zeros((n_obs, 1));
608
609        for i in 0..n_obs {
610            let z = (y[i] - self.loc[i]) / self.scale[i];
611            let pdf_z = std_normal.pdf(z);
612            let cdf_z = std_normal.cdf(z);
613            let score_i = self.scale[i] * (z * (2.0 * cdf_z - 1.0) + 2.0 * pdf_z - 1.0 / sqrt_pi);
614            let d_loc = -(2.0 * cdf_z - 1.0);
615            d_params[[i, 0]] = score_i + (y[i] - self.loc[i]) * d_loc;
616        }
617
618        d_params
619    }
620
621    fn metric(&self) -> Array3<f64> {
622        let sqrt_pi = std::f64::consts::PI.sqrt();
623        let n_obs = self.loc.len();
624        let mut fi = Array3::zeros((n_obs, 1, 1));
625
626        for i in 0..n_obs {
627            fi[[i, 0, 0]] = self.var[i] / (2.0 * sqrt_pi);
628        }
629
630        fi
631    }
632}
633
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    #[test]
    fn test_normal_distribution_methods() {
        // Three observations: loc = 0, 1, 2 and log(scale) = 0 => scale = 1.
        let params =
            Array2::from_shape_vec((3, 2), vec![0.0, 0.0, 1.0, 0.0, 2.0, 1.0_f64.ln()]).unwrap();
        let dist = Normal::from_params(&params);

        // Mean equals loc per observation.
        let mean = dist.mean();
        for (idx, expected) in [0.0, 1.0, 2.0].iter().enumerate() {
            assert_relative_eq!(mean[idx], *expected, epsilon = 1e-10);
        }

        // Variance is 1 everywhere (scale = 1).
        let var = dist.variance();
        for idx in 0..3 {
            assert_relative_eq!(var[idx], 1.0, epsilon = 1e-10);
        }

        // CDF evaluated at each mean is exactly one half.
        let y = Array1::from_vec(vec![0.0, 1.0, 2.0]);
        let cdf = dist.cdf(&y);
        for idx in 0..3 {
            assert_relative_eq!(cdf[idx], 0.5, epsilon = 1e-10);
        }

        // The median (q = 0.5) recovers the mean.
        let q = Array1::from_vec(vec![0.5, 0.5, 0.5]);
        let ppf = dist.ppf(&q);
        for (idx, expected) in [0.0, 1.0, 2.0].iter().enumerate() {
            assert_relative_eq!(ppf[idx], *expected, epsilon = 1e-10);
        }

        // A 95% interval around 0 must straddle 0.
        let (lower, upper) = dist.interval(0.05);
        assert!(lower[0] < 0.0);
        assert!(upper[0] > 0.0);
    }

    #[test]
    fn test_normal_sample() {
        // Two observations: loc = 0 and loc = 5, both with scale = 1.
        let params = Array2::from_shape_vec((2, 2), vec![0.0, 0.0, 5.0, 1.0_f64.ln()]).unwrap();
        let dist = Normal::from_params(&params);

        let samples = dist.sample(1000);
        assert_eq!(samples.shape(), &[1000, 2]);

        // Empirical means should land near the true means.
        let n_rows = samples.nrows() as f64;
        let empirical_mean = |col: usize| samples.column(col).iter().sum::<f64>() / n_rows;

        assert!((empirical_mean(0) - 0.0).abs() < 0.2);
        assert!((empirical_mean(1) - 5.0).abs() < 0.2);
    }

    #[test]
    fn test_normal_fit() {
        let y = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        let params = Normal::fit(&y);
        // Two free parameters: [mean, ln(std)].
        assert_eq!(params.len(), 2);
        assert_relative_eq!(params[0], 3.0, epsilon = 1e-10); // mean
    }

    #[test]
    fn test_normal_fixed_var_distribution_methods() {
        // Single-column params hold loc; variance is pinned to 1.
        let params = Array2::from_shape_vec((2, 1), vec![0.0, 5.0]).unwrap();
        let dist = NormalFixedVar::from_params(&params);

        assert_relative_eq!(dist.mean()[0], 0.0, epsilon = 1e-10);
        assert_relative_eq!(dist.mean()[1], 5.0, epsilon = 1e-10);
        assert_relative_eq!(dist.variance()[0], 1.0, epsilon = 1e-10);
        assert_relative_eq!(dist.variance()[1], 1.0, epsilon = 1e-10);
    }

    #[test]
    fn test_normal_fixed_mean_distribution_methods() {
        // Single-column params hold log(scale):
        // 0 -> scale = 1, 1 -> scale = e; the mean is pinned to 0.
        let params = Array2::from_shape_vec((2, 1), vec![0.0, 1.0]).unwrap();
        let dist = NormalFixedMean::from_params(&params);

        assert_relative_eq!(dist.mean()[0], 0.0, epsilon = 1e-10);
        assert_relative_eq!(dist.mean()[1], 0.0, epsilon = 1e-10);
        assert_relative_eq!(dist.std()[0], 1.0, epsilon = 1e-10);
        assert_relative_eq!(dist.std()[1], std::f64::consts::E, epsilon = 1e-10);
    }
}