#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use crate::consts::*;
use crate::impl_display;
use crate::traits::*;
use rand::Rng;
use special::Error as _;
use std::f64::consts::SQRT_2;
use std::fmt;
/// Log-normal distribution over x ∈ (0, ∞), i.e. ln(X) ~ N(mu, sigma²).
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct LogNormal {
    // Mean of ln(X); validated finite by `new`/`set_mu`.
    mu: f64,
    // Standard deviation of ln(X); validated finite and > 0 by `new`/`set_sigma`.
    sigma: f64,
}
/// Errors returned when constructing or mutating a `LogNormal`.
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum LogNormalError {
    /// The mu parameter was infinite or NaN
    MuNotFinite { mu: f64 },
    /// The sigma parameter was zero or negative
    SigmaTooLow { sigma: f64 },
    /// The sigma parameter was infinite or NaN
    SigmaNotFinite { sigma: f64 },
}
impl LogNormal {
    /// Create a new `LogNormal` distribution.
    ///
    /// # Errors
    /// - `MuNotFinite` if `mu` is NaN or infinite
    /// - `SigmaTooLow` if `sigma` is not strictly positive
    /// - `SigmaNotFinite` if `sigma` is NaN or infinite
    #[inline]
    pub fn new(mu: f64, sigma: f64) -> Result<Self, LogNormalError> {
        // Guard clauses; check order matters: a NaN sigma skips the
        // `<= 0.0` comparison and is reported as `SigmaNotFinite`.
        if !mu.is_finite() {
            return Err(LogNormalError::MuNotFinite { mu });
        }
        if sigma <= 0.0 {
            return Err(LogNormalError::SigmaTooLow { sigma });
        }
        if !sigma.is_finite() {
            return Err(LogNormalError::SigmaNotFinite { sigma });
        }
        Ok(LogNormal { mu, sigma })
    }

    /// Create a new `LogNormal` without validating the parameters.
    #[inline]
    pub fn new_unchecked(mu: f64, sigma: f64) -> Self {
        LogNormal { mu, sigma }
    }

    /// The standard log-normal: mu = 0, sigma = 1.
    #[inline]
    pub fn standard() -> Self {
        LogNormal {
            mu: 0.0,
            sigma: 1.0,
        }
    }

    /// Get the mu parameter.
    #[inline]
    pub fn mu(&self) -> f64 {
        self.mu
    }

    /// Set mu, failing if it is not finite.
    #[inline]
    pub fn set_mu(&mut self, mu: f64) -> Result<(), LogNormalError> {
        if mu.is_finite() {
            self.set_mu_unchecked(mu);
            Ok(())
        } else {
            Err(LogNormalError::MuNotFinite { mu })
        }
    }

    /// Set mu without validation.
    #[inline]
    pub fn set_mu_unchecked(&mut self, mu: f64) {
        self.mu = mu;
    }

    /// Get the sigma parameter.
    #[inline]
    pub fn sigma(&self) -> f64 {
        self.sigma
    }

    /// Set sigma, failing unless it is finite and strictly positive.
    #[inline]
    pub fn set_sigma(&mut self, sigma: f64) -> Result<(), LogNormalError> {
        if sigma <= 0.0 {
            Err(LogNormalError::SigmaTooLow { sigma })
        } else if !sigma.is_finite() {
            Err(LogNormalError::SigmaNotFinite { sigma })
        } else {
            self.set_sigma_unchecked(sigma);
            Ok(())
        }
    }

    /// Set sigma without validation.
    #[inline]
    pub fn set_sigma_unchecked(&mut self, sigma: f64) {
        self.sigma = sigma;
    }
}
impl Default for LogNormal {
fn default() -> Self {
LogNormal::standard()
}
}
impl From<&LogNormal> for String {
fn from(lognorm: &LogNormal) -> String {
format!("LogNormal(μ: {}, σ: {})", lognorm.mu, lognorm.sigma)
}
}
// Derive `fmt::Display` for `LogNormal` from its `From<&LogNormal> for String` impl.
impl_display!(LogNormal);
/// Implements the distribution traits (`Rv`, `Support`, `Cdf`, `InverseCdf`,
/// `Mean`, `Median`, `Mode`, …) for a given float type (`f32` or `f64`).
macro_rules! impl_traits {
    ($kind: ty) => {
        impl Rv<$kind> for LogNormal {
            fn ln_f(&self, x: &$kind) -> f64 {
                // ln f(x) = -ln x - ln σ - ½ln 2π - ½((ln x - μ)/σ)²
                let xk = f64::from(*x);
                let xk_ln = xk.ln();
                let d = (xk_ln - self.mu) / self.sigma;
                -xk_ln - self.sigma.ln() - HALF_LN_2PI - 0.5 * d * d
            }

            fn draw<R: Rng>(&self, rng: &mut R) -> $kind {
                // Parameters are pre-validated, so `new` cannot fail here.
                let g =
                    rand_distr::LogNormal::new(self.mu, self.sigma).unwrap();
                rng.sample(g) as $kind
            }

            fn sample<R: Rng>(&self, n: usize, rng: &mut R) -> Vec<$kind> {
                let g =
                    rand_distr::LogNormal::new(self.mu, self.sigma).unwrap();
                (0..n).map(|_| rng.sample(g) as $kind).collect()
            }
        }

        impl ContinuousDistr<$kind> for LogNormal {}

        impl Support<$kind> for LogNormal {
            fn supports(&self, x: &$kind) -> bool {
                // Support is the strictly positive finite reals.
                *x > 0.0 && x.is_finite()
            }
        }

        impl Cdf<$kind> for LogNormal {
            fn cdf(&self, x: &$kind) -> f64 {
                // Φ((ln x - μ)/σ) written via the error function.
                let xk = f64::from(*x);
                0.5 + 0.5
                    * ((xk.ln() - self.mu) / (SQRT_2 * self.sigma)).error()
            }
        }

        impl InverseCdf<$kind> for LogNormal {
            fn invcdf(&self, p: f64) -> $kind {
                // exp(μ + √2 σ · erf⁻¹(2p - 1))
                (self.mu + SQRT_2 * self.sigma * (2.0 * p - 1.0).inv_error())
                    .exp() as $kind
            }
        }

        impl Mean<$kind> for LogNormal {
            fn mean(&self) -> Option<$kind> {
                // E[X] = exp(μ + σ²/2)
                Some((self.mu + self.sigma * self.sigma / 2.0).exp() as $kind)
            }
        }

        impl Median<$kind> for LogNormal {
            fn median(&self) -> Option<$kind> {
                // median = exp(μ)
                Some(self.mu.exp() as $kind)
            }
        }

        impl Mode<$kind> for LogNormal {
            fn mode(&self) -> Option<$kind> {
                // mode = exp(μ - σ²). The previous version returned the
                // log-scale value μ - σ² without exponentiating, which is
                // inconsistent with `Median` and mathematically wrong.
                Some((self.mu - self.sigma * self.sigma).exp() as $kind)
            }
        }
    };
}
impl Variance<f64> for LogNormal {
    /// Var[X] = (e^{σ²} − 1) · e^{2μ + σ²}
    fn variance(&self) -> Option<f64> {
        let s2 = self.sigma * self.sigma;
        Some((s2.exp() - 1.0) * (2.0 * self.mu + s2).exp())
    }
}
impl Entropy for LogNormal {
    /// Differential entropy: μ + ½ + ln σ + ½ln 2π
    fn entropy(&self) -> f64 {
        let ln_sigma = self.sigma.ln();
        (self.mu + 0.5) + ln_sigma + HALF_LN_2PI
    }
}
impl Skewness for LogNormal {
    /// Skewness = (e^{σ²} + 2) · √(e^{σ²} − 1)
    fn skewness(&self) -> Option<f64> {
        let e_sigma_2 = (self.sigma * self.sigma).exp();
        let skew = (e_sigma_2 + 2.0) * (e_sigma_2 - 1.0).sqrt();
        Some(skew)
    }
}
impl Kurtosis for LogNormal {
    /// Excess kurtosis: e^{4σ²} + 2e^{3σ²} + 3e^{2σ²} − 6
    fn kurtosis(&self) -> Option<f64> {
        let s2 = self.sigma * self.sigma;
        let kurt = (4.0 * s2).exp() + 2.0 * (3.0 * s2).exp()
            + 3.0 * (2.0 * s2).exp()
            - 6.0;
        Some(kurt)
    }
}
// Generate the distribution trait impls for both float widths.
impl_traits!(f32);
impl_traits!(f64);
// Marker impl; `LogNormalError` already provides the required `Debug` and `Display`.
impl std::error::Error for LogNormalError {}
impl fmt::Display for LogNormalError {
    /// Render a human-readable description of the parameter error.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::MuNotFinite { mu } => write!(f, "non-finite mu: {}", mu),
            Self::SigmaNotFinite { sigma } => write!(f, "non-finite sigma: {}", sigma),
            Self::SigmaTooLow { sigma } => write!(f, "sigma ({}) must be greater than zero", sigma),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_basic_impls;
    use std::f64;

    const TOL: f64 = 1E-12;

    test_basic_impls!([continuous] LogNormal::default());

    #[test]
    fn new() {
        let lognorm = LogNormal::new(1.2, 3.0).unwrap();
        assert::close(lognorm.mu, 1.2, TOL);
        assert::close(lognorm.sigma, 3.0, TOL);
    }

    #[test]
    fn mean() {
        let mu = 3.0;
        let sigma = 2.0;
        let mean: f64 = LogNormal::new(mu, sigma).unwrap().mean().unwrap();
        assert::close(mean, 5.0_f64.exp(), TOL);
    }

    #[test]
    fn median_should_be_exp_mu() {
        let mu = 3.4;
        let median: f64 = LogNormal::new(mu, 0.5).unwrap().median().unwrap();
        assert::close(median, mu.exp(), TOL);
    }

    #[test]
    fn mode() {
        // mode = exp(μ - σ²) = exp(4 - 4) = 1.0. The old expectation of 0.0
        // pinned the un-exponentiated (log-scale) value, which is incorrect.
        let mode: f64 = LogNormal::new(4.0, 2.0).unwrap().mode().unwrap();
        assert::close(mode, 1.0, TOL);
    }

    #[test]
    fn variance() {
        let lognorm_1 = LogNormal::new(3.4, 1.0).unwrap();
        let lognorm_2 = LogNormal::new(1.0, 3.0).unwrap();
        assert::close(
            lognorm_1.variance().unwrap(),
            (1.0_f64.exp() - 1.0) * 7.8_f64.exp(),
            TOL,
        );
        assert::close(
            lognorm_2.variance().unwrap(),
            (9.0_f64.exp() - 1.0) * 11.0_f64.exp(),
            TOL,
        );
    }

    #[test]
    fn draws_should_be_finite() {
        let mut rng = rand::thread_rng();
        let lognorm = LogNormal::standard();
        for _ in 0..100 {
            let x: f64 = lognorm.draw(&mut rng);
            assert!(x.is_finite())
        }
    }

    #[test]
    fn sample_length() {
        let mut rng = rand::thread_rng();
        let lognorm = LogNormal::standard();
        let xs: Vec<f64> = lognorm.sample(10, &mut rng);
        assert_eq!(xs.len(), 10);
    }

    #[test]
    fn standard_ln_pdf_at_one() {
        let lognorm = LogNormal::standard();
        assert::close(lognorm.ln_pdf(&1.0_f64), -0.918_938_533_204_672_7, TOL);
    }

    #[test]
    fn standard_ln_pdf_at_e() {
        let lognorm = LogNormal::standard();
        assert::close(
            lognorm.ln_pdf(&f64::consts::E),
            -2.4189385332046727,
            TOL,
        );
    }

    #[test]
    fn should_contain_positve_finite_values() {
        let lognorm = LogNormal::standard();
        assert!(lognorm.supports(&1E-8_f32));
        assert!(lognorm.supports(&10E8_f64));
    }

    #[test]
    fn should_not_contain_negative_or_zero() {
        let lognorm = LogNormal::standard();
        assert!(!lognorm.supports(&-1.0_f64));
        assert!(!lognorm.supports(&0.0_f64));
    }

    #[test]
    fn should_not_contain_nan() {
        let lognorm = LogNormal::standard();
        assert!(!lognorm.supports(&f64::NAN));
    }

    #[test]
    fn should_not_contain_positive_or_negative_infinity() {
        let lognorm = LogNormal::standard();
        assert!(!lognorm.supports(&f64::INFINITY));
        assert!(!lognorm.supports(&f64::NEG_INFINITY));
    }

    #[test]
    fn skewness() {
        let lognorm = LogNormal::new(-1.2, 3.4).unwrap();
        assert::close(lognorm.skewness().unwrap(), 33_936_928.306_623_81, TOL);
    }

    #[test]
    fn kurtosis() {
        let lognorm = LogNormal::new(-1.2, 1.0).unwrap();
        assert::close(lognorm.kurtosis().unwrap(), 110.93639217631153, TOL);
    }

    #[test]
    fn cdf_standard_at_one_should_be_one_half() {
        let lognorm1 = LogNormal::new(0.0, 1.0).unwrap();
        assert::close(lognorm1.cdf(&1.0_f64), 0.5, TOL);
    }

    #[test]
    fn cdf_standard_value_at_two() {
        let lognorm = LogNormal::standard();
        assert::close(lognorm.cdf(&2.0_f64), 0.755_891_404_214_417_3, TOL);
    }

    #[test]
    fn quantile_agree_with_cdf() {
        let mut rng = rand::thread_rng();
        let lognorm = LogNormal::standard();
        let xs: Vec<f64> = lognorm.sample(100, &mut rng);
        xs.iter().for_each(|x| {
            let p = lognorm.cdf(x);
            let y: f64 = lognorm.quantile(p);
            assert::close(y, *x, TOL);
        })
    }

    #[test]
    fn entropy() {
        let lognorm = LogNormal::new(1.2, 3.4).unwrap();
        assert::close(lognorm.entropy(), 3.8427139648267885, TOL);
    }

    #[test]
    fn entropy_standard() {
        let lognorm = LogNormal::standard();
        assert::close(lognorm.entropy(), 1.4189385332046727, TOL);
    }
}