#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use crate::consts::HALF_LN_2PI;
use crate::impl_display;
use crate::traits::{
Cdf, ContinuousDistr, Entropy, HasDensity, InverseCdf, Kurtosis, Mean,
Median, Mode, Parameterized, Sampleable, Scalable, Shiftable, Skewness,
Support, Variance,
};
use rand::Rng;
use special::Error as _;
use std::f64::consts::SQRT_2;
use std::fmt;
/// [Log-normal distribution](https://en.wikipedia.org/wiki/Log-normal_distribution)
/// LogNormal(μ, σ): the distribution of a positive random variable whose
/// natural logarithm is Normal(μ, σ).
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct LogNormal {
    // Mean of the underlying normal on the log scale; invariant: finite
    mu: f64,
    // Standard deviation of the underlying normal; invariant: finite and > 0
    sigma: f64,
}
// Shift support comes from the crate-wide macro.
crate::impl_shiftable!(LogNormal);
impl Scalable for LogNormal {
    type Output = LogNormal;
    type Error = LogNormalError;

    /// Scale the variable by `scale`: if X ~ LogNormal(μ, σ), then
    /// scale·X ~ LogNormal(μ + ln(scale), σ). The new μ is validated, so
    /// a non-positive `scale` (whose `ln` is NaN or -∞) yields an error.
    fn scaled(self, scale: f64) -> Result<Self::Output, Self::Error>
    where
        Self: Sized,
    {
        let scaled_mu = self.mu() + scale.ln();
        LogNormal::new(scaled_mu, self.sigma())
    }

    /// Same transformation as `scaled`, skipping parameter validation.
    fn scaled_unchecked(self, scale: f64) -> Self::Output
    where
        Self: Sized,
    {
        let scaled_mu = self.mu() + scale.ln();
        LogNormal::new_unchecked(scaled_mu, self.sigma())
    }
}
/// Owned (μ, σ) parameter bundle for [`LogNormal`], used by the
/// `Parameterized` trait.
pub struct LogNormalParameters {
    // Log-scale mean
    pub mu: f64,
    // Log-scale standard deviation
    pub sigma: f64,
}
impl Parameterized for LogNormal {
type Parameters = LogNormalParameters;
fn emit_params(&self) -> Self::Parameters {
Self::Parameters {
mu: self.mu(),
sigma: self.sigma(),
}
}
fn from_params(params: Self::Parameters) -> Self {
Self::new_unchecked(params.mu, params.sigma)
}
}
/// Error cases for constructing or mutating a [`LogNormal`].
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde1", serde(rename_all = "snake_case"))]
pub enum LogNormalError {
    // The μ parameter was NaN or ±∞
    MuNotFinite { mu: f64 },
    // The σ parameter was ≤ 0
    SigmaTooLow { sigma: f64 },
    // The σ parameter was NaN or +∞
    SigmaNotFinite { sigma: f64 },
}
impl LogNormal {
    /// Create a new LogNormal(μ, σ) distribution.
    ///
    /// # Errors
    /// Checks are applied in order: a non-finite `mu` is reported first,
    /// then a non-positive `sigma`, then a non-finite `sigma` (NaN falls
    /// through the `<= 0` test and is reported as `SigmaNotFinite`).
    #[inline]
    pub fn new(mu: f64, sigma: f64) -> Result<Self, LogNormalError> {
        if !mu.is_finite() {
            return Err(LogNormalError::MuNotFinite { mu });
        }
        if sigma <= 0.0 {
            return Err(LogNormalError::SigmaTooLow { sigma });
        }
        if !sigma.is_finite() {
            return Err(LogNormalError::SigmaNotFinite { sigma });
        }
        Ok(LogNormal { mu, sigma })
    }

    /// Create a new LogNormal without validating the parameters.
    #[inline]
    #[must_use]
    pub fn new_unchecked(mu: f64, sigma: f64) -> Self {
        LogNormal { mu, sigma }
    }

    /// The standard log-normal distribution, LogNormal(0, 1).
    #[inline]
    #[must_use]
    pub fn standard() -> Self {
        Self::new_unchecked(0.0, 1.0)
    }

    /// Get the μ (log-scale mean) parameter.
    #[inline]
    #[must_use]
    pub fn mu(&self) -> f64 {
        self.mu
    }

    /// Set μ, rejecting non-finite values.
    #[inline]
    pub fn set_mu(&mut self, mu: f64) -> Result<(), LogNormalError> {
        if !mu.is_finite() {
            return Err(LogNormalError::MuNotFinite { mu });
        }
        self.set_mu_unchecked(mu);
        Ok(())
    }

    /// Set μ without validation.
    #[inline]
    pub fn set_mu_unchecked(&mut self, mu: f64) {
        self.mu = mu;
    }

    /// Get the σ (log-scale standard deviation) parameter.
    #[inline]
    #[must_use]
    pub fn sigma(&self) -> f64 {
        self.sigma
    }

    /// Set σ, rejecting non-positive and non-finite values.
    #[inline]
    pub fn set_sigma(&mut self, sigma: f64) -> Result<(), LogNormalError> {
        if sigma <= 0.0 {
            return Err(LogNormalError::SigmaTooLow { sigma });
        }
        if !sigma.is_finite() {
            return Err(LogNormalError::SigmaNotFinite { sigma });
        }
        self.set_sigma_unchecked(sigma);
        Ok(())
    }

    /// Set σ without validation.
    #[inline]
    pub fn set_sigma_unchecked(&mut self, sigma: f64) {
        self.sigma = sigma;
    }
}
impl Default for LogNormal {
fn default() -> Self {
LogNormal::standard()
}
}
impl From<&LogNormal> for String {
    /// Render the distribution as e.g. `LogNormal(μ: 0, σ: 1)`.
    fn from(lognorm: &LogNormal) -> String {
        format!("LogNormal(μ: {}, σ: {})", lognorm.mu(), lognorm.sigma())
    }
}

impl_display!(LogNormal);
/// Implement the density/sampling/CDF/moment traits for a float kind
/// (`f32` or `f64`). Internal math runs in `f64`; the result is cast to
/// `$kind` as the final step so both widths share the same precision.
macro_rules! impl_traits {
    ($kind: ty) => {
        impl HasDensity<$kind> for LogNormal {
            /// ln f(x) = −ln(x) − ln(σ) − ln(2π)/2 − (ln(x) − μ)²/(2σ²)
            fn ln_f(&self, x: &$kind) -> f64 {
                let xk = f64::from(*x);
                let xk_ln = xk.ln();
                // Standardized distance on the log scale
                let d = (xk_ln - self.mu) / self.sigma;
                (0.5 * d).mul_add(-d, -xk_ln - self.sigma.ln() - HALF_LN_2PI)
            }
        }

        impl Sampleable<$kind> for LogNormal {
            fn draw<R: Rng>(&self, rng: &mut R) -> $kind {
                // Parameters were validated at construction, so the
                // rand_distr constructor cannot fail here.
                let g =
                    rand_distr::LogNormal::new(self.mu, self.sigma).unwrap();
                rng.sample(g) as $kind
            }

            fn sample<R: Rng>(&self, n: usize, rng: &mut R) -> Vec<$kind> {
                let g =
                    rand_distr::LogNormal::new(self.mu, self.sigma).unwrap();
                (0..n).map(|_| rng.sample(g) as $kind).collect()
            }
        }

        impl ContinuousDistr<$kind> for LogNormal {}

        impl Support<$kind> for LogNormal {
            /// The support is the positive reals, (0, ∞).
            fn supports(&self, x: &$kind) -> bool {
                *x > 0.0 && x.is_finite()
            }
        }

        impl Cdf<$kind> for LogNormal {
            /// F(x) = 1/2 + erf((ln(x) − μ) / (σ√2)) / 2
            fn cdf(&self, x: &$kind) -> f64 {
                let xk = f64::from(*x);
                0.5_f64.mul_add(
                    ((xk.ln() - self.mu) / (SQRT_2 * self.sigma)).error(),
                    0.5,
                )
            }
        }

        impl InverseCdf<$kind> for LogNormal {
            /// F⁻¹(p) = exp(μ + σ√2 · erf⁻¹(2p − 1))
            fn invcdf(&self, p: f64) -> $kind {
                SQRT_2
                    .mul_add(
                        self.sigma * 2.0_f64.mul_add(p, -1.0).inv_error(),
                        self.mu,
                    )
                    .exp() as $kind
            }
        }

        impl Mean<$kind> for LogNormal {
            /// E[X] = exp(μ + σ²/2)
            fn mean(&self) -> Option<$kind> {
                Some((self.mu + self.sigma * self.sigma / 2.0).exp() as $kind)
            }
        }

        impl Median<$kind> for LogNormal {
            /// median = exp(μ)
            fn median(&self) -> Option<$kind> {
                Some(self.mu.exp() as $kind)
            }
        }

        impl Mode<$kind> for LogNormal {
            /// mode = exp(μ − σ²)
            fn mode(&self) -> Option<$kind> {
                // Compute exp(μ − σ²) in f64 and cast last, consistent
                // with `mean`/`median`. (Previously the cast to `$kind`
                // happened *before* `exp`, so the f32 impl evaluated the
                // exponential in single precision.)
                Some(self.sigma.mul_add(-self.sigma, self.mu).exp() as $kind)
            }
        }
    };
}
impl Variance<f64> for LogNormal {
    /// Var[X] = (exp(σ²) − 1) · exp(2μ + σ²)
    fn variance(&self) -> Option<f64> {
        // exp(σ²) − 1, computed stably via exp_m1
        let growth = (self.sigma * self.sigma).exp_m1();
        // exp(σ² + 2μ), with the sum formed by a fused multiply-add
        let scale = self.sigma.mul_add(self.sigma, 2.0 * self.mu).exp();
        Some(growth * scale)
    }
}
impl Entropy for LogNormal {
    /// Differential entropy: μ + 1/2 + ln(σ) + ln(2π)/2.
    fn entropy(&self) -> f64 {
        (self.mu + 0.5) + self.sigma.ln() + HALF_LN_2PI
    }
}
impl Skewness for LogNormal {
    /// Skewness = (exp(σ²) + 2) · √(exp(σ²) − 1)
    fn skewness(&self) -> Option<f64> {
        let exp_var = (self.sigma * self.sigma).exp();
        let skew = (exp_var + 2.0) * (exp_var - 1.0).sqrt();
        Some(skew)
    }
}
impl Kurtosis for LogNormal {
    /// Excess kurtosis: exp(4σ²) + 2·exp(3σ²) + 3·exp(2σ²) − 6.
    fn kurtosis(&self) -> Option<f64> {
        let s2 = self.sigma * self.sigma;
        // Assembled as 3·exp(2σ²) + (2·exp(3σ²) + exp(4σ²)) − 6 using
        // nested fused multiply-adds.
        Some(
            3.0_f64.mul_add(
                (2.0 * s2).exp(),
                (3.0 * s2).exp().mul_add(2.0, (4.0 * s2).exp()),
            ) - 6.0,
        )
    }
}
// Instantiate the distribution traits for both float widths.
impl_traits!(f32);
impl_traits!(f64);

// Display is implemented below, so the std Error impl needs no overrides.
impl std::error::Error for LogNormalError {}
#[cfg_attr(coverage_nightly, coverage(off))]
impl fmt::Display for LogNormalError {
    /// Human-readable message for each parameter-validation failure.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::MuNotFinite { mu } => write!(f, "non-finite mu: {mu}"),
            Self::SigmaTooLow { sigma } => {
                write!(f, "sigma ({sigma}) must be greater than zero")
            }
            Self::SigmaNotFinite { sigma } => {
                write!(f, "non-finite sigma: {sigma}")
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_basic_impls;
    use proptest::prelude::*;
    use std::f64;

    // Absolute tolerance for floating-point comparisons.
    const TOL: f64 = 1E-12;

    test_basic_impls!(f64, LogNormal);

    #[test]
    fn new() {
        let lognorm = LogNormal::new(1.2, 3.0).unwrap();
        assert::close(lognorm.mu, 1.2, TOL);
        assert::close(lognorm.sigma, 3.0, TOL);
    }

    #[test]
    fn mean() {
        // E[X] = exp(mu + sigma^2/2) = exp(3 + 4/2) = exp(5)
        let mu = 3.0;
        let sigma = 2.0;
        let mean: f64 = LogNormal::new(mu, sigma).unwrap().mean().unwrap();
        assert::close(mean, 5.0_f64.exp(), TOL);
    }

    #[test]
    fn median_should_be_exp_mu() {
        let mu = 3.4;
        let median: f64 = LogNormal::new(mu, 0.5).unwrap().median().unwrap();
        assert::close(median, mu.exp(), TOL);
    }

    #[test]
    fn variance() {
        // Var = (exp(sigma^2) - 1) * exp(2*mu + sigma^2)
        let lognorm_1 = LogNormal::new(3.4, 1.0).unwrap();
        let lognorm_2 = LogNormal::new(1.0, 3.0).unwrap();
        assert::close(
            lognorm_1.variance().unwrap(),
            1.0_f64.exp_m1() * 7.8_f64.exp(),
            TOL,
        );
        assert::close(
            lognorm_2.variance().unwrap(),
            9.0_f64.exp_m1() * 11.0_f64.exp(),
            TOL,
        );
    }

    #[test]
    fn draws_should_be_finite() {
        let mut rng = rand::rng();
        let lognorm = LogNormal::standard();
        for _ in 0..100 {
            let x: f64 = lognorm.draw(&mut rng);
            assert!(x.is_finite());
        }
    }

    #[test]
    fn sample_length() {
        let mut rng = rand::rng();
        let lognorm = LogNormal::standard();
        let xs: Vec<f64> = lognorm.sample(10, &mut rng);
        assert_eq!(xs.len(), 10);
    }

    #[test]
    fn standard_ln_pdf_at_one() {
        // At x = 1, ln f = -ln(2*pi)/2 for the standard log-normal.
        let lognorm = LogNormal::standard();
        assert::close(lognorm.ln_pdf(&1.0_f64), -0.918_938_533_204_672_7, TOL);
    }

    #[test]
    fn standard_ln_pdf_at_e() {
        let lognorm = LogNormal::standard();
        assert::close(
            lognorm.ln_pdf(&f64::consts::E),
            -2.418_938_533_204_672_7,
            TOL,
        );
    }

    #[test]
    fn should_contain_positive_finite_values() {
        let lognorm = LogNormal::standard();
        assert!(lognorm.supports(&1E-8_f32));
        assert!(lognorm.supports(&10E8_f64));
    }

    #[test]
    fn should_not_contain_negative_or_zero() {
        let lognorm = LogNormal::standard();
        assert!(!lognorm.supports(&-1.0_f64));
        assert!(!lognorm.supports(&0.0_f64));
    }

    #[test]
    fn should_not_contain_nan() {
        let lognorm = LogNormal::standard();
        assert!(!lognorm.supports(&f64::NAN));
    }

    #[test]
    fn should_not_contain_positive_or_negative_infinity() {
        let lognorm = LogNormal::standard();
        assert!(!lognorm.supports(&f64::INFINITY));
        assert!(!lognorm.supports(&f64::NEG_INFINITY));
    }

    #[test]
    fn skewness() {
        let lognorm = LogNormal::new(-1.2, 3.4).unwrap();
        assert::close(lognorm.skewness().unwrap(), 33_936_928.306_623_81, TOL);
    }

    #[test]
    fn kurtosis() {
        let lognorm = LogNormal::new(-1.2, 1.0).unwrap();
        assert::close(lognorm.kurtosis().unwrap(), 110.936_392_176_311_53, TOL);
    }

    #[test]
    fn cdf_standard_at_one_should_be_one_half() {
        // The CDF at the median, exp(mu) = 1, is exactly 1/2.
        let lognorm1 = LogNormal::new(0.0, 1.0).unwrap();
        assert::close(lognorm1.cdf(&1.0_f64), 0.5, TOL);
    }

    #[test]
    fn cdf_standard_value_at_two() {
        let lognorm = LogNormal::standard();
        assert::close(lognorm.cdf(&2.0_f64), 0.755_891_404_214_417_3, TOL);
    }

    proptest! {
        // quantile and cdf should be inverses of each other on (0, 1)
        #[test]
        fn quantile_agree_with_cdf(p in 0.0..1.0) {
            prop_assume!(p > 0.0);
            prop_assume!(p < 1.0);
            let lognorm = LogNormal::standard();
            let y: f64 = lognorm.quantile(p);
            let p2 = lognorm.cdf(&y);
            assert::close(p, p2, TOL);
        }
    }

    #[test]
    fn entropy() {
        let lognorm = LogNormal::new(1.2, 3.4).unwrap();
        assert::close(lognorm.entropy(), 3.842_713_964_826_788_5, TOL);
    }

    #[test]
    fn entropy_standard() {
        // For LogNormal(0, 1): 1/2 + ln(2*pi)/2
        let lognorm = LogNormal::standard();
        assert::close(lognorm.entropy(), 1.418_938_533_204_672_7, TOL);
    }

    use crate::test_scalable_cdf;
    use crate::test_scalable_density;
    use crate::test_scalable_entropy;
    use crate::test_scalable_method;

    // Scaling invariance checks for each derived quantity.
    test_scalable_method!(LogNormal::new(2.0, 1.0).unwrap(), mean);
    test_scalable_method!(LogNormal::new(2.0, 1.0).unwrap(), median);
    test_scalable_method!(LogNormal::new(2.0, 1.0).unwrap(), mode);
    test_scalable_method!(LogNormal::new(2.0, 1.0).unwrap(), variance);
    test_scalable_method!(LogNormal::new(2.0, 1.0).unwrap(), skewness);
    test_scalable_method!(LogNormal::new(2.0, 1.0).unwrap(), kurtosis);
    test_scalable_density!(LogNormal::new(2.0, 1.0).unwrap());
    test_scalable_entropy!(LogNormal::new(2.0, 1.0).unwrap());
    test_scalable_cdf!(LogNormal::new(2.0, 1.0).unwrap());
}