use crate::distributions::multivariate::normal::MultivariateNormal;
use crate::error::StatsResult;
use crate::sampling::SampleableDistribution;
use scirs2_core::ndarray::{s, Array1, Array2, ArrayBase, ArrayView1, ArrayView2, Data, Ix1, Ix2};
use std::fmt::Debug;
/// Multivariate lognormal distribution.
///
/// If `Y ~ N(mu, sigma)` is multivariate normal, then `X = exp(Y)`
/// (element-wise) follows this distribution. `mu` and `sigma` are the
/// parameters of the *underlying normal*, not the moments of `X` itself.
#[derive(Debug, Clone)]
pub struct MultivariateLognormal {
// Mean vector of the underlying normal distribution.
pub mu: Array1<f64>,
// Covariance matrix of the underlying normal distribution.
pub sigma: Array2<f64>,
// Dimensionality (length of `mu`).
pub dim: usize,
// Underlying multivariate normal used for pdf/logpdf and sampling.
mvn: MultivariateNormal,
}
impl MultivariateLognormal {
    /// Create a new multivariate lognormal distribution from the mean
    /// vector and covariance matrix of the underlying normal.
    ///
    /// # Errors
    ///
    /// Returns whatever error `MultivariateNormal::new` reports for
    /// inconsistent dimensions or an invalid covariance matrix — all
    /// parameter validation is delegated to the underlying normal.
    pub fn new<D1, D2>(mu: ArrayBase<D1, Ix1>, sigma: ArrayBase<D2, Ix2>) -> StatsResult<Self>
    where
        D1: Data<Elem = f64>,
        D2: Data<Elem = f64>,
    {
        let mvn = MultivariateNormal::new(mu.to_owned(), sigma.to_owned())?;
        let dim = mvn.dim();
        Ok(MultivariateLognormal {
            mu: mu.to_owned(),
            sigma: sigma.to_owned(),
            dim,
            mvn,
        })
    }

    /// Probability density function evaluated at `x`.
    ///
    /// Returns `0.0` outside the support: when `x` has the wrong
    /// dimension or any component is non-positive (the support is the
    /// strictly positive orthant).
    pub fn pdf<D>(&self, x: &ArrayBase<D, Ix1>) -> f64
    where
        D: Data<Elem = f64>,
    {
        if x.len() != self.dim {
            return 0.0;
        }
        if x.iter().any(|&xi| xi <= 0.0) {
            return 0.0;
        }
        // Change of variables X = exp(Y): f_X(x) = f_Y(ln x) / prod_i x_i.
        let log_x = x.mapv(f64::ln);
        let normal_pdf = self.mvn.pdf(&log_x);
        let jacobian_factor: f64 = x.iter().product();
        normal_pdf / jacobian_factor
    }

    /// Natural logarithm of the probability density at `x`.
    ///
    /// Returns negative infinity outside the support (wrong dimension or
    /// any component <= 0). Numerically preferable to `pdf(x).ln()`.
    pub fn logpdf<D>(&self, x: &ArrayBase<D, Ix1>) -> f64
    where
        D: Data<Elem = f64>,
    {
        if x.len() != self.dim {
            return f64::NEG_INFINITY;
        }
        if x.iter().any(|&xi| xi <= 0.0) {
            return f64::NEG_INFINITY;
        }
        // log f_X(x) = log f_Y(ln x) - sum_i ln x_i.
        let log_x = x.mapv(f64::ln);
        let normal_logpdf = self.mvn.logpdf(&log_x);
        // Reuse the already-computed logs instead of a second ln() pass.
        let sum_log_x = log_x.sum();
        normal_logpdf - sum_log_x
    }

    /// Draw `size` samples; each row of the returned matrix is one sample.
    ///
    /// # Errors
    ///
    /// Propagates any sampling error from the underlying normal.
    pub fn rvs(&self, size: usize) -> StatsResult<Array2<f64>> {
        // exp of a multivariate normal draw is a multivariate lognormal draw.
        let normal_samples = self.mvn.rvs(size)?;
        Ok(normal_samples.mapv(f64::exp))
    }

    /// Draw a single sample as a vector.
    ///
    /// # Errors
    ///
    /// Propagates any sampling error from the underlying normal.
    pub fn rvs_single(&self) -> StatsResult<Array1<f64>> {
        let normal_sample = self.mvn.rvs_single()?;
        Ok(normal_sample.mapv(f64::exp))
    }

    /// Component-wise mean: E[X_i] = exp(mu_i + sigma_ii / 2).
    pub fn mean(&self) -> Array1<f64> {
        Array1::from_shape_fn(self.dim, |i| (self.mu[i] + self.sigma[[i, i]] / 2.0).exp())
    }

    /// Component-wise median: exp(mu_i).
    pub fn median(&self) -> Array1<f64> {
        self.mu.mapv(f64::exp)
    }

    /// Component-wise (marginal) mode: exp(mu_i - sigma_ii).
    ///
    /// NOTE(review): this is the mode of each one-dimensional marginal.
    /// The mode of the *joint* density is exp(mu - sigma * 1), which
    /// coincides with this only when `sigma` is diagonal — confirm which
    /// is intended before relying on it for correlated components.
    pub fn mode(&self) -> Array1<f64> {
        Array1::from_shape_fn(self.dim, |i| (self.mu[i] - self.sigma[[i, i]]).exp())
    }

    /// Covariance matrix of X:
    /// Cov[X_i, X_j] = E[X_i] * E[X_j] * (exp(sigma_ij) - 1).
    pub fn cov(&self) -> Array2<f64> {
        // Hoist the marginal means out of the nested loop: the original
        // recomputed exp(mu_i + sigma_ii / 2) for both indices on every
        // one of the dim^2 iterations.
        let means = self.mean();
        let mut cov = Array2::zeros((self.dim, self.dim));
        for i in 0..self.dim {
            for j in 0..self.dim {
                cov[[i, j]] = means[i] * means[j] * (self.sigma[[i, j]].exp() - 1.0);
            }
        }
        cov
    }

    /// Dimensionality of the distribution.
    pub fn dim(&self) -> usize {
        self.dim
    }

    /// View of the underlying normal's mean vector.
    pub fn mu(&self) -> ArrayView1<f64> {
        self.mu.view()
    }

    /// View of the underlying normal's covariance matrix.
    pub fn sigma(&self) -> ArrayView2<f64> {
        self.sigma.view()
    }
}
/// Convenience free-function constructor for [`MultivariateLognormal`].
///
/// Equivalent to calling [`MultivariateLognormal::new`] directly.
///
/// # Errors
///
/// Propagates any error from [`MultivariateLognormal::new`] (e.g.
/// mismatched dimensions or an invalid covariance matrix).
#[allow(dead_code)]
pub fn multivariate_lognormal<D1, D2>(
mu: ArrayBase<D1, Ix1>,
sigma: ArrayBase<D2, Ix2>,
) -> StatsResult<MultivariateLognormal>
where
D1: Data<Elem = f64>,
D2: Data<Elem = f64>,
{
MultivariateLognormal::new(mu, sigma)
}
impl SampleableDistribution<Array1<f64>> for MultivariateLognormal {
fn rvs(&self, size: usize) -> StatsResult<Vec<Array1<f64>>> {
let samples_matrix = self.rvs(size)?;
let mut result = Vec::with_capacity(size);
for i in 0..size {
let row = samples_matrix.slice(s![i, ..]).to_owned();
result.push(row);
}
Ok(result)
}
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_relative_eq;
use scirs2_core::ndarray::{array, Axis};
// Construction stores the parameters verbatim and infers the dimension
// from them, for both 2-D and 3-D inputs.
#[test]
fn test_mvln_creation() {
let mu = array![0.0, 0.0];
let sigma = array![[0.5, 0.0], [0.0, 0.5]];
let mvln = MultivariateLognormal::new(mu.clone(), sigma.clone()).expect("Operation failed");
assert_eq!(mvln.dim, 2);
assert_eq!(mvln.mu, mu);
assert_eq!(mvln.sigma, sigma);
let mu3 = array![1.0, 2.0, 3.0];
let sigma3 = array![[0.5, 0.1, 0.1], [0.1, 0.5, 0.1], [0.1, 0.1, 0.5]];
let mvln3 =
MultivariateLognormal::new(mu3.clone(), sigma3.clone()).expect("Operation failed");
assert_eq!(mvln3.dim, 3);
assert_eq!(mvln3.mu, mu3);
assert_eq!(mvln3.sigma, sigma3);
}
// Invalid parameters are rejected: mismatched mu/sigma dimensions, and a
// sigma that is not positive-definite (off-diagonals exceed the diagonal).
#[test]
fn test_mvln_creation_errors() {
let mu = array![0.0, 0.0, 0.0];
let sigma = array![[0.5, 0.0], [0.0, 0.5]];
assert!(MultivariateLognormal::new(mu, sigma).is_err());
let mu = array![0.0, 0.0];
let sigma = array![[1.0, 2.0], [2.0, 1.0]]; assert!(MultivariateLognormal::new(mu, sigma).is_err());
}
// pdf is positive inside the support and exactly zero for negative or
// zero components, and for a vector of the wrong dimension.
#[test]
fn test_mvln_pdf() {
let mu = array![0.0, 0.0];
let sigma = array![[0.5, 0.0], [0.0, 0.5]];
let mvln = MultivariateLognormal::new(mu, sigma).expect("Operation failed");
let x1 = array![1.0, 1.0];
let pdf1 = mvln.pdf(&x1);
assert!(pdf1 > 0.0);
let x2 = array![-1.0, 1.0];
let pdf2 = mvln.pdf(&x2);
assert_eq!(pdf2, 0.0);
let x3 = array![0.0, 1.0];
let pdf3 = mvln.pdf(&x3);
assert_eq!(pdf3, 0.0);
let x4 = array![1.0, 1.0, 1.0];
let pdf4 = mvln.pdf(&x4);
assert_eq!(pdf4, 0.0);
}
// logpdf agrees with ln(pdf) inside the support and is -inf outside it.
#[test]
fn test_mvln_logpdf() {
let mu = array![0.0, 0.0];
let sigma = array![[0.5, 0.0], [0.0, 0.5]];
let mvln = MultivariateLognormal::new(mu, sigma).expect("Operation failed");
let x1 = array![1.0, 1.0];
let pdf1 = mvln.pdf(&x1);
let logpdf1 = mvln.logpdf(&x1);
assert_relative_eq!(logpdf1.exp(), pdf1, epsilon = 1e-10);
let x2 = array![-1.0, 1.0];
let logpdf2 = mvln.logpdf(&x2);
assert_eq!(logpdf2, f64::NEG_INFINITY);
}
// Closed-form moments for a diagonal sigma:
// mean_i = exp(mu_i + sigma_ii/2), median_i = exp(mu_i),
// mode_i = exp(mu_i - sigma_ii),
// cov_ij = mean_i * mean_j * (exp(sigma_ij) - 1).
#[test]
fn test_mvln_statistics() {
let mu = array![0.0, 0.0];
let sigma = array![[0.5, 0.0], [0.0, 0.5]];
let mvln = MultivariateLognormal::new(mu, sigma).expect("Operation failed");
let mean = mvln.mean();
let expected_mean = array![(0.5_f64 / 2.0).exp(), (0.5_f64 / 2.0).exp()];
assert_relative_eq!(mean[0], expected_mean[0], epsilon = 1e-10);
assert_relative_eq!(mean[1], expected_mean[1], epsilon = 1e-10);
let median = mvln.median();
let expected_median = array![1.0, 1.0]; assert_relative_eq!(median[0], expected_median[0], epsilon = 1e-10);
assert_relative_eq!(median[1], expected_median[1], epsilon = 1e-10);
let mode = mvln.mode();
let expected_mode = array![(-0.5_f64).exp(), (-0.5_f64).exp()]; assert_relative_eq!(mode[0], expected_mode[0], epsilon = 1e-10);
assert_relative_eq!(mode[1], expected_mode[1], epsilon = 1e-10);
let cov = mvln.cov();
let mean_i = (0.5_f64 / 2.0).exp();
let var0 = mean_i * mean_i * ((0.5_f64).exp() - 1.0);
assert_relative_eq!(cov[[0, 0]], var0, epsilon = 1e-10);
assert_relative_eq!(cov[[1, 1]], var0, epsilon = 1e-10);
assert_relative_eq!(cov[[0, 1]], 0.0, epsilon = 1e-10);
assert_relative_eq!(cov[[1, 0]], 0.0, epsilon = 1e-10);
}
// Sampling: correct shape, strictly positive values, and a sample mean
// near the theoretical mean (loose epsilon since the draws are random).
#[test]
fn test_mvln_rvs() {
let mu = array![0.0, 0.0];
let sigma = array![[0.5, 0.2], [0.2, 0.5]];
let mvln = MultivariateLognormal::new(mu, sigma).expect("Operation failed");
let n_samples_ = 1000;
let samples = mvln.rvs(n_samples_).expect("Operation failed");
assert_eq!(samples.shape(), &[n_samples_, 2]);
for i in 0..n_samples_ {
for j in 0..2 {
assert!(samples[[i, j]] > 0.0);
}
}
let sample_mean = samples.mean_axis(Axis(0)).expect("Operation failed");
let expected_mean = mvln.mean();
assert_relative_eq!(sample_mean[0], expected_mean[0], epsilon = 0.2);
assert_relative_eq!(sample_mean[1], expected_mean[1], epsilon = 0.2);
let single_sample = mvln.rvs_single().expect("Operation failed");
assert_eq!(single_sample.len(), 2);
assert!(single_sample[0] > 0.0);
assert!(single_sample[1] > 0.0);
}
}