use scirs2_core::ndarray::{Array2, ArrayView1, ArrayView2};
use scirs2_core::numeric::{Float, One, Zero};
use crate::decomposition::cholesky;
use crate::error::{LinalgError, LinalgResult};
use crate::random::random_normalmatrix;
use crate::stats::distributions::{MatrixNormalParams, WishartParams};
/// Draw `n_samples` i.i.d. samples from the multivariate normal
/// distribution N(`mean`, `cov`).
///
/// Each sample is generated as `mean + L * z`, where `cov = L * L^T` is the
/// Cholesky factorization (lower-triangular `L`) and `z` is a vector of
/// standard normal variates.
///
/// # Arguments
/// * `mean` - Mean vector of length `p`.
/// * `cov` - Positive-definite `p x p` covariance matrix.
/// * `n_samples` - Number of samples to draw.
/// * `rng_seed` - Optional RNG seed for reproducibility.
///
/// # Returns
/// An `(n_samples, p)` array with one sample per row.
///
/// # Errors
/// `LinalgError::ShapeError` if `cov` is not `p x p`; errors from the
/// Cholesky factorization (e.g. `cov` not positive definite) are propagated.
#[allow(dead_code)]
pub fn sample_multivariate_normal<F>(
    mean: &ArrayView1<F>,
    cov: &ArrayView2<F>,
    n_samples: usize,
    rng_seed: Option<u64>,
) -> LinalgResult<Array2<F>>
where
    F: Float
        + Zero
        + One
        + Copy
        + std::fmt::Debug
        + scirs2_core::ndarray::ScalarOperand
        + scirs2_core::numeric::FromPrimitive
        + scirs2_core::numeric::NumAssign
        + std::iter::Sum
        + Send
        + Sync
        + 'static,
{
    let p = mean.len();
    if cov.nrows() != p || cov.ncols() != p {
        return Err(LinalgError::ShapeError(format!(
            "Covariance matrix must be {}x{}, got {:?}",
            p,
            p,
            cov.shape()
        )));
    }
    // Standard normal variates: one row of length p per requested sample.
    let z = random_normalmatrix::<F>((n_samples, p), rng_seed)?;
    // Lower-triangular factor L with cov = L * L^T — assumes `cholesky`
    // returns the lower factor (TODO confirm against crate docs).
    let l = cholesky(cov, None)?;
    let mut samples = Array2::zeros((n_samples, p));
    for i in 0..n_samples {
        let z_row = z.row(i);
        // BUGFIX: transform with L (not L^T) so the sample covariance is
        // L * L^T = cov; L^T * z would yield covariance L^T * L instead.
        let transformed = l.dot(&z_row);
        for j in 0..p {
            samples[[i, j]] = mean[j] + transformed[j];
        }
    }
    Ok(samples)
}
/// Draw `n_samples` independent samples from a matrix normal distribution.
///
/// When a base seed is supplied, sample `i` uses `base + i` (wrapping), so
/// runs are reproducible while individual samples remain distinct.
#[allow(dead_code)]
pub fn samplematrix_normal_multiple<F>(
    params: &MatrixNormalParams<F>,
    n_samples: usize,
    rng_seed: Option<u64>,
) -> LinalgResult<Vec<Array2<F>>>
where
    F: Float
        + Zero
        + One
        + Copy
        + std::fmt::Debug
        + scirs2_core::ndarray::ScalarOperand
        + scirs2_core::numeric::FromPrimitive
        + scirs2_core::numeric::NumAssign
        + std::iter::Sum
        + Send
        + Sync
        + 'static,
{
    // Collecting into LinalgResult<Vec<_>> short-circuits on the first error,
    // exactly like a `?` inside an explicit loop would.
    (0..n_samples)
        .map(|idx| {
            let per_sample_seed = rng_seed.map(|base| base.wrapping_add(idx as u64));
            crate::stats::distributions::samplematrix_normal(params, per_sample_seed)
        })
        .collect()
}
/// Draw `n_samples` independent samples from a Wishart distribution.
///
/// When a base seed is supplied, sample `i` uses `base + i` (wrapping) so
/// that the sequence is reproducible but samples differ from one another.
#[allow(dead_code)]
pub fn sample_wishart_multiple<F>(
    params: &WishartParams<F>,
    n_samples: usize,
    rng_seed: Option<u64>,
) -> LinalgResult<Vec<Array2<F>>>
where
    F: Float
        + Zero
        + One
        + Copy
        + std::fmt::Debug
        + scirs2_core::ndarray::ScalarOperand
        + scirs2_core::numeric::FromPrimitive
        + scirs2_core::numeric::NumAssign
        + std::iter::Sum
        + Send
        + Sync
        + 'static,
{
    // Result's FromIterator stops at the first failed draw, matching the
    // behavior of an early-returning loop.
    (0..n_samples)
        .map(|idx| {
            let per_sample_seed = rng_seed.map(|base| base.wrapping_add(idx as u64));
            crate::stats::distributions::sample_wishart(params, per_sample_seed)
        })
        .collect()
}
/// Draw `n_samples` independent samples from an inverse-Wishart
/// distribution with the given scale matrix and degrees of freedom.
///
/// Uses the standard identity: if `W ~ Wishart(scale^{-1}, dof)` then
/// `W^{-1} ~ InverseWishart(scale, dof)`.
///
/// # Arguments
/// * `scale` - Square positive-definite scale matrix.
/// * `dof` - Degrees of freedom.
/// * `n_samples` - Number of samples to draw.
/// * `rng_seed` - Optional base seed; sample `i` uses `base + i`.
///
/// # Errors
/// `LinalgError::ShapeError` if `scale` is not square; errors from matrix
/// inversion, `WishartParams::new`, or the Wishart sampler are propagated.
#[allow(dead_code)]
pub fn sample_inverse_wishart<F>(
    scale: &ArrayView2<F>,
    dof: F,
    n_samples: usize,
    rng_seed: Option<u64>,
) -> LinalgResult<Vec<Array2<F>>>
where
    F: Float
        + Zero
        + One
        + Copy
        + std::fmt::Debug
        + std::fmt::Display
        + scirs2_core::ndarray::ScalarOperand
        + scirs2_core::numeric::FromPrimitive
        + scirs2_core::numeric::NumAssign
        + std::iter::Sum
        + Send
        + Sync,
{
    if scale.nrows() != scale.ncols() {
        return Err(LinalgError::ShapeError(
            "Scale matrix must be square".to_string(),
        ));
    }
    // Parameterize the auxiliary Wishart with the inverse of the scale.
    let scale_inv = crate::basic::inv(scale, None)?;
    let wishart_params = WishartParams::new(scale_inv, dof)?;
    let mut samples = Vec::with_capacity(n_samples);
    for i in 0..n_samples {
        let seed = rng_seed.map(|s| s.wrapping_add(i as u64));
        let wishart_sample = crate::stats::distributions::sample_wishart(&wishart_params, seed)?;
        // Inverting the Wishart draw yields the inverse-Wishart sample.
        let inverse_wishart_sample = crate::basic::inv(&wishart_sample.view(), None)?;
        samples.push(inverse_wishart_sample);
    }
    Ok(samples)
}
/// Draw `n_samples` approximate samples from a matrix t-distribution with
/// the given mean, row/column covariances, and degrees of freedom.
///
/// Each sample scales a matrix-normal draw toward the mean by a factor
/// derived from a chi-square-like variate.
///
/// # Arguments
/// * `mean` - `m x n` location matrix.
/// * `row_cov` - `m x m` row covariance.
/// * `col_cov` - `n x n` column covariance.
/// * `dof` - Degrees of freedom.
/// * `n_samples` - Number of samples to draw.
/// * `rng_seed` - Optional base seed; normal and scale draws use disjoint
///   derived seeds.
///
/// # Errors
/// `LinalgError::ShapeError` on dimension mismatch; errors from parameter
/// construction or sampling are propagated.
#[allow(dead_code)]
pub fn samplematrix_t<F>(
    mean: &ArrayView2<F>,
    row_cov: &ArrayView2<F>,
    col_cov: &ArrayView2<F>,
    dof: F,
    n_samples: usize,
    rng_seed: Option<u64>,
) -> LinalgResult<Vec<Array2<F>>>
where
    F: Float
        + Zero
        + One
        + Copy
        + std::fmt::Debug
        + std::fmt::Display
        + scirs2_core::ndarray::ScalarOperand
        + scirs2_core::numeric::FromPrimitive
        + scirs2_core::numeric::NumAssign
        + std::iter::Sum
        + Send
        + Sync,
{
    let (m, n) = mean.dim();
    if row_cov.dim() != (m, m) || col_cov.dim() != (n, n) {
        return Err(LinalgError::ShapeError(format!(
            "Covariance matrices have incompatible dimensions: mean {:?}, row_cov {:?}, col_cov {:?}",
            mean.shape(), row_cov.shape(), col_cov.shape()
        )));
    }
    if n_samples == 0 {
        return Ok(Vec::new());
    }
    // The matrix-normal parameters do not depend on the sample index, so
    // build them once instead of re-cloning mean/covariances every iteration.
    let matrix_normal_params =
        MatrixNormalParams::new(mean.to_owned(), row_cov.to_owned(), col_cov.to_owned())?;
    let mut samples = Vec::with_capacity(n_samples);
    for i in 0..n_samples {
        let seed = rng_seed.map(|s| s.wrapping_add(i as u64));
        // BUGFIX: the scale draw previously reused the normal draw's seed,
        // coupling the scale factor to the sample; use a disjoint seed range.
        let chi_seed =
            rng_seed.map(|s| s.wrapping_add(n_samples as u64).wrapping_add(i as u64));
        let normal_sample =
            crate::stats::distributions::samplematrix_normal(&matrix_normal_params, seed)?;
        // NOTE(review): a single squared normal is a crude stand-in for a
        // chi-square(dof) variate — consider a proper gamma/chi-square sampler.
        let chi_approx = random_normalmatrix::<F>((1, 1), chi_seed)?;
        let scale_factor = (dof / (dof + chi_approx[[0, 0]] * chi_approx[[0, 0]])).sqrt();
        let t_sample = mean + &((&normal_sample - mean) * scale_factor);
        samples.push(t_sample);
    }
    Ok(samples)
}
#[allow(dead_code)]
pub fn bootstrap_sample<F>(
data: &ArrayView2<F>,
n_bootstrap: usize,
samplesize: Option<usize>,
rng_seed: Option<u64>,
) -> LinalgResult<Vec<Array2<F>>>
where
F: Float + Copy,
{
let n_original = data.nrows();
let p = data.ncols();
let n_sample = samplesize.unwrap_or(n_original);
let mut samples = Vec::with_capacity(n_bootstrap);
let mut _seed = rng_seed.unwrap_or(42);
for _ in 0..n_bootstrap {
let mut bootstrap_sample = Array2::zeros((n_sample, p));
for i in 0..n_sample {
_seed = _seed.wrapping_mul(1664525).wrapping_add(1013904223);
let index = (_seed as usize) % n_original;
for j in 0..p {
bootstrap_sample[[i, j]] = data[[index, j]];
}
}
samples.push(bootstrap_sample);
}
Ok(samples)
}
/// Generate `n_permutations` row-permuted copies of `data` using a
/// Fisher-Yates shuffle driven by a simple LCG.
///
/// # Arguments
/// * `data` - `(n, p)` data matrix; rows are observations.
/// * `n_permutations` - Number of permuted copies to produce.
/// * `rng_seed` - Optional seed for the internal LCG (defaults to 42).
#[allow(dead_code)]
pub fn permutation_sample<F>(
    data: &ArrayView2<F>,
    n_permutations: usize,
    rng_seed: Option<u64>,
) -> LinalgResult<Vec<Array2<F>>>
where
    F: Float + Copy,
{
    let n = data.nrows();
    let p = data.ncols();
    let mut samples = Vec::with_capacity(n_permutations);
    let mut state = rng_seed.unwrap_or(42);
    for _ in 0..n_permutations {
        let mut permuted = data.to_owned();
        // Fisher-Yates: walk from the last row down, swapping row i with a
        // uniformly chosen row j in 0..=i. (No-op when n <= 1.)
        for i in (1..n).rev() {
            state = state.wrapping_mul(1664525).wrapping_add(1013904223);
            // BUGFIX: use the LCG's high bits. The low bits of a
            // power-of-two-modulus LCG alternate deterministically, making
            // the previous `state % (i + 1)` shuffle far from uniform.
            let j = ((state >> 33) as usize) % (i + 1);
            for k in 0..p {
                permuted.swap([i, k], [j, k]);
            }
        }
        samples.push(permuted);
    }
    Ok(samples)
}
/// Metropolis-Hastings sampler with a Gaussian random-walk proposal.
///
/// # Arguments
/// * `log_density` - Log target density (up to an additive constant); an
///   `Err` from this closure is treated as log-density `-inf`, i.e. the
///   proposal is never accepted.
/// * `initial_value` - Starting state of the chain.
/// * `proposal_cov` - Covariance matrix shaping the Gaussian proposal step.
/// * `n_samples` - Number of post-burn-in states to keep.
/// * `burn_in` - Number of initial iterations to discard.
/// * `rng_seed` - Optional base seed; each iteration derives disjoint seeds
///   for the proposal noise and the acceptance draw.
///
/// # Returns
/// The retained chain states, in iteration order.
///
/// # Errors
/// Propagates failures from the Cholesky factorization of `proposal_cov`,
/// from random matrix generation, and from the initial density evaluation.
#[allow(dead_code)]
pub fn metropolis_hastings_sample<F>(
    log_density: impl Fn(&ArrayView2<F>) -> LinalgResult<F>,
    initial_value: Array2<F>,
    proposal_cov: &ArrayView2<F>,
    n_samples: usize,
    burn_in: usize,
    rng_seed: Option<u64>,
) -> LinalgResult<Vec<Array2<F>>>
where
    F: Float
        + Zero
        + One
        + Copy
        + std::fmt::Debug
        + scirs2_core::ndarray::ScalarOperand
        + scirs2_core::numeric::FromPrimitive
        + scirs2_core::numeric::NumAssign
        + std::iter::Sum
        + Send
        + Sync,
{
    let (m, n) = initial_value.dim();
    let total_samples = n_samples + burn_in;
    let mut current = initial_value;
    // BUGFIX: restore `&current` (the reference was corrupted to a stray
    // HTML-entity character, which does not compile).
    let mut current_log_density = log_density(&current.view())?;
    let mut samples = Vec::with_capacity(n_samples);
    let mut accepted = 0;
    // NOTE(review): the proposal applies L^T to the noise; this is still a
    // valid symmetric proposal, but its covariance is L^T*L rather than the
    // requested proposal_cov = L*L^T — confirm the intended convention.
    let l = cholesky(proposal_cov, None)?;
    for i in 0..total_samples {
        let noise_seed = rng_seed.map(|s| s.wrapping_add((2 * i) as u64));
        // BUGFIX: the acceptance draw previously reused the noise seed,
        // correlating the accept test with the proposal; derive a disjoint
        // seed per iteration instead.
        let accept_seed = rng_seed.map(|s| s.wrapping_add((2 * i + 1) as u64));
        let noise = random_normalmatrix::<F>((m, n), noise_seed)?;
        let proposal_noise = l.t().dot(&noise);
        let proposal = &current + &proposal_noise;
        let proposal_log_density = match log_density(&proposal.view()) {
            Ok(density) => density,
            // States outside the support simply never get accepted.
            Err(_) => F::neg_infinity(),
        };
        let log_alpha = proposal_log_density - current_log_density;
        let uniform_sample = random_normalmatrix::<F>((1, 1), accept_seed)?;
        // Crude uniform variate: fractional part of |N(0,1)| (already
        // non-negative, so no further abs() is needed).
        let uniform = uniform_sample[[0, 0]].abs() % F::one();
        if log_alpha > uniform.ln() {
            current = proposal;
            current_log_density = proposal_log_density;
            accepted += 1;
        }
        if i >= burn_in {
            samples.push(current.clone());
        }
    }
    // Rule-of-thumb acceptance band for random-walk MH; warn so the caller
    // can retune the proposal covariance. Skip when no iterations ran
    // (avoids a NaN rate from 0/0).
    if total_samples > 0 {
        let acceptance_rate = accepted as f64 / total_samples as f64;
        if !(0.2..=0.7).contains(&acceptance_rate) {
            eprintln!(
                "Warning: MCMC acceptance rate is {acceptance_rate:.3}, consider adjusting proposal covariance"
            );
        }
    }
    Ok(samples)
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;
    use scirs2_core::ndarray::array;

    /// Shape, finiteness, and rough mean check for MVN sampling with an
    /// identity covariance.
    #[test]
    fn test_sample_multivariate_normal() {
        let mean = array![0.0, 0.0];
        let cov = array![[1.0, 0.0], [0.0, 1.0]];
        let samples = sample_multivariate_normal(&mean.view(), &cov.view(), 100, Some(42))
            .expect("Operation failed");
        assert_eq!(samples.dim(), (100, 2));
        assert!(samples.iter().all(|&x| x.is_finite()));
        let sample_mean = samples
            .mean_axis(scirs2_core::ndarray::Axis(0))
            .expect("Operation failed");
        // Loose tolerance: 100 draws from N(0, 1) per coordinate.
        assert_abs_diff_eq!(sample_mean[0], 0.0, epsilon = 0.5);
        assert_abs_diff_eq!(sample_mean[1], 0.0, epsilon = 0.5);
    }

    /// Bootstrap resamples honor the requested sample size and stay finite.
    #[test]
    fn test_bootstrap_sample() {
        let data = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0],];
        let bootstrap_samples =
            bootstrap_sample(&data.view(), 10, Some(2), Some(42)).expect("Operation failed");
        assert_eq!(bootstrap_samples.len(), 10);
        for sample in &bootstrap_samples {
            assert_eq!(sample.dim(), (2, 2));
            assert!(sample.iter().all(|&x| x.is_finite()));
        }
    }

    /// Permutations preserve the data's shape and values' finiteness.
    #[test]
    fn test_permutation_sample() {
        let data = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0],];
        let permuted_samples =
            permutation_sample(&data.view(), 5, Some(42)).expect("Operation failed");
        assert_eq!(permuted_samples.len(), 5);
        for sample in &permuted_samples {
            assert_eq!(sample.dim(), (3, 2));
            assert!(sample.iter().all(|&x| x.is_finite()));
        }
    }

    /// Batch matrix-normal sampling returns the right count and shapes.
    #[test]
    fn test_samplematrix_normal_multiple() {
        let mean = array![[0.0, 0.0], [0.0, 0.0]];
        let row_cov = array![[1.0, 0.0], [0.0, 1.0]];
        let col_cov = array![[1.0, 0.0], [0.0, 1.0]];
        let params = MatrixNormalParams::new(mean, row_cov, col_cov).expect("Operation failed");
        // BUGFIX: restore `&params` (the reference was corrupted to a stray
        // HTML-entity character, which does not compile).
        let samples = samplematrix_normal_multiple(&params, 5, Some(42)).expect("Operation failed");
        assert_eq!(samples.len(), 5);
        for sample in &samples {
            assert_eq!(sample.dim(), (2, 2));
            assert!(sample.iter().all(|&x| x.is_finite()));
        }
    }
}