use crate::error::{StatsError, StatsResult};
use scirs2_core::ndarray::{Array1, ArrayView1};
use scirs2_core::numeric::{Float, NumCast};
use scirs2_core::random::uniform::SampleUniform;
use scirs2_core::random::{Rng, RngExt};
use scirs2_core::simd_ops::{AutoOptimizer, SimdUnifiedOps};
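/// Builds a seeded RNG: uses the provided `Option<u64>` seed, or falls back to
/// a seed drawn from the thread-local RNG when `None` is given.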
macro_rules! build_rng {
($seed:expr) => {{
let s: u64 = $seed.unwrap_or_else(|| {
use scirs2_core::random::{Rng, RngExt};
scirs2_core::random::thread_rng().random()
});
scirs2_core::random::seeded_rng(s)
}};
}
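/// Draws `n` samples from a Normal(`mean`, `std_dev`) distribution using the
/// Box-Muller transform, with a SIMD fast path for large batches.
///
/// Returns an error if `n == 0` or `std_dev <= 0`. Supplying `seed` makes the
/// output reproducible; `None` seeds from the thread-local RNG.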
pub fn sample_normal_batch<F>(
n: usize,
mean: F,
std_dev: F,
seed: Option<u64>,
) -> StatsResult<Array1<F>>
where
F: Float + NumCast + SimdUnifiedOps + SampleUniform,
{
if n == 0 {
return Err(StatsError::InvalidArgument(
"n must be at least 1".to_string(),
));
}
if std_dev <= F::zero() {
return Err(StatsError::InvalidArgument(
"std_dev must be positive for the Normal distribution".to_string(),
));
}
    // Box-Muller yields samples in pairs, so generate ceil(n / 2) pairs and
    // truncate to `n` at the end.
    let n_pairs = (n + 1) / 2;
    let n_total = n_pairs * 2;
    let mut rng = build_rng!(seed);
    // Keep u1 strictly above zero so that ln(u1) stays finite.
    let eps = F::epsilon().to_f64().unwrap_or(1e-15_f64).max(1e-15_f64);
let u1: Array1<F> = Array1::from_shape_fn(n_pairs, |_| {
F::from(rng.gen_range(eps..1.0_f64)).unwrap_or_else(|| F::one())
});
let u2: Array1<F> = Array1::from_shape_fn(n_pairs, |_| {
F::from(rng.gen_range(0.0_f64..1.0_f64)).unwrap_or_else(|| F::zero())
});
let optimizer = AutoOptimizer::new();
    let (z0, z1) = if optimizer.should_use_simd(n_pairs) {
        // Vectorized Box-Muller: r = sqrt(-2 ln u1), theta = 2*pi*u2,
        // z = mean + std_dev * r * (cos theta, sin theta).
        let ln_u1 = F::simd_ln(&u1.view());
let neg_two = F::from(-2.0_f64).unwrap_or_else(|| -F::one() - F::one());
let neg_two_arr = Array1::from_elem(n_pairs, neg_two);
let neg2_ln_u1 = F::simd_mul(&neg_two_arr.view(), &ln_u1.view());
let r = F::simd_sqrt(&neg2_ln_u1.view());
let two_pi = F::from(2.0_f64 * std::f64::consts::PI).unwrap_or_else(|| F::one());
let two_pi_arr = Array1::from_elem(n_pairs, two_pi);
let theta = F::simd_mul(&two_pi_arr.view(), &u2.view());
let cos_theta = F::simd_cos(&theta.view());
let sin_theta = F::simd_sin(&theta.view());
let raw_z0 = F::simd_mul(&r.view(), &cos_theta.view());
let raw_z1 = F::simd_mul(&r.view(), &sin_theta.view());
let std_arr = Array1::from_elem(n_pairs, std_dev);
let mean_arr = Array1::from_elem(n_pairs, mean);
let z0 = F::simd_fma(&raw_z0.view(), &std_arr.view(), &mean_arr.view());
let z1 = F::simd_fma(&raw_z1.view(), &std_arr.view(), &mean_arr.view());
(z0, z1)
} else {
let two = F::from(2.0_f64).unwrap_or_else(|| F::one() + F::one());
let two_pi = F::from(2.0_f64 * std::f64::consts::PI).unwrap_or_else(|| F::one());
let mut z0 = Array1::zeros(n_pairs);
let mut z1 = Array1::zeros(n_pairs);
for i in 0..n_pairs {
let r = (-two * u1[i].ln()).sqrt();
let theta = two_pi * u2[i];
z0[i] = mean + std_dev * r * theta.cos();
z1[i] = mean + std_dev * r * theta.sin();
}
(z0, z1)
};
    // Interleave the two half-batches; n_total == 2 * n_pairs, so both slots
    // are always in bounds. Truncate to the requested length at the end.
    let mut result: Array1<F> = Array1::zeros(n_total);
    for i in 0..n_pairs {
        result[2 * i] = z0[i];
        result[2 * i + 1] = z1[i];
    }
    Ok(result.slice(scirs2_core::ndarray::s![..n]).to_owned())
}
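/// Draws `n` samples from a Uniform(`low`, `high`) distribution on the
/// half-open interval `[low, high)`.
///
/// Returns an error if `n == 0` or `low >= high`. Supplying `seed` makes the
/// output reproducible.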
pub fn sample_uniform_batch<F>(
n: usize,
low: F,
high: F,
seed: Option<u64>,
) -> StatsResult<Array1<F>>
where
F: Float + NumCast + SimdUnifiedOps + SampleUniform,
{
if n == 0 {
return Err(StatsError::InvalidArgument(
"n must be at least 1".to_string(),
));
}
if low >= high {
return Err(StatsError::InvalidArgument(
"low must be strictly less than high for the Uniform distribution".to_string(),
));
}
let mut rng = build_rng!(seed);
let u: Array1<F> = Array1::from_shape_fn(n, |_| {
F::from(rng.gen_range(0.0_f64..1.0_f64)).unwrap_or_else(|| F::zero())
});
let optimizer = AutoOptimizer::new();
let width = high - low;
if optimizer.should_use_simd(n) {
let width_arr = Array1::from_elem(n, width);
let low_arr = Array1::from_elem(n, low);
Ok(F::simd_fma(&u.view(), &width_arr.view(), &low_arr.view()))
} else {
Ok(u.mapv(|ui| low + ui * width))
}
}
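/// Draws `n` samples from an Exponential(`rate`) distribution via inverse
/// transform sampling (`x = -ln(u) / rate`).
///
/// Returns an error if `n == 0` or `rate <= 0`. Supplying `seed` makes the
/// output reproducible.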
pub fn sample_exponential_batch<F>(n: usize, rate: F, seed: Option<u64>) -> StatsResult<Array1<F>>
where
F: Float + NumCast + SimdUnifiedOps + SampleUniform,
{
if n == 0 {
return Err(StatsError::InvalidArgument(
"n must be at least 1".to_string(),
));
}
if rate <= F::zero() {
return Err(StatsError::InvalidArgument(
"rate must be positive for the Exponential distribution".to_string(),
));
}
    // Keep u strictly above zero so that ln(u) stays finite.
    let eps = F::epsilon().to_f64().unwrap_or(1e-15_f64).max(1e-15_f64);
let mut rng = build_rng!(seed);
let u: Array1<F> = Array1::from_shape_fn(n, |_| {
F::from(rng.gen_range(eps..1.0_f64)).unwrap_or_else(|| F::one())
});
let optimizer = AutoOptimizer::new();
if optimizer.should_use_simd(n) {
        // Inverse-transform sampling: x = -ln(u) / rate.
        let ln_u = F::simd_ln(&u.view());
        let neg_one_arr = Array1::from_elem(n, -F::one());
        let neg_ln_u = F::simd_mul(&neg_one_arr.view(), &ln_u.view());
        let inv_rate_arr = Array1::from_elem(n, F::one() / rate);
        Ok(F::simd_mul(&neg_ln_u.view(), &inv_rate_arr.view()))
} else {
Ok(u.mapv(|ui| -ui.ln() / rate))
}
}
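/// Evaluates the Normal(`mean`, `std_dev`) probability density function at
/// every element of `x`, using SIMD operations for large inputs.
///
/// Returns an error if `x` is empty or `std_dev <= 0`.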
pub fn normal_pdf_batch<F>(x: &ArrayView1<F>, mean: F, std_dev: F) -> StatsResult<Array1<F>>
where
F: Float + NumCast + SimdUnifiedOps,
{
if x.is_empty() {
return Err(StatsError::InvalidArgument("x is empty".to_string()));
}
if std_dev <= F::zero() {
return Err(StatsError::InvalidArgument(
"std_dev must be positive".to_string(),
));
}
let n = x.len();
let two = F::from(2.0_f64).unwrap_or_else(|| F::one() + F::one());
let two_pi = F::from(2.0_f64 * std::f64::consts::PI).unwrap_or_else(|| F::one());
let norm_const = F::one() / (std_dev * two_pi.sqrt());
let two_var = two * std_dev * std_dev;
let optimizer = AutoOptimizer::new();
if optimizer.should_use_simd(n) {
let mean_arr = Array1::from_elem(n, mean);
let diff = F::simd_sub(x, &mean_arr.view());
let diff_sq = F::simd_mul(&diff.view(), &diff.view());
        let neg_inv = -(F::one() / two_var);
let neg_inv_arr = Array1::from_elem(n, neg_inv);
let exponent = F::simd_mul(&neg_inv_arr.view(), &diff_sq.view());
let exp_vals = F::simd_exp(&exponent.view());
let norm_arr = Array1::from_elem(n, norm_const);
Ok(F::simd_mul(&norm_arr.view(), &exp_vals.view()))
} else {
Ok(x.mapv(|xi| {
let z = (xi - mean) / std_dev;
norm_const * (-(z * z) / two).exp()
}))
}
}
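/// Evaluates the Normal(`mean`, `std_dev`) cumulative distribution function at
/// every element of `x`, using the Abramowitz & Stegun 26.2.17 polynomial
/// approximation (absolute error on the order of 1e-7 for `f64`).
///
/// Returns an error if `x` is empty or `std_dev <= 0`.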
pub fn normal_cdf_batch<F>(x: &ArrayView1<F>, mean: F, std_dev: F) -> StatsResult<Array1<F>>
where
F: Float + NumCast + SimdUnifiedOps,
{
if x.is_empty() {
return Err(StatsError::InvalidArgument("x is empty".to_string()));
}
if std_dev <= F::zero() {
return Err(StatsError::InvalidArgument(
"std_dev must be positive".to_string(),
));
}
    let n = x.len();
    let optimizer = AutoOptimizer::new();
    let z_owned = if optimizer.should_use_simd(n) {
        let mean_arr = Array1::from_elem(n, mean);
        let std_arr = Array1::from_elem(n, std_dev);
        let diff = F::simd_sub(x, &mean_arr.view());
        F::simd_div(&diff.view(), &std_arr.view())
    } else {
        x.mapv(|xi| (xi - mean) / std_dev)
    };
    // Abramowitz & Stegun 26.2.17 rational approximation.
    let two = F::from(2.0_f64).unwrap_or_else(|| F::one() + F::one());
    let a1 = F::from(0.319_381_530_f64).unwrap_or_else(|| F::zero());
    let a2 = F::from(-0.356_563_782_f64).unwrap_or_else(|| F::zero());
    let a3 = F::from(1.781_477_937_f64).unwrap_or_else(|| F::zero());
    let a4 = F::from(-1.821_255_978_f64).unwrap_or_else(|| F::zero());
    let a5 = F::from(1.330_274_429_f64).unwrap_or_else(|| F::zero());
    let p = F::from(0.231_641_9_f64).unwrap_or_else(|| F::zero());
    let two_pi = F::from(2.0_f64 * std::f64::consts::PI).unwrap_or_else(|| F::one());
    let result: Array1<F> = z_owned.mapv(|z| {
        let abs_z = z.abs();
        let t = F::one() / (F::one() + p * abs_z);
        let poly = t * (a1 + t * (a2 + t * (a3 + t * (a4 + t * a5))));
        let pdf_z = (-(z * z) / two).exp() / two_pi.sqrt();
        // The polynomial is valid for z >= 0; use the symmetry
        // Phi(-z) = 1 - Phi(z) for negative arguments.
        let cdf_abs = F::one() - pdf_z * poly;
        let cdf = if z >= F::zero() {
            cdf_abs
        } else {
            F::one() - cdf_abs
        };
        cdf.max(F::zero()).min(F::one())
    });
    Ok(result)
}
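/// Evaluates the Uniform(`low`, `high`) probability density function at every
/// element of `x`: `1 / (high - low)` on `[low, high)` and zero elsewhere.
///
/// Returns an error if `x` is empty or `low >= high`.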
pub fn uniform_pdf_batch<F>(x: &ArrayView1<F>, low: F, high: F) -> StatsResult<Array1<F>>
where
F: Float + NumCast + SimdUnifiedOps,
{
if x.is_empty() {
return Err(StatsError::InvalidArgument("x is empty".to_string()));
}
if low >= high {
return Err(StatsError::InvalidArgument(
"low must be strictly less than high".to_string(),
));
}
let density = F::one() / (high - low);
let result = x.mapv(|xi| {
if xi >= low && xi < high {
density
} else {
F::zero()
}
});
Ok(result)
}
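/// Evaluates the Uniform(`low`, `high`) cumulative distribution function at
/// every element of `x`.
///
/// Returns an error if `x` is empty or `low >= high`.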
pub fn uniform_cdf_batch<F>(x: &ArrayView1<F>, low: F, high: F) -> StatsResult<Array1<F>>
where
F: Float + NumCast + SimdUnifiedOps,
{
if x.is_empty() {
return Err(StatsError::InvalidArgument("x is empty".to_string()));
}
if low >= high {
return Err(StatsError::InvalidArgument(
"low must be strictly less than high".to_string(),
));
}
let width = high - low;
let result = x.mapv(|xi| {
if xi < low {
F::zero()
} else if xi >= high {
F::one()
} else {
(xi - low) / width
}
});
Ok(result)
}
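/// Evaluates the Exponential(`rate`) probability density function at every
/// element of `x`; the density is zero for negative inputs.
///
/// Returns an error if `x` is empty or `rate <= 0`.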
pub fn exponential_pdf_batch<F>(x: &ArrayView1<F>, rate: F) -> StatsResult<Array1<F>>
where
F: Float + NumCast + SimdUnifiedOps,
{
if x.is_empty() {
return Err(StatsError::InvalidArgument("x is empty".to_string()));
}
if rate <= F::zero() {
return Err(StatsError::InvalidArgument(
"rate must be positive".to_string(),
));
}
let n = x.len();
let optimizer = AutoOptimizer::new();
if optimizer.should_use_simd(n) {
        // Mask out negative inputs: the Exponential density has support [0, +inf).
        let mask: Array1<F> = x.mapv(|xi| if xi >= F::zero() { F::one() } else { F::zero() });
        let neg_rate = -rate;
let neg_rate_arr = Array1::from_elem(n, neg_rate);
let exponent = F::simd_mul(&neg_rate_arr.view(), x);
let exp_vals = F::simd_exp(&exponent.view());
let rate_arr = Array1::from_elem(n, rate);
let pdf = F::simd_mul(&rate_arr.view(), &exp_vals.view());
let result = F::simd_mul(&mask.view(), &pdf.view());
Ok(result)
} else {
Ok(x.mapv(|xi| {
if xi >= F::zero() {
rate * (-rate * xi).exp()
} else {
F::zero()
}
}))
}
}
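/// Evaluates the Exponential(`rate`) cumulative distribution function at every
/// element of `x`; the CDF is zero for negative inputs.
///
/// Returns an error if `x` is empty or `rate <= 0`.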
pub fn exponential_cdf_batch<F>(x: &ArrayView1<F>, rate: F) -> StatsResult<Array1<F>>
where
F: Float + NumCast + SimdUnifiedOps,
{
if x.is_empty() {
return Err(StatsError::InvalidArgument("x is empty".to_string()));
}
if rate <= F::zero() {
return Err(StatsError::InvalidArgument(
"rate must be positive".to_string(),
));
}
let n = x.len();
let optimizer = AutoOptimizer::new();
if optimizer.should_use_simd(n) {
        // Mask out negative inputs: the CDF is zero below the support.
        let mask: Array1<F> = x.mapv(|xi| if xi >= F::zero() { F::one() } else { F::zero() });
        let neg_rate = -rate;
let neg_rate_arr = Array1::from_elem(n, neg_rate);
let exponent = F::simd_mul(&neg_rate_arr.view(), x);
let exp_vals = F::simd_exp(&exponent.view());
let ones = Array1::from_elem(n, F::one());
let cdf_positive = F::simd_sub(&ones.view(), &exp_vals.view());
Ok(F::simd_mul(&mask.view(), &cdf_positive.view()))
} else {
Ok(x.mapv(|xi| {
if xi >= F::zero() {
F::one() - (-rate * xi).exp()
} else {
F::zero()
}
}))
}
}
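/// Draws `n` Normal(`mean`, `std_dev`) samples in parallel, splitting the work
/// into one chunk per available CPU core with a per-chunk seed derived from
/// `seed` (or from a random base seed when `None`).
///
/// Output is reproducible for a fixed `seed` and core count, since chunk
/// boundaries depend on the number of cores.
///
/// Returns an error if `n == 0` or `std_dev <= 0`.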
pub fn parallel_normal_sample(
n: usize,
mean: f64,
std_dev: f64,
seed: Option<u64>,
) -> StatsResult<Vec<f64>> {
if n == 0 {
return Err(StatsError::InvalidArgument(
"n must be at least 1".to_string(),
));
}
if std_dev <= 0.0 {
return Err(StatsError::InvalidArgument(
"std_dev must be positive for the Normal distribution".to_string(),
));
}
use scirs2_core::parallel_ops::*;
let base_seed: u64 = seed.unwrap_or_else(|| {
use scirs2_core::random::{Rng, RngExt};
scirs2_core::random::thread_rng().random()
});
    // One chunk per available core; each chunk gets a distinct seed derived
    // from the base seed so the run stays reproducible for a fixed seed and
    // core count.
    let n_threads = num_cpus::get().max(1).min(n);
    let chunk_size = (n + n_threads - 1) / n_threads;
    let seeds: Vec<u64> = (0..n_threads)
        .map(|i| base_seed.wrapping_add((i as u64).wrapping_mul(0x9e37_79b9_7f4a_7c15_u64)))
        .collect();
let chunks: Result<Vec<Vec<f64>>, StatsError> = seeds
.into_par_iter()
.enumerate()
.map(|(i, s)| {
let start = i * chunk_size;
if start >= n {
return Ok(vec![]);
}
let end = (start + chunk_size).min(n);
let count = end - start;
let arr = sample_normal_batch::<f64>(count, mean, std_dev, Some(s))?;
Ok(arr.to_vec())
})
.collect();
let chunks = chunks?;
let mut out = Vec::with_capacity(n);
for chunk in chunks {
out.extend_from_slice(&chunk);
}
Ok(out)
}
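/// Draws `n` Uniform(`low`, `high`) samples in parallel, splitting the work
/// into one chunk per available CPU core with a per-chunk seed derived from
/// `seed` (or from a random base seed when `None`).
///
/// Returns an error if `n == 0` or `low >= high`.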
pub fn parallel_uniform_sample(
n: usize,
low: f64,
high: f64,
seed: Option<u64>,
) -> StatsResult<Vec<f64>> {
if n == 0 {
return Err(StatsError::InvalidArgument(
"n must be at least 1".to_string(),
));
}
if low >= high {
return Err(StatsError::InvalidArgument(
"low must be strictly less than high for the Uniform distribution".to_string(),
));
}
use scirs2_core::parallel_ops::*;
let base_seed: u64 = seed.unwrap_or_else(|| {
use scirs2_core::random::{Rng, RngExt};
scirs2_core::random::thread_rng().random()
});
    // Same chunking and per-chunk seeding scheme as `parallel_normal_sample`.
    let n_threads = num_cpus::get().max(1).min(n);
    let chunk_size = (n + n_threads - 1) / n_threads;
    let seeds: Vec<u64> = (0..n_threads)
        .map(|i| base_seed.wrapping_add((i as u64).wrapping_mul(0x6c62_272e_07bb_0142_u64)))
        .collect();
let chunks: Result<Vec<Vec<f64>>, StatsError> = seeds
.into_par_iter()
.enumerate()
.map(|(i, s)| {
let start = i * chunk_size;
if start >= n {
return Ok(vec![]);
}
let end = (start + chunk_size).min(n);
let count = end - start;
let arr = sample_uniform_batch::<f64>(count, low, high, Some(s))?;
Ok(arr.to_vec())
})
.collect();
let chunks = chunks?;
let mut out = Vec::with_capacity(n);
for chunk in chunks {
out.extend_from_slice(&chunk);
}
Ok(out)
}
#[cfg(test)]
mod tests {
use super::*;
use scirs2_core::ndarray::array;
#[test]
fn test_sample_normal_batch_length() {
let samples = sample_normal_batch::<f64>(500, 0.0, 1.0, Some(1)).expect("should succeed");
assert_eq!(samples.len(), 500);
}
#[test]
fn test_sample_normal_batch_empirical_moments() {
let n = 10_000_usize;
let mu = 3.5_f64;
let sigma = 2.0_f64;
let samples = sample_normal_batch::<f64>(n, mu, sigma, Some(42)).expect("should succeed");
assert_eq!(samples.len(), n);
let emp_mean: f64 = samples.iter().sum::<f64>() / n as f64;
let emp_var: f64 = samples.iter().map(|&x| (x - emp_mean).powi(2)).sum::<f64>() / n as f64;
let emp_std = emp_var.sqrt();
assert!(
(emp_mean - mu).abs() < 0.2,
"empirical mean {emp_mean} too far from {mu}"
);
assert!(
(emp_std - sigma).abs() < 0.2,
"empirical std {emp_std} too far from {sigma}"
);
}
#[test]
fn test_sample_normal_batch_rejects_bad_args() {
assert!(sample_normal_batch::<f64>(0, 0.0, 1.0, None).is_err());
assert!(sample_normal_batch::<f64>(10, 0.0, -1.0, None).is_err());
assert!(sample_normal_batch::<f64>(10, 0.0, 0.0, None).is_err());
}
#[test]
fn test_sample_normal_batch_f32() {
let samples = sample_normal_batch::<f32>(200, 0.0, 1.0, Some(5)).expect("should succeed");
assert_eq!(samples.len(), 200);
}
#[test]
fn test_sample_uniform_batch_length() {
let samples = sample_uniform_batch::<f64>(300, -1.0, 1.0, Some(2)).expect("should succeed");
assert_eq!(samples.len(), 300);
}
#[test]
fn test_sample_uniform_batch_range() {
let samples =
sample_uniform_batch::<f64>(2_000, 3.0, 7.0, Some(11)).expect("should succeed");
for &s in samples.iter() {
assert!(s >= 3.0 && s < 7.0, "sample {s} outside [3.0, 7.0)");
}
}
#[test]
fn test_sample_uniform_batch_empirical_mean() {
let (low, high) = (2.0_f64, 6.0_f64);
let expected_mean = (low + high) / 2.0;
let n = 10_000_usize;
let samples = sample_uniform_batch::<f64>(n, low, high, Some(99)).expect("should succeed");
let emp_mean: f64 = samples.iter().sum::<f64>() / n as f64;
assert!(
(emp_mean - expected_mean).abs() < 0.1,
"empirical mean {emp_mean} too far from {expected_mean}"
);
}
#[test]
fn test_sample_uniform_batch_rejects_bad_args() {
assert!(sample_uniform_batch::<f64>(0, 0.0, 1.0, None).is_err());
assert!(sample_uniform_batch::<f64>(10, 1.0, 1.0, None).is_err());
assert!(sample_uniform_batch::<f64>(10, 2.0, 1.0, None).is_err());
}
#[test]
fn test_sample_exponential_batch_length() {
let samples = sample_exponential_batch::<f64>(400, 1.0, Some(3)).expect("should succeed");
assert_eq!(samples.len(), 400);
}
#[test]
fn test_sample_exponential_batch_non_negative() {
let samples =
sample_exponential_batch::<f64>(5_000, 0.5, Some(77)).expect("should succeed");
for &s in samples.iter() {
assert!(s >= 0.0, "exponential sample {s} is negative");
}
}
#[test]
fn test_sample_exponential_batch_empirical_mean() {
let rate = 2.5_f64;
let expected_mean = 1.0 / rate;
let n = 10_000_usize;
let samples = sample_exponential_batch::<f64>(n, rate, Some(13)).expect("should succeed");
let emp_mean: f64 = samples.iter().sum::<f64>() / n as f64;
assert!(
(emp_mean - expected_mean).abs() < 0.05,
"empirical mean {emp_mean} too far from {expected_mean}"
);
}
#[test]
fn test_sample_exponential_batch_rejects_bad_args() {
assert!(sample_exponential_batch::<f64>(0, 1.0, None).is_err());
assert!(sample_exponential_batch::<f64>(10, 0.0, None).is_err());
assert!(sample_exponential_batch::<f64>(10, -1.0, None).is_err());
}
#[test]
fn test_normal_pdf_batch_standard() {
let x = array![0.0_f64, 1.0, -1.0];
let pdf = normal_pdf_batch(&x.view(), 0.0, 1.0).expect("should succeed");
let expected_0 = 1.0_f64 / (2.0 * std::f64::consts::PI).sqrt();
assert!((pdf[0] - expected_0).abs() < 1e-7, "pdf[0]={}", pdf[0]);
assert!((pdf[1] - pdf[2]).abs() < 1e-10, "symmetry failed");
}
#[test]
fn test_normal_cdf_batch_standard() {
let x = array![0.0_f64, 1.959_964_f64, -1.959_964_f64];
let cdf = normal_cdf_batch(&x.view(), 0.0, 1.0).expect("should succeed");
assert!((cdf[0] - 0.5).abs() < 1e-5, "cdf[0]={}", cdf[0]);
assert!((cdf[1] - 0.975).abs() < 1e-4, "cdf[1]={}", cdf[1]);
assert!((cdf[2] - 0.025).abs() < 1e-4, "cdf[2]={}", cdf[2]);
}
#[test]
fn test_uniform_pdf_batch() {
let x = array![0.5_f64, 1.5, 2.5];
let pdf = uniform_pdf_batch(&x.view(), 1.0, 2.0).expect("should succeed");
assert!((pdf[0] - 0.0).abs() < 1e-10);
assert!((pdf[1] - 1.0).abs() < 1e-10);
assert!((pdf[2] - 0.0).abs() < 1e-10);
}
#[test]
fn test_uniform_cdf_batch() {
let x = array![0.5_f64, 1.5, 2.5];
let cdf = uniform_cdf_batch(&x.view(), 1.0, 2.0).expect("should succeed");
assert!((cdf[0] - 0.0).abs() < 1e-10);
assert!((cdf[1] - 0.5).abs() < 1e-10);
assert!((cdf[2] - 1.0).abs() < 1e-10);
}
#[test]
fn test_exponential_pdf_batch() {
let x = array![0.0_f64, 1.0, 2.0, -0.5];
let pdf = exponential_pdf_batch(&x.view(), 1.0).expect("should succeed");
assert!((pdf[0] - 1.0).abs() < 1e-10, "pdf(0)={}", pdf[0]);
assert!(
(pdf[1] - (-1.0_f64).exp()).abs() < 1e-10,
"pdf(1)={}",
pdf[1]
);
assert!(
(pdf[2] - (-2.0_f64).exp()).abs() < 1e-10,
"pdf(2)={}",
pdf[2]
);
assert!((pdf[3] - 0.0).abs() < 1e-10, "pdf(-0.5)={}", pdf[3]);
}
#[test]
fn test_exponential_cdf_batch() {
let x = array![0.0_f64, 1.0, 2.0, -1.0];
let cdf = exponential_cdf_batch(&x.view(), 1.0).expect("should succeed");
assert!((cdf[0] - 0.0).abs() < 1e-10);
assert!((cdf[1] - (1.0 - (-1.0_f64).exp())).abs() < 1e-10);
assert!((cdf[2] - (1.0 - (-2.0_f64).exp())).abs() < 1e-10);
assert!((cdf[3] - 0.0).abs() < 1e-10);
}
#[test]
fn test_seeded_reproducibility_normal() {
let s1 = sample_normal_batch::<f64>(100, 0.0, 1.0, Some(42)).expect("ok");
let s2 = sample_normal_batch::<f64>(100, 0.0, 1.0, Some(42)).expect("ok");
for (a, b) in s1.iter().zip(s2.iter()) {
assert_eq!(a, b, "seeded normal samples should be identical");
}
}
#[test]
fn test_seeded_reproducibility_uniform() {
let s1 = sample_uniform_batch::<f64>(100, 0.0, 1.0, Some(17)).expect("ok");
let s2 = sample_uniform_batch::<f64>(100, 0.0, 1.0, Some(17)).expect("ok");
for (a, b) in s1.iter().zip(s2.iter()) {
assert_eq!(a, b, "seeded uniform samples should be identical");
}
}
#[test]
fn test_seeded_reproducibility_exponential() {
let s1 = sample_exponential_batch::<f64>(100, 0.5, Some(7)).expect("ok");
let s2 = sample_exponential_batch::<f64>(100, 0.5, Some(7)).expect("ok");
for (a, b) in s1.iter().zip(s2.iter()) {
assert_eq!(a, b, "seeded exponential samples should be identical");
}
}
#[test]
fn test_parallel_normal_sample_length() {
let samples =
super::parallel_normal_sample(5_000, 0.0, 1.0, Some(42)).expect("should succeed");
assert_eq!(samples.len(), 5_000);
}
#[test]
fn test_parallel_normal_sample_empirical_moments() {
let n = 20_000_usize;
let mu = 2.5_f64;
let sigma = 1.5_f64;
let samples =
super::parallel_normal_sample(n, mu, sigma, Some(1234)).expect("should succeed");
assert_eq!(samples.len(), n);
let emp_mean: f64 = samples.iter().sum::<f64>() / n as f64;
let emp_var: f64 = samples.iter().map(|&x| (x - emp_mean).powi(2)).sum::<f64>() / n as f64;
let emp_std = emp_var.sqrt();
assert!(
(emp_mean - mu).abs() < 0.2,
"parallel normal: empirical mean {emp_mean} too far from {mu}"
);
assert!(
(emp_std - sigma).abs() < 0.2,
"parallel normal: empirical std {emp_std} too far from {sigma}"
);
}
#[test]
fn test_parallel_normal_sample_rejects_bad_args() {
assert!(super::parallel_normal_sample(0, 0.0, 1.0, None).is_err());
assert!(super::parallel_normal_sample(10, 0.0, 0.0, None).is_err());
assert!(super::parallel_normal_sample(10, 0.0, -1.5, None).is_err());
}
#[test]
fn test_parallel_normal_sample_reproducibility() {
let s1 = super::parallel_normal_sample(1_000, 0.0, 1.0, Some(77)).expect("ok");
let s2 = super::parallel_normal_sample(1_000, 0.0, 1.0, Some(77)).expect("ok");
assert_eq!(s1.len(), s2.len());
for (a, b) in s1.iter().zip(s2.iter()) {
assert_eq!(a, b, "seeded parallel normal samples should be identical");
}
}
#[test]
fn test_parallel_uniform_sample_length() {
let samples =
super::parallel_uniform_sample(4_000, 1.0, 3.0, Some(55)).expect("should succeed");
assert_eq!(samples.len(), 4_000);
}
#[test]
fn test_parallel_uniform_sample_range() {
let samples =
super::parallel_uniform_sample(8_000, 2.0, 5.0, Some(99)).expect("should succeed");
for &s in samples.iter() {
assert!(
s >= 2.0 && s < 5.0,
"parallel uniform: sample {s} outside [2.0, 5.0)"
);
}
}
#[test]
fn test_parallel_uniform_sample_empirical_mean() {
let (low, high) = (3.0_f64, 7.0_f64);
let expected_mean = (low + high) / 2.0;
let n = 10_000_usize;
let samples =
super::parallel_uniform_sample(n, low, high, Some(11)).expect("should succeed");
let emp_mean: f64 = samples.iter().sum::<f64>() / n as f64;
assert!(
(emp_mean - expected_mean).abs() < 0.1,
"parallel uniform: empirical mean {emp_mean} too far from {expected_mean}"
);
}
#[test]
fn test_parallel_uniform_sample_rejects_bad_args() {
assert!(super::parallel_uniform_sample(0, 0.0, 1.0, None).is_err());
assert!(super::parallel_uniform_sample(10, 1.0, 1.0, None).is_err());
assert!(super::parallel_uniform_sample(10, 2.0, 1.0, None).is_err());
}
#[test]
fn test_parallel_uniform_sample_reproducibility() {
let s1 = super::parallel_uniform_sample(500, 0.0, 1.0, Some(33)).expect("ok");
let s2 = super::parallel_uniform_sample(500, 0.0, 1.0, Some(33)).expect("ok");
assert_eq!(s1.len(), s2.len());
for (a, b) in s1.iter().zip(s2.iter()) {
assert_eq!(a, b, "seeded parallel uniform samples should be identical");
}
}
}