use wasm_bindgen::prelude::*;
use std::f64::consts::PI;
/// Summary statistics for a numeric sample, exposed to JavaScript via
/// wasm-bindgen. Populated by `compute_descriptive_stats`; read through the
/// getters on the accompanying `impl` block.
#[wasm_bindgen]
pub struct DescriptiveStats {
mean: f64, // arithmetic mean (sum / n)
variance: f64, // population variance (divides by n, not n - 1)
std_dev: f64, // sqrt(variance)
min: f64, // smallest value
max: f64, // largest value
median: f64, // 50th percentile, linear interpolation between ranks
skewness: f64, // standardized third central moment (0 for symmetric data)
kurtosis: f64, // excess kurtosis (fourth moment - 3; 0 for a normal)
count: usize, // number of input values
sum: f64, // raw sum of the input
q25: f64, // 25th percentile
q75: f64, // 75th percentile
iqr: f64, // interquartile range, q75 - q25
}
#[wasm_bindgen]
impl DescriptiveStats {
/// Arithmetic mean of the sample.
#[wasm_bindgen(getter)]
pub fn mean(&self) -> f64 { self.mean }
/// Population variance (n denominator).
#[wasm_bindgen(getter)]
pub fn variance(&self) -> f64 { self.variance }
/// Population standard deviation.
#[wasm_bindgen(getter)]
pub fn std_dev(&self) -> f64 { self.std_dev }
/// Minimum value.
#[wasm_bindgen(getter)]
pub fn min(&self) -> f64 { self.min }
/// Maximum value.
#[wasm_bindgen(getter)]
pub fn max(&self) -> f64 { self.max }
/// Median (50th percentile).
#[wasm_bindgen(getter)]
pub fn median(&self) -> f64 { self.median }
/// Standardized skewness; 0 when the sample is constant.
#[wasm_bindgen(getter)]
pub fn skewness(&self) -> f64 { self.skewness }
/// Excess kurtosis; 0 when the sample is constant.
#[wasm_bindgen(getter)]
pub fn kurtosis(&self) -> f64 { self.kurtosis }
/// Number of observations.
#[wasm_bindgen(getter)]
pub fn count(&self) -> usize { self.count }
/// Sum of all observations.
#[wasm_bindgen(getter)]
pub fn sum(&self) -> f64 { self.sum }
/// First quartile (25th percentile).
#[wasm_bindgen(getter)]
pub fn q25(&self) -> f64 { self.q25 }
/// Third quartile (75th percentile).
#[wasm_bindgen(getter)]
pub fn q75(&self) -> f64 { self.q75 }
/// Interquartile range (q75 - q25).
#[wasm_bindgen(getter)]
pub fn iqr(&self) -> f64 { self.iqr }
}
/// Compute the full set of descriptive statistics for `data`.
///
/// Moments use population (n-denominator) formulas; quantiles use linear
/// interpolation between closest ranks. An empty slice yields NaN for every
/// statistic, with `count = 0` and `sum = 0`.
#[wasm_bindgen]
pub fn compute_descriptive_stats(data: &[f64]) -> DescriptiveStats {
    let count = data.len();
    if count == 0 {
        // No observations: every statistic is undefined.
        return DescriptiveStats {
            mean: f64::NAN, variance: f64::NAN, std_dev: f64::NAN,
            min: f64::NAN, max: f64::NAN, median: f64::NAN,
            skewness: f64::NAN, kurtosis: f64::NAN,
            count: 0, sum: 0.0, q25: f64::NAN, q75: f64::NAN, iqr: f64::NAN,
        };
    }
    let len = count as f64;
    // Order statistics come from a sorted copy (incomparable values, i.e.
    // NaN, are treated as equal by the comparator fallback).
    let mut ordered = data.to_vec();
    ordered.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    let total: f64 = data.iter().sum();
    let mean = total / len;
    // Population variance: divide by n, not n - 1.
    let variance = data.iter().map(|&v| (v - mean).powi(2)).sum::<f64>() / len;
    let std_dev = variance.sqrt();
    let (skewness, kurtosis) = if std_dev > 0.0 {
        // Standardized third and fourth moments of the z-scores.
        let z3 = data.iter().map(|&v| ((v - mean) / std_dev).powi(3)).sum::<f64>() / len;
        let z4 = data.iter().map(|&v| ((v - mean) / std_dev).powi(4)).sum::<f64>() / len;
        (z3, z4 - 3.0) // subtract 3 for excess kurtosis
    } else {
        (0.0, 0.0) // constant sample: define both shape moments as 0
    };
    let q25 = quantile_linear(&ordered, 0.25);
    let q75 = quantile_linear(&ordered, 0.75);
    DescriptiveStats {
        mean,
        variance,
        std_dev,
        min: ordered[0],
        max: ordered[count - 1],
        median: quantile_linear(&ordered, 0.50),
        skewness,
        kurtosis,
        count,
        sum: total,
        q25,
        q75,
        iqr: q75 - q25,
    }
}
/// Fitted simple (one-predictor) least-squares regression model,
/// exposed to JavaScript via wasm-bindgen.
#[wasm_bindgen]
pub struct WasmLinearModel {
slope: f64, // fitted slope
intercept: f64, // fitted intercept
r_squared: f64, // coefficient of determination, clamped to [0, 1]
std_err_slope: f64, // standard error of the slope (NaN when n <= 2)
std_err_intercept: f64, // standard error of the intercept (NaN when n <= 2)
n: usize, // number of observations used in the fit
}
#[wasm_bindgen]
impl WasmLinearModel {
    /// Fitted slope of the least-squares line.
    #[wasm_bindgen(getter)]
    pub fn slope(&self) -> f64 { self.slope }
    /// Fitted intercept.
    #[wasm_bindgen(getter)]
    pub fn intercept(&self) -> f64 { self.intercept }
    /// Coefficient of determination in [0, 1].
    #[wasm_bindgen(getter)]
    pub fn r_squared(&self) -> f64 { self.r_squared }
    /// Standard error of the slope estimate (NaN when n <= 2).
    #[wasm_bindgen(getter)]
    pub fn std_err_slope(&self) -> f64 { self.std_err_slope }
    /// Standard error of the intercept estimate (NaN when n <= 2).
    #[wasm_bindgen(getter)]
    pub fn std_err_intercept(&self) -> f64 { self.std_err_intercept }
    /// Number of observations the model was fitted on.
    #[wasm_bindgen(getter)]
    pub fn n(&self) -> usize { self.n }
    /// Evaluate the fitted line at a single point.
    pub fn predict(&self, x: f64) -> f64 {
        self.intercept + self.slope * x
    }
    /// Evaluate the fitted line at every element of `xs`.
    pub fn predict_batch(&self, xs: &[f64]) -> Vec<f64> {
        xs.iter().map(|&v| self.predict(v)).collect()
    }
}
/// Ordinary least-squares fit of `y` on `x`.
///
/// Returns an all-NaN model when fewer than two points are given, when the
/// slices have different lengths, or when every `x` is identical (vertical
/// line). Standard errors require at least three points and are NaN otherwise.
#[wasm_bindgen]
pub fn wasm_linear_regression_typed(x: &[f64], y: &[f64]) -> WasmLinearModel {
    let n = x.len();
    let degenerate = WasmLinearModel {
        slope: f64::NAN, intercept: f64::NAN, r_squared: f64::NAN,
        std_err_slope: f64::NAN, std_err_intercept: f64::NAN, n: 0,
    };
    if n < 2 || y.len() != n {
        return degenerate;
    }
    let n_f = n as f64;
    let x_mean = x.iter().sum::<f64>() / n_f;
    let y_mean = y.iter().sum::<f64>() / n_f;
    // Centered sums of squares and cross-products.
    let (mut ss_xx, mut ss_xy, mut ss_yy) = (0.0_f64, 0.0_f64, 0.0_f64);
    for (&xi, &yi) in x.iter().zip(y.iter()) {
        let dx = xi - x_mean;
        let dy = yi - y_mean;
        ss_xx += dx * dx;
        ss_xy += dx * dy;
        ss_yy += dy * dy;
    }
    if ss_xx == 0.0 {
        // All x values identical: slope is undefined.
        return degenerate;
    }
    let slope = ss_xy / ss_xx;
    let intercept = y_mean - slope * x_mean;
    // Residual sum of squares of the fitted line.
    let ss_res: f64 = x.iter().zip(y.iter())
        .map(|(&xi, &yi)| (yi - (slope * xi + intercept)).powi(2))
        .sum();
    let r_squared = if ss_yy == 0.0 {
        1.0 // y is constant: the flat line fits exactly
    } else {
        (1.0 - ss_res / ss_yy).clamp(0.0, 1.0)
    };
    let (std_err_slope, std_err_intercept) = if n > 2 {
        // Residual variance with n - 2 degrees of freedom.
        let resid_var = ss_res / (n_f - 2.0);
        (
            (resid_var / ss_xx).sqrt(),
            (resid_var * (1.0 / n_f + x_mean * x_mean / ss_xx)).sqrt(),
        )
    } else {
        (f64::NAN, f64::NAN) // standard errors need at least 3 points
    };
    WasmLinearModel { slope, intercept, r_squared, std_err_slope, std_err_intercept, n }
}
/// Result of a one- or two-sample t-test, exposed to JavaScript.
#[wasm_bindgen]
pub struct WasmTTestResult {
t_stat: f64, // observed t statistic
p_value: f64, // two-tailed p-value
df: f64, // degrees of freedom (Welch-adjusted for the two-sample test)
significant_at_05: bool, // p < 0.05
significant_at_01: bool, // p < 0.01
}
#[wasm_bindgen]
impl WasmTTestResult {
/// Observed t statistic.
#[wasm_bindgen(getter)]
pub fn t_stat(&self) -> f64 { self.t_stat }
/// Two-tailed p-value.
#[wasm_bindgen(getter)]
pub fn p_value(&self) -> f64 { self.p_value }
/// Degrees of freedom used for the p-value.
#[wasm_bindgen(getter)]
pub fn df(&self) -> f64 { self.df }
/// True when p < 0.05.
#[wasm_bindgen(getter)]
pub fn significant_at_05(&self) -> bool { self.significant_at_05 }
/// True when p < 0.01.
#[wasm_bindgen(getter)]
pub fn significant_at_01(&self) -> bool { self.significant_at_01 }
}
/// One-sample t-test of H0: population mean == `mu`.
///
/// Returns an all-NaN result for fewer than two observations or a sample
/// with zero spread (t undefined).
#[wasm_bindgen]
pub fn wasm_t_test_one_sample_typed(data: &[f64], mu: f64) -> WasmTTestResult {
    let invalid = WasmTTestResult {
        t_stat: f64::NAN, p_value: f64::NAN, df: f64::NAN,
        significant_at_05: false, significant_at_01: false,
    };
    let n = data.len();
    if n < 2 {
        return invalid;
    }
    let n_f = n as f64;
    let mean = data.iter().sum::<f64>() / n_f;
    // Unbiased sample variance (n - 1 denominator) and standard error of the mean.
    let sample_var = data.iter().map(|&v| (v - mean).powi(2)).sum::<f64>() / (n_f - 1.0);
    let std_err = (sample_var / n_f).sqrt();
    if std_err == 0.0 {
        return invalid; // constant sample
    }
    let t_stat = (mean - mu) / std_err;
    let df = n_f - 1.0;
    let p_value = t_two_tailed_pvalue(t_stat.abs(), df);
    WasmTTestResult {
        t_stat,
        p_value,
        df,
        significant_at_05: p_value < 0.05,
        significant_at_01: p_value < 0.01,
    }
}
/// Welch's two-sample t-test (unequal variances) of H0: mean(x) == mean(y).
///
/// Returns an all-NaN result when either sample has fewer than two points or
/// both samples are constant.
#[wasm_bindgen]
pub fn wasm_t_test_two_sample_typed(x: &[f64], y: &[f64]) -> WasmTTestResult {
    let invalid = WasmTTestResult {
        t_stat: f64::NAN, p_value: f64::NAN, df: f64::NAN,
        significant_at_05: false, significant_at_01: false,
    };
    if x.len() < 2 || y.len() < 2 {
        return invalid;
    }
    let (nx, ny) = (x.len() as f64, y.len() as f64);
    let mean_x = x.iter().sum::<f64>() / nx;
    let mean_y = y.iter().sum::<f64>() / ny;
    // Unbiased sample variances.
    let var_x = x.iter().map(|&v| (v - mean_x).powi(2)).sum::<f64>() / (nx - 1.0);
    let var_y = y.iter().map(|&v| (v - mean_y).powi(2)).sum::<f64>() / (ny - 1.0);
    // Squared standard error contribution of each sample.
    let sq_se_x = var_x / nx;
    let sq_se_y = var_y / ny;
    let std_err = (sq_se_x + sq_se_y).sqrt();
    if std_err == 0.0 {
        return invalid; // both samples constant
    }
    let t_stat = (mean_x - mean_y) / std_err;
    // Welch–Satterthwaite approximation for the degrees of freedom.
    let denom = sq_se_x.powi(2) / (nx - 1.0) + sq_se_y.powi(2) / (ny - 1.0);
    let df = if denom == 0.0 { 1.0 } else { (sq_se_x + sq_se_y).powi(2) / denom };
    let p_value = t_two_tailed_pvalue(t_stat.abs(), df);
    WasmTTestResult {
        t_stat,
        p_value,
        df,
        significant_at_05: p_value < 0.05,
        significant_at_01: p_value < 0.01,
    }
}
/// Result of the Kolmogorov–Smirnov normality test.
#[wasm_bindgen]
pub struct WasmKSResult {
statistic: f64, // KS statistic D: sup distance between ECDF and fitted normal CDF
p_value: f64, // asymptotic p-value for D
is_normal_at_05: bool, // true when p > 0.05 (normality not rejected)
}
#[wasm_bindgen]
impl WasmKSResult {
/// KS statistic D.
#[wasm_bindgen(getter)]
pub fn statistic(&self) -> f64 { self.statistic }
/// Asymptotic p-value.
#[wasm_bindgen(getter)]
pub fn p_value(&self) -> f64 { self.p_value }
/// True when normality is not rejected at the 5% level.
#[wasm_bindgen(getter)]
pub fn is_normal_at_05(&self) -> bool { self.is_normal_at_05 }
}
/// Kolmogorov–Smirnov test of `data` against a normal distribution whose
/// mean and (population) standard deviation are estimated from `data` itself.
///
/// NOTE(review): because the parameters are fitted from the same sample, this
/// is the Lilliefors setting; the plain asymptotic KS p-value used here is
/// conservative — confirm that is acceptable for callers.
///
/// Returns an all-NaN result for fewer than three points or zero spread.
#[wasm_bindgen]
pub fn wasm_ks_test_normality(data: &[f64]) -> WasmKSResult {
    let invalid = WasmKSResult { statistic: f64::NAN, p_value: f64::NAN, is_normal_at_05: false };
    let n = data.len();
    if n < 3 {
        return invalid;
    }
    let n_f = n as f64;
    let mean = data.iter().sum::<f64>() / n_f;
    // Population standard deviation of the sample.
    let std_dev = (data.iter().map(|&v| (v - mean).powi(2)).sum::<f64>() / n_f).sqrt();
    if std_dev == 0.0 {
        return invalid;
    }
    let mut ordered = data.to_vec();
    ordered.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    // D = sup |F_n - Phi|, checked just above and just below each ECDF step.
    let mut statistic = 0.0_f64;
    for (i, &v) in ordered.iter().enumerate() {
        let cdf = normal_cdf_inner(v, mean, std_dev);
        let above = ((i + 1) as f64 / n_f - cdf).abs();
        let below = (cdf - i as f64 / n_f).abs();
        statistic = statistic.max(above.max(below));
    }
    // Stephens' finite-sample correction before the asymptotic distribution.
    let sqrt_n = n_f.sqrt();
    let lambda = statistic * (sqrt_n + 0.12 + 0.11 / sqrt_n);
    let p_value = kolmogorov_pvalue(lambda);
    WasmKSResult { statistic, p_value, is_normal_at_05: p_value > 0.05 }
}
/// Histogram result: `n_bins + 1` bin edges, per-bin counts, and a density
/// whose total area is 1 when any values were binned.
#[wasm_bindgen]
pub struct WasmHistogram {
edges: Vec<f64>, // bin boundaries, length n_bins + 1 (empty for empty input)
counts: Vec<u32>, // raw count per bin
density: Vec<f64>, // count / (total * bin_width) per bin
n_bins: usize, // number of bins (0 for degenerate input)
total: usize, // number of non-NaN values that were binned
}
#[wasm_bindgen]
impl WasmHistogram {
    /// Bin boundaries (length `n_bins + 1`).
    #[wasm_bindgen(getter)]
    pub fn edges(&self) -> Vec<f64> { self.edges.clone() }
    /// Raw count per bin.
    #[wasm_bindgen(getter)]
    pub fn counts(&self) -> Vec<u32> { self.counts.clone() }
    /// Normalized density per bin.
    #[wasm_bindgen(getter)]
    pub fn density(&self) -> Vec<f64> { self.density.clone() }
    /// Number of bins.
    #[wasm_bindgen(getter)]
    pub fn n_bins(&self) -> usize { self.n_bins }
    /// Total number of binned values.
    #[wasm_bindgen(getter)]
    pub fn total(&self) -> usize { self.total }
    /// Midpoint of every bin, in order.
    pub fn bin_centers(&self) -> Vec<f64> {
        // Each adjacent pair of edges delimits one bin.
        self.edges
            .windows(2)
            .map(|pair| (pair[0] + pair[1]) / 2.0)
            .collect()
    }
    /// Width of a bin (all bins are uniform); NaN when there are no bins.
    pub fn bin_width(&self) -> f64 {
        if self.n_bins == 0 { return f64::NAN; }
        self.edges[1] - self.edges[0]
    }
}
/// Build a uniform-width histogram of `data` with `n_bins` bins.
///
/// NaN values are skipped entirely; non-finite extremes (e.g. infinities in
/// the input) yield the empty histogram, as do empty input or zero bins.
/// A constant sample is given a synthetic unit-wide range centered on it.
#[wasm_bindgen]
pub fn wasm_histogram(data: &[f64], n_bins: usize) -> WasmHistogram {
    let empty = WasmHistogram { edges: vec![], counts: vec![], density: vec![], n_bins: 0, total: 0 };
    if n_bins == 0 || data.is_empty() {
        return empty;
    }
    let lo = data.iter().cloned().fold(f64::INFINITY, f64::min);
    let hi = data.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
    if !(lo.is_finite() && hi.is_finite()) {
        return empty;
    }
    // All values equal: synthesize a unit-wide range around them.
    let (span, origin) = if (hi - lo).abs() < f64::EPSILON {
        (1.0, lo - 0.5)
    } else {
        (hi - lo, lo)
    };
    let bin_width = span / n_bins as f64;
    let edges: Vec<f64> = (0..=n_bins).map(|i| origin + i as f64 * bin_width).collect();
    let mut counts = vec![0u32; n_bins];
    let last_bin = n_bins as isize - 1;
    for &value in data {
        if value.is_nan() {
            continue; // NaNs are excluded from the histogram
        }
        // Clamp so the maximum lands in the last bin and rounding stays safe.
        let raw = ((value - origin) / bin_width).floor() as isize;
        counts[raw.clamp(0, last_bin) as usize] += 1;
    }
    let total: usize = counts.iter().map(|&c| c as usize).sum();
    let density: Vec<f64> = if total == 0 {
        vec![0.0; n_bins]
    } else {
        counts.iter().map(|&c| c as f64 / (total as f64 * bin_width)).collect()
    };
    WasmHistogram { edges, counts, density, n_bins, total }
}
/// Spearman rank correlation of `x` and `y` (Pearson correlation of their
/// average ranks). Returns NaN for empty or unequal-length inputs.
#[wasm_bindgen]
pub fn wasm_spearman_correlation_typed(x: &[f64], y: &[f64]) -> f64 {
    if x.is_empty() || x.len() != y.len() {
        return f64::NAN;
    }
    pearson_on_ranks(&rank_vector(x), &rank_vector(y))
}
/// Quantile of a pre-sorted slice using linear interpolation between closest
/// ranks (the same scheme as numpy's default / R type 7).
///
/// `p` is clamped to `[0, 1]`: previously an out-of-range `p` (e.g. 1.5)
/// produced a rank past the end of the slice and panicked on indexing, and a
/// negative `p` silently saturated to index 0. Returns NaN for an empty slice.
fn quantile_linear(sorted: &[f64], p: f64) -> f64 {
    let n = sorted.len();
    if n == 0 {
        return f64::NAN;
    }
    if n == 1 {
        return sorted[0];
    }
    // Fractional rank in [0, n-1]; the clamp guarantees lo/hi stay in bounds.
    // (A NaN `p` stays NaN, casts to rank 0, and yields sorted[0].)
    let h = p.clamp(0.0, 1.0) * (n - 1) as f64;
    let lo = h.floor() as usize;
    let hi = h.ceil() as usize;
    if lo == hi {
        sorted[lo]
    } else {
        let frac = h - lo as f64;
        sorted[lo] * (1.0 - frac) + sorted[hi] * frac
    }
}
/// CDF of a normal distribution with the given `mean` and `std_dev`,
/// via Phi(z) = (1 + erf(z / sqrt(2))) / 2.
fn normal_cdf_inner(x: f64, mean: f64, std_dev: f64) -> f64 {
    let scaled = (x - mean) / (std_dev * std::f64::consts::SQRT_2);
    0.5 * (1.0 + erf_approx(scaled))
}
/// Error function erf(x), via the Abramowitz & Stegun 7.1.26 rational
/// approximation (absolute error below ~1.5e-7).
fn erf_approx(x: f64) -> f64 {
    // erf is odd: evaluate on |x| and restore the sign at the end.
    let negative = x < 0.0;
    let x = x.abs();
    let t = 1.0 / (1.0 + 0.327_591_1 * x);
    // Degree-5 polynomial in t, Horner form.
    let poly = t * (0.254_829_592
        + t * (-0.284_496_736
        + t * (1.421_413_741
        + t * (-1.453_152_027
        + t * 1.061_405_429))));
    let magnitude = 1.0 - poly * (-x * x).exp();
    if negative { -magnitude } else { magnitude }
}
/// Regularized incomplete beta function I_x(a, b), evaluated with the
/// continued-fraction expansion using the modified Lentz method (as in
/// Numerical Recipes' `betacf`). Assumes a > 0 and b > 0.
fn regularized_incomplete_beta(x: f64, a: f64, b: f64) -> f64 {
if x <= 0.0 { return 0.0; }
if x >= 1.0 { return 1.0; }
// The continued fraction converges fastest for x < (a+1)/(a+b+2);
// otherwise apply the symmetry I_x(a, b) = 1 - I_{1-x}(b, a).
if x > (a + 1.0) / (a + b + 2.0) {
return 1.0 - regularized_incomplete_beta(1.0 - x, b, a);
}
// Prefactor x^a (1-x)^b / (a * B(a, b)), computed in log space for stability.
let ln_beta = ln_gamma(a) + ln_gamma(b) - ln_gamma(a + b);
let front = (a * x.ln() + b * (1.0 - x).ln() - ln_beta).exp() / a;
// Modified Lentz iteration state; the 1e-30 floors guard against
// division by values indistinguishable from zero.
let mut c = 1.0_f64;
let mut d = 1.0 - (a + b) * x / (a + 1.0);
if d.abs() < 1e-30 { d = 1e-30; }
d = 1.0 / d;
let mut f = d;
for m in 1_usize..=200 {
let m_f = m as f64;
// Even-numbered continued-fraction coefficient d_{2m}.
let num_e = m_f * (b - m_f) * x / ((a + 2.0 * m_f - 1.0) * (a + 2.0 * m_f));
d = 1.0 + num_e * d; if d.abs() < 1e-30 { d = 1e-30; }
c = 1.0 + num_e / c; if c.abs() < 1e-30 { c = 1e-30; }
d = 1.0 / d; f *= d * c;
// Odd-numbered continued-fraction coefficient d_{2m+1}.
let num_o = -(a + m_f) * (a + b + m_f) * x / ((a + 2.0 * m_f) * (a + 2.0 * m_f + 1.0));
d = 1.0 + num_o * d; if d.abs() < 1e-30 { d = 1e-30; }
c = 1.0 + num_o / c; if c.abs() < 1e-30 { c = 1e-30; }
d = 1.0 / d;
let delta = d * c;
f *= delta;
// Converged once the multiplicative update is within 1e-14 of 1.
if (delta - 1.0).abs() < 1e-14 { break; }
}
front * f
}
/// Natural logarithm of the gamma function via the Lanczos approximation
/// (g = 7, 9 coefficients).
fn ln_gamma(x: f64) -> f64 {
const G: f64 = 7.0;
// Lanczos coefficients for g = 7.
const C: [f64; 9] = [
0.999_999_999_999_809_93,
676.520_368_121_885_1,
-1259.139_216_722_402_8,
771.323_428_777_653_1,
-176.615_029_162_140_6,
12.507_343_278_686_905,
-0.138_571_095_265_720_12,
9.984_369_578_019_572e-6,
1.505_632_735_149_312e-7,
];
// Reflection formula for x < 0.5: Gamma(x) = pi / (sin(pi x) * Gamma(1 - x)).
// NOTE(review): v.ln() is NaN when sin(pi x) <= 0; callers in this file pass
// positive shape parameters, so this branch only sees x in (0, 0.5) where v > 0.
if x < 0.5 {
let v = PI / ((PI * x).sin() * ln_gamma(1.0 - x).exp());
return v.ln();
}
let xm1 = x - 1.0;
// Lanczos partial-fraction sum A_g(x).
let mut a = C[0];
let t = xm1 + G + 0.5;
for (i, &c) in C[1..].iter().enumerate() {
a += c / (xm1 + i as f64 + 1.0);
}
// ln Gamma(x) = ln sqrt(2 pi) + ln A_g(x) + (x - 0.5) ln t - t, t = x + g - 0.5.
(2.0 * PI).sqrt().ln() + a.ln() + (xm1 + 0.5) * t.ln() - t
}
/// Two-tailed p-value for a Student-t statistic `t_abs` (>= 0) with `df`
/// degrees of freedom.
///
/// Uses the identity P(|T| >= t) = I_x(df/2, 1/2) with x = df / (df + t^2),
/// where I is the regularized incomplete beta function. The previous version
/// multiplied this by 2, double-counting the tails (the identity already
/// covers both), which inflated every p-value and returned values up to 2.0
/// (e.g. p = 2 at t = 0, where the correct two-tailed p is 1).
fn t_two_tailed_pvalue(t_abs: f64, df: f64) -> f64 {
    let x = df / (df + t_abs * t_abs);
    regularized_incomplete_beta(x, df / 2.0, 0.5)
}
/// Asymptotic Kolmogorov–Smirnov survival function
/// Q(lambda) = 2 * sum_{k>=1} (-1)^{k-1} exp(-2 k^2 lambda^2),
/// truncated once a term drops below 1e-14 (at most 100 terms), and
/// clamped to [0, 1] against truncation noise.
fn kolmogorov_pvalue(lambda: f64) -> f64 {
    if lambda <= 0.0 {
        return 1.0;
    }
    let mut acc = 0.0_f64;
    let mut sign = 1.0_f64; // alternating series: +, -, +, ...
    for k in 1_i64..=100 {
        let kf = k as f64;
        let term = (-2.0 * kf * kf * lambda * lambda).exp();
        acc += sign * term;
        if term < 1e-14 {
            break;
        }
        sign = -sign;
    }
    (2.0 * acc).clamp(0.0, 1.0)
}
/// Average ranks (1-based) of `data`; tied values share the mean of the
/// ranks they span. NaNs get no special handling: the comparator fallback
/// treats incomparable pairs as equal during the sort, and `==` keeps each
/// NaN in its own tie group.
fn rank_vector(data: &[f64]) -> Vec<f64> {
    let len = data.len();
    // Sort a permutation of indices by value (stable sort).
    let mut order: Vec<usize> = (0..len).collect();
    order.sort_by(|&a, &b| data[a].partial_cmp(&data[b]).unwrap_or(std::cmp::Ordering::Equal));
    let mut ranks = vec![0.0_f64; len];
    let mut start = 0;
    while start < len {
        // Extend the run of positions holding values equal to the run head.
        let mut end = start;
        while end < len && data[order[end]] == data[order[start]] {
            end += 1;
        }
        // Positions start..end would receive ranks start+1 ..= end;
        // every member of the tie group gets their average.
        let shared = (start + end + 1) as f64 / 2.0;
        for &original_index in &order[start..end] {
            ranks[original_index] = shared;
        }
        start = end;
    }
    ranks
}
/// Pearson correlation coefficient of two equal-length rank vectors.
///
/// Returns NaN for empty input or when either vector has zero variance.
/// Panics if `ry` is shorter than `rx` (callers guarantee equal lengths).
fn pearson_on_ranks(rx: &[f64], ry: &[f64]) -> f64 {
    let len = rx.len();
    if len == 0 {
        return f64::NAN;
    }
    let len_f = len as f64;
    let mean_x = rx.iter().sum::<f64>() / len_f;
    let mean_y = ry.iter().sum::<f64>() / len_f;
    // Accumulate centered cross-product and both sums of squares in one pass.
    let (mut cross, mut var_x, mut var_y) = (0.0_f64, 0.0_f64, 0.0_f64);
    for i in 0..len {
        let dx = rx[i] - mean_x;
        let dy = ry[i] - mean_y;
        cross += dx * dy;
        var_x += dx * dx;
        var_y += dy * dy;
    }
    if var_x == 0.0 || var_y == 0.0 {
        f64::NAN // a constant vector has no defined correlation
    } else {
        cross / (var_x * var_y).sqrt()
    }
}
// Unit tests for the statistics kernels. These exercise the pure-Rust logic
// directly (no wasm runtime needed) and pin down known closed-form answers
// on small, hand-checkable samples.
#[cfg(test)]
mod tests {
use super::*;
// Mean, extremes, median, count, and sum on a tiny known sample.
#[test]
fn test_descriptive_stats_basic() {
let data = [1.0_f64, 2.0, 3.0, 4.0, 5.0];
let ds = compute_descriptive_stats(&data);
assert!((ds.mean() - 3.0).abs() < 1e-10, "mean = {}", ds.mean());
assert!((ds.min() - 1.0).abs() < 1e-10);
assert!((ds.max() - 5.0).abs() < 1e-10);
assert!((ds.median() - 3.0).abs() < 1e-10);
assert_eq!(ds.count(), 5);
assert!((ds.sum() - 15.0).abs() < 1e-10);
}
// Empty input yields NaN statistics and count 0.
#[test]
fn test_descriptive_stats_empty() {
let ds = compute_descriptive_stats(&[]);
assert!(ds.mean().is_nan());
assert_eq!(ds.count(), 0);
}
// A symmetric sample must have (near-)zero skewness.
#[test]
fn test_descriptive_stats_skewness_symmetric() {
let data = [1.0_f64, 2.0, 3.0, 4.0, 5.0];
let ds = compute_descriptive_stats(&data);
assert!(ds.skewness().abs() < 1e-10, "skewness = {}", ds.skewness());
}
// Spread-out data must have a strictly positive IQR.
#[test]
fn test_descriptive_stats_iqr() {
let data = [1.0_f64, 2.0, 3.0, 4.0, 5.0];
let ds = compute_descriptive_stats(&data);
assert!(ds.iqr() > 0.0, "iqr = {}", ds.iqr());
}
// Exact fit y = 2x: slope 2, intercept 0, R^2 = 1.
#[test]
fn test_linear_regression_typed_perfect() {
let x = [1.0_f64, 2.0, 3.0, 4.0, 5.0];
let y = [2.0_f64, 4.0, 6.0, 8.0, 10.0];
let model = wasm_linear_regression_typed(&x, &y);
assert!((model.slope() - 2.0).abs() < 1e-10, "slope = {}", model.slope());
assert!(model.intercept().abs() < 1e-10, "intercept = {}", model.intercept());
assert!((model.r_squared() - 1.0).abs() < 1e-10, "r2 = {}", model.r_squared());
}
// predict() extrapolates the fitted line (y = 2x + 1 here).
#[test]
fn test_linear_regression_predict() {
let x = [0.0_f64, 1.0, 2.0];
let y = [1.0_f64, 3.0, 5.0];
let model = wasm_linear_regression_typed(&x, &y);
let p = model.predict(3.0);
assert!((p - 7.0).abs() < 1e-8, "predict(3) = {}", p);
}
// Sample centered near 0 tested against mu = 0: should not be significant.
#[test]
fn test_t_test_one_sample() {
let data = [-0.5_f64, 0.2, 0.1, -0.3, 0.4, 0.0, 0.15, -0.1];
let res = wasm_t_test_one_sample_typed(&data, 0.0);
assert!(!res.t_stat().is_nan());
assert!(res.p_value() > 0.05, "p_value = {}", res.p_value());
}
// Two samples offset by 10: strongly significant difference.
#[test]
fn test_t_test_two_sample_distinct() {
let x: Vec<f64> = (0..20).map(|i| i as f64 * 0.1).collect();
let y: Vec<f64> = (0..20).map(|i| 10.0 + i as f64 * 0.1).collect();
let res = wasm_t_test_two_sample_typed(&x, &y);
assert!(res.p_value() < 0.001, "p_value = {}", res.p_value());
assert!(res.significant_at_01());
}
// Smoke test: the KS statistic is computable on a non-degenerate sample.
#[test]
fn test_ks_test_normality_gaussian() {
let data: Vec<f64> = (0..100).map(|i| (i as f64 - 50.0) * 0.1).collect();
let res = wasm_ks_test_normality(&data);
assert!(!res.statistic().is_nan(), "KS statistic should be computable");
}
// Fewer than 3 points: KS test refuses with NaN.
#[test]
fn test_ks_test_too_small() {
let res = wasm_ks_test_normality(&[1.0, 2.0]);
assert!(res.statistic().is_nan());
}
// Shape invariants: n_bins bins, n_bins + 1 edges, all values counted.
#[test]
fn test_histogram_basic() {
let data = [1.0_f64, 2.0, 3.0, 4.0, 5.0];
let h = wasm_histogram(&data, 5);
assert_eq!(h.n_bins(), 5);
assert_eq!(h.edges().len(), 6);
assert_eq!(h.counts().len(), 5);
assert_eq!(h.total(), 5);
}
// Empty input produces the empty histogram.
#[test]
fn test_histogram_empty() {
let h = wasm_histogram(&[], 5);
assert_eq!(h.n_bins(), 0);
}
// The density is a proper probability density: total area 1.
#[test]
fn test_histogram_density_sums_to_one() {
let data: Vec<f64> = (0..50).map(|i| i as f64).collect();
let h = wasm_histogram(&data, 10);
let bw = h.bin_width();
let area: f64 = h.density().iter().sum::<f64>() * bw;
assert!((area - 1.0).abs() < 1e-10, "density area = {}", area);
}
// Any strictly increasing mapping gives Spearman rho = 1.
#[test]
fn test_spearman_perfect_monotone() {
let x = [1.0_f64, 2.0, 3.0, 4.0, 5.0];
let y = [2.0_f64, 4.0, 6.0, 8.0, 10.0];
let rho = wasm_spearman_correlation_typed(&x, &y);
assert!((rho - 1.0).abs() < 1e-10, "rho = {}", rho);
}
// Strictly decreasing mapping gives rho = -1.
#[test]
fn test_spearman_perfect_anti_monotone() {
let x = [1.0_f64, 2.0, 3.0, 4.0, 5.0];
let y = [5.0_f64, 4.0, 3.0, 2.0, 1.0];
let rho = wasm_spearman_correlation_typed(&x, &y);
assert!((rho + 1.0).abs() < 1e-10, "rho = {}", rho);
}
// Length mismatch is rejected with NaN.
#[test]
fn test_spearman_mismatch() {
let x = [1.0_f64, 2.0];
let y = [3.0_f64];
assert!(wasm_spearman_correlation_typed(&x, &y).is_nan());
}
// Quantiles at the ends and the exact middle rank.
#[test]
fn test_quantile_edge_cases() {
let sorted = [1.0_f64, 3.0, 5.0];
assert!((quantile_linear(&sorted, 0.0) - 1.0).abs() < 1e-10);
assert!((quantile_linear(&sorted, 1.0) - 5.0).abs() < 1e-10);
assert!((quantile_linear(&sorted, 0.5) - 3.0).abs() < 1e-10);
}
// Distinct values get distinct 1-based ranks.
#[test]
fn test_rank_vector_no_ties() {
let data = [3.0_f64, 1.0, 4.0, 1.5, 9.0];
let ranks = rank_vector(&data);
assert!((ranks[0] - 3.0).abs() < 1e-10, "rank[0] = {}", ranks[0]);
assert!((ranks[1] - 1.0).abs() < 1e-10, "rank[1] = {}", ranks[1]);
assert!((ranks[4] - 5.0).abs() < 1e-10, "rank[4] = {}", ranks[4]);
}
// Tied values share the average of the ranks they span (1 and 2 -> 1.5).
#[test]
fn test_rank_vector_ties() {
let data = [1.0_f64, 1.0, 3.0];
let ranks = rank_vector(&data);
assert!((ranks[0] - 1.5).abs() < 1e-10, "rank[0] = {}", ranks[0]);
assert!((ranks[1] - 1.5).abs() < 1e-10, "rank[1] = {}", ranks[1]);
assert!((ranks[2] - 3.0).abs() < 1e-10, "rank[2] = {}", ranks[2]);
}
}