use crate::error::FdarError;
use crate::iter_maybe_parallel;
use crate::linalg::{cholesky_factor, cholesky_forward_back, compute_xtx};
use crate::matrix::FdMatrix;
use crate::regression::fdata_to_pc_1d;
#[cfg(feature = "parallel")]
use rayon::iter::ParallelIterator;
/// Result of a penalized function-on-scalar regression fit (see [`fosr`]).
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub struct FosrResult {
    /// Estimated intercept function, one value per grid point (length m).
    pub intercept: Vec<f64>,
    /// Coefficient functions for the p predictors (p × m); intercept excluded.
    pub beta: FdMatrix,
    /// Fitted curves X·B (n × m).
    pub fitted: FdMatrix,
    /// Residual curves, data − fitted (n × m).
    pub residuals: FdMatrix,
    /// Pointwise R² at each grid point (length m).
    pub r_squared_t: Vec<f64>,
    /// Mean of `r_squared_t` over the grid.
    pub r_squared: f64,
    /// Pointwise standard errors of `beta` (p × m); intercept excluded.
    pub beta_se: FdMatrix,
    /// Roughness-penalty weight actually used (GCV-selected when the input was negative).
    pub lambda: f64,
    /// Generalized cross-validation score of the fit.
    pub gcv: f64,
}
/// Result of a function-on-scalar regression through an FPC basis (see [`fosr_fpc`]).
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub struct FosrFpcResult {
    /// Estimated intercept function (length m).
    pub intercept: Vec<f64>,
    /// Coefficient functions for the p predictors (p × m).
    pub beta: FdMatrix,
    /// Fitted curves (n × m).
    pub fitted: FdMatrix,
    /// Residual curves, data − fitted (n × m).
    pub residuals: FdMatrix,
    /// Pointwise R² at each grid point (length m).
    pub r_squared_t: Vec<f64>,
    /// Mean of `r_squared_t` over the grid.
    pub r_squared: f64,
    /// Per-predictor coefficients on the FPC scores (p vectors of length `ncomp`).
    pub beta_scores: Vec<Vec<f64>>,
    /// Number of components actually produced by the FPCA (may differ from the request).
    pub ncomp: usize,
}
/// Result of a permutation-based functional one-way ANOVA (see [`fanova`]).
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub struct FanovaResult {
    /// Mean curve of each group (n_groups × m); rows ordered as `group_labels`.
    pub group_means: FdMatrix,
    /// Grand mean curve over all observations (length m).
    pub overall_mean: Vec<f64>,
    /// Pointwise F statistic at each grid point (length m).
    pub f_statistic_t: Vec<f64>,
    /// Global statistic: mean of `f_statistic_t` over the grid.
    pub global_statistic: f64,
    /// Permutation p-value with add-one smoothing (never exactly 0).
    pub p_value: f64,
    /// Number of permutations performed (at least 1).
    pub n_perm: usize,
    /// Number of distinct groups found in the input.
    pub n_groups: usize,
    /// Sorted, deduplicated group identifiers.
    pub group_labels: Vec<usize>,
}
/// Builds the second-difference roughness penalty DᵀD for `m` coefficients.
///
/// Returns an `m × m` matrix in row-major order. For `m < 3` no second
/// difference exists, so the zero matrix is returned.
pub(crate) fn penalty_matrix(m: usize) -> Vec<f64> {
    let mut dtd = vec![0.0; m * m];
    if m < 3 {
        return dtd;
    }
    // Each row of D applies the [1, -2, 1] stencil; accumulate its outer
    // product with itself into DᵀD.
    for start in 0..m - 2 {
        let stencil = [(start, 1.0), (start + 1, -2.0), (start + 2, 1.0)];
        for &(row, wr) in &stencil {
            for &(col, wc) in &stencil {
                dtd[row * m + col] += wr * wc;
            }
        }
    }
    dtd
}
/// Solves the penalized normal equations (XᵀX + λP) B = XᵀY, one column of
/// the right-hand side (i.e. one grid point) at a time.
///
/// # Errors
///
/// Propagates the error from `cholesky_factor` when XᵀX + λP is not
/// positive definite.
fn penalized_solve(
    xtx: &[f64],
    xty: &FdMatrix,
    penalty: &[f64],
    lambda: f64,
) -> Result<FdMatrix, FdarError> {
    let (p, m) = (xty.nrows(), xty.ncols());
    // A = XᵀX + λP, built element-wise over the row-major buffers.
    let a: Vec<f64> = xtx
        .iter()
        .zip(penalty)
        .map(|(&x, &pen)| x + lambda * pen)
        .collect();
    let chol = cholesky_factor(&a, p)?;
    let mut beta = FdMatrix::zeros(p, m);
    for t in 0..m {
        let rhs: Vec<f64> = (0..p).map(|j| xty[(j, t)]).collect();
        let solution = cholesky_forward_back(&chol, &rhs, p);
        for (j, &value) in solution.iter().enumerate() {
            beta[(j, t)] = value;
        }
    }
    Ok(beta)
}
/// Computes R²(t) = 1 − SS_res/SS_tot at every grid point, returning 0.0
/// where the data column is numerically constant (SS_tot ≈ 0).
pub(crate) fn pointwise_r_squared(data: &FdMatrix, fitted: &FdMatrix) -> Vec<f64> {
    let (n, m) = data.shape();
    let mut r2 = Vec::with_capacity(m);
    for t in 0..m {
        let mut total = 0.0;
        for i in 0..n {
            total += data[(i, t)];
        }
        let mean_t = total / n as f64;
        // Single pass accumulates both sums; per-accumulator order matches
        // the obvious column scan.
        let mut ss_tot = 0.0;
        let mut ss_res = 0.0;
        for i in 0..n {
            let observed = data[(i, t)];
            ss_tot += (observed - mean_t).powi(2);
            ss_res += (observed - fitted[(i, t)]).powi(2);
        }
        let value = if ss_tot > 1e-15 {
            1.0 - ss_res / ss_tot
        } else {
            0.0
        };
        r2.push(value);
    }
    r2
}
/// Generalized cross-validation score: SSR / (n·m·(1 − tr(H)/n)²).
///
/// The shrink factor is floored at 1e-10 so a near-saturated fit cannot
/// blow up the division.
fn compute_fosr_gcv(residuals: &FdMatrix, trace_h: f64) -> f64 {
    let (n, m) = residuals.shape();
    let shrink = (1.0 - trace_h / n as f64).max(1e-10);
    let mut ss_res = 0.0;
    for i in 0..n {
        for t in 0..m {
            let r = residuals[(i, t)];
            ss_res += r * r;
        }
    }
    ss_res / (n as f64 * m as f64 * shrink * shrink)
}
/// Builds the n × (p + 1) design matrix [1 | predictors]: a leading column
/// of ones for the intercept followed by the predictor columns.
pub(crate) fn build_fosr_design(predictors: &FdMatrix, n: usize) -> FdMatrix {
    let p = predictors.ncols();
    let mut design = FdMatrix::zeros(n, p + 1);
    for row in 0..n {
        design[(row, 0)] = 1.0;
        for col in 0..p {
            design[(row, col + 1)] = predictors[(row, col)];
        }
    }
    design
}
/// Computes the cross-product XᵀY of the design matrix and the data,
/// a (p + 1) × m matrix.
pub(crate) fn compute_xty_matrix(design: &FdMatrix, data: &FdMatrix) -> FdMatrix {
    let (n, m) = data.shape();
    let p_total = design.ncols();
    let mut xty = FdMatrix::zeros(p_total, m);
    for j in 0..p_total {
        for t in 0..m {
            // Dot product of design column j with data column t.
            xty[(j, t)] = (0..n).map(|i| design[(i, j)] * data[(i, t)]).sum();
        }
    }
    xty
}
/// Copies rows 1..=p of `full` into a fresh p × m matrix, discarding the
/// intercept coefficients in row 0.
fn drop_intercept_rows(full: &FdMatrix, p: usize, m: usize) -> FdMatrix {
    let mut trimmed = FdMatrix::zeros(p, m);
    for row in 0..p {
        for col in 0..m {
            trimmed[(row, col)] = full[(row + 1, col)];
        }
    }
    trimmed
}
/// Fits a function-on-scalar regression of `data` (n curves × m grid points)
/// on scalar `predictors` (n × p) with a second-difference roughness penalty
/// of weight `lambda` on the coefficient vector at each grid point.
///
/// A negative `lambda` requests automatic selection by GCV over a fixed
/// candidate grid (see `select_lambda_gcv`).
///
/// # Errors
///
/// Returns `FdarError::InvalidDimension` when `data` has zero columns, when
/// `predictors` does not have n rows, or when n < p + 2. Propagates the
/// factorization error from `penalized_solve`.
#[must_use = "expensive computation whose result should not be discarded"]
pub fn fosr(data: &FdMatrix, predictors: &FdMatrix, lambda: f64) -> Result<FosrResult, FdarError> {
    let (n, m) = data.shape();
    let p = predictors.ncols();
    if m == 0 {
        return Err(FdarError::InvalidDimension {
            parameter: "data",
            expected: "at least 1 column (grid points)".to_string(),
            actual: "0 columns".to_string(),
        });
    }
    if predictors.nrows() != n {
        return Err(FdarError::InvalidDimension {
            parameter: "predictors",
            expected: format!("{n} rows (matching data)"),
            actual: format!("{} rows", predictors.nrows()),
        });
    }
    if n < p + 2 {
        return Err(FdarError::InvalidDimension {
            parameter: "data",
            expected: format!("at least {} observations (p + 2)", p + 2),
            actual: format!("{n} observations"),
        });
    }
    // Design matrix [1 | Z]; the intercept occupies column 0 throughout.
    let design = build_fosr_design(predictors, n);
    let p_total = design.ncols();
    let xtx = compute_xtx(&design);
    let xty = compute_xty_matrix(&design, data);
    let penalty = penalty_matrix(p_total);
    // A negative lambda is the sentinel for "choose by GCV".
    let lambda = if lambda < 0.0 {
        select_lambda_gcv(&xtx, &xty, &penalty, data, &design)
    } else {
        lambda
    };
    let beta = penalized_solve(&xtx, &xty, &penalty, lambda)?;
    let (fitted, residuals) = compute_fosr_fitted(&design, &beta, data);
    let r_squared_t = pointwise_r_squared(data, &fitted);
    let r_squared = r_squared_t.iter().sum::<f64>() / m as f64;
    let beta_se = compute_beta_se(&xtx, &penalty, lambda, &residuals, p_total, n);
    let trace_h = compute_trace_hat(&xtx, &penalty, lambda, p_total, n);
    let gcv = compute_fosr_gcv(&residuals, trace_h);
    // Row 0 of beta is the intercept; report it separately and drop it from
    // the coefficient and SE matrices.
    let intercept: Vec<f64> = (0..m).map(|t| beta[(0, t)]).collect();
    Ok(FosrResult {
        intercept,
        beta: drop_intercept_rows(&beta, p, m),
        fitted,
        residuals,
        r_squared_t,
        r_squared,
        beta_se: drop_intercept_rows(&beta_se, p, m),
        lambda,
        gcv,
    })
}
/// Computes fitted values X·B and residuals Y − X·B, one row per curve.
///
/// Rows are processed independently — in parallel when the `parallel`
/// feature is enabled (via `iter_maybe_parallel!`) — then copied back into
/// dense output matrices in index order.
fn compute_fosr_fitted(
    design: &FdMatrix,
    beta: &FdMatrix,
    data: &FdMatrix,
) -> (FdMatrix, FdMatrix) {
    let (n, m) = data.shape();
    let p_total = design.ncols();
    // Each map yields (fitted_row, residual_row); collect preserves row order.
    let rows: Vec<(Vec<f64>, Vec<f64>)> = iter_maybe_parallel!(0..n)
        .map(|i| {
            let mut fitted_row = vec![0.0; m];
            let mut resid_row = vec![0.0; m];
            for t in 0..m {
                let mut yhat = 0.0;
                for j in 0..p_total {
                    yhat += design[(i, j)] * beta[(j, t)];
                }
                fitted_row[t] = yhat;
                resid_row[t] = data[(i, t)] - yhat;
            }
            (fitted_row, resid_row)
        })
        .collect();
    let mut fitted = FdMatrix::zeros(n, m);
    let mut residuals = FdMatrix::zeros(n, m);
    for (i, (fr, rr)) in rows.into_iter().enumerate() {
        for t in 0..m {
            fitted[(i, t)] = fr[t];
            residuals[(i, t)] = rr[t];
        }
    }
    (fitted, residuals)
}
/// Selects the penalty weight by minimizing the GCV criterion over a fixed
/// grid of candidates spanning 0 to 1000 (log-ish spacing).
///
/// Candidates whose penalized system fails to factorize are skipped; if all
/// fail, 0.0 is returned.
fn select_lambda_gcv(
    xtx: &[f64],
    xty: &FdMatrix,
    penalty: &[f64],
    data: &FdMatrix,
    design: &FdMatrix,
) -> f64 {
    let lambdas = [0.0, 1e-6, 1e-4, 1e-2, 0.1, 1.0, 10.0, 100.0, 1000.0];
    let p_total = design.ncols();
    let n = design.nrows();
    let mut best_lambda = 0.0;
    let mut best_gcv = f64::INFINITY;
    for &lam in &lambdas {
        // Skip candidates where XᵀX + λP is not positive definite.
        let Ok(beta) = penalized_solve(xtx, xty, penalty, lam) else {
            continue;
        };
        let (_, residuals) = compute_fosr_fitted(design, &beta, data);
        let trace_h = compute_trace_hat(xtx, penalty, lam, p_total, n);
        let gcv = compute_fosr_gcv(&residuals, trace_h);
        // Strict < keeps the smallest lambda on ties.
        if gcv < best_gcv {
            best_gcv = gcv;
            best_lambda = lam;
        }
    }
    best_lambda
}
/// Trace of the hat matrix H = X(XᵀX + λP)⁻¹Xᵀ, i.e. the effective degrees
/// of freedom of the penalized fit, capped at `n`.
///
/// Uses tr(H) = tr((XᵀX + λP)⁻¹ XᵀX): solve against each column of XᵀX and
/// take the diagonal entries. Falls back to `p` (a saturated fit) when the
/// factorization fails.
fn compute_trace_hat(xtx: &[f64], penalty: &[f64], lambda: f64, p: usize, n: usize) -> f64 {
    let a: Vec<f64> = xtx
        .iter()
        .zip(penalty)
        .map(|(&x, &pen)| x + lambda * pen)
        .collect();
    let Ok(chol) = cholesky_factor(&a, p) else {
        return p as f64;
    };
    let mut trace = 0.0;
    for j in 0..p {
        let column: Vec<f64> = (0..p).map(|i| xtx[i * p + j]).collect();
        let solved = cholesky_forward_back(&chol, &column, p);
        trace += solved[j];
    }
    trace.min(n as f64)
}
/// Pointwise standard errors of the penalized coefficients:
/// SE(β_j(t)) = sqrt(σ̂²(t) · [A⁻¹]_jj) with A = XᵀX + λP and σ̂²(t) the
/// residual variance at grid point t.
///
/// Returns an all-zero matrix when A is not positive definite.
fn compute_beta_se(
    xtx: &[f64],
    penalty: &[f64],
    lambda: f64,
    residuals: &FdMatrix,
    p: usize,
    n: usize,
) -> FdMatrix {
    let m = residuals.ncols();
    let mut a = vec![0.0; p * p];
    for i in 0..p * p {
        a[i] = xtx[i] + lambda * penalty[i];
    }
    let Ok(l) = cholesky_factor(&a, p) else {
        return FdMatrix::zeros(p, m);
    };
    // Diagonal of A⁻¹ via one triangular solve per unit vector.
    let a_inv_diag: Vec<f64> = (0..p)
        .map(|j| {
            let mut ej = vec![0.0; p];
            ej[j] = 1.0;
            let v = cholesky_forward_back(&l, &ej, p);
            v[j]
        })
        .collect();
    // saturating_sub avoids a usize-underflow panic if this were ever
    // called with p >= n; current callers guarantee n > p, where this is
    // identical to (n - p).max(1).
    let df = n.saturating_sub(p).max(1) as f64;
    let mut se = FdMatrix::zeros(p, m);
    for t in 0..m {
        let sigma2_t: f64 = (0..n).map(|i| residuals[(i, t)].powi(2)).sum::<f64>() / df;
        for j in 0..p {
            // max(0.0) defends against tiny negative products from round-off.
            se[(j, t)] = (sigma2_t * a_inv_diag[j]).max(0.0).sqrt();
        }
    }
    se
}
/// OLS-regresses each FPC score column on the design matrix, returning one
/// coefficient vector of length `p_total` per component.
///
/// # Errors
///
/// Propagates the error from `cholesky_factor` when XᵀX is singular.
fn regress_scores_on_design(
    design: &FdMatrix,
    scores: &FdMatrix,
    n: usize,
    k: usize,
    p_total: usize,
) -> Result<Vec<Vec<f64>>, FdarError> {
    let xtx = compute_xtx(design);
    let chol = cholesky_factor(&xtx, p_total)?;
    let mut gamma_all = Vec::with_capacity(k);
    for comp in 0..k {
        // Right-hand side Xᵀs for this component's score vector.
        let mut xts = vec![0.0; p_total];
        for (j, slot) in xts.iter_mut().enumerate() {
            for i in 0..n {
                *slot += design[(i, j)] * scores[(i, comp)];
            }
        }
        gamma_all.push(cholesky_forward_back(&chol, &xts, p_total));
    }
    Ok(gamma_all)
}
/// Maps the per-component regression coefficients back to the time domain:
/// β_j(t) = Σ_c γ_c[1 + j] · rotation[t, c] (index 0 of each γ_c is the
/// intercept and is handled separately).
fn reconstruct_beta_fpc(
    gamma_all: &[Vec<f64>],
    rotation: &FdMatrix,
    p: usize,
    k: usize,
    m: usize,
) -> FdMatrix {
    let mut beta = FdMatrix::zeros(p, m);
    for j in 0..p {
        for t in 0..m {
            beta[(j, t)] = (0..k)
                .map(|comp| gamma_all[comp][1 + j] * rotation[(t, comp)])
                .sum();
        }
    }
    beta
}
/// Intercept function: the sample mean curve plus the per-component
/// intercept coefficients (γ_c[0]) mapped through the FPC basis.
fn compute_intercept_fpc(
    mean: &[f64],
    gamma_all: &[Vec<f64>],
    rotation: &FdMatrix,
    k: usize,
    m: usize,
) -> Vec<f64> {
    let mut intercept = mean.to_vec();
    for (t, value) in intercept.iter_mut().enumerate().take(m) {
        for comp in 0..k {
            *value += gamma_all[comp][0] * rotation[(t, comp)];
        }
    }
    intercept
}
/// Extracts the per-predictor FPC score coefficients, rescaled by √h where
/// h = 1/(m − 1) is the grid spacing (h = 1 when the grid has one point).
fn extract_beta_scores(gamma_all: &[Vec<f64>], p: usize, k: usize, m: usize) -> Vec<Vec<f64>> {
    let step = if m > 1 { 1.0 / (m - 1) as f64 } else { 1.0 };
    let scale = step.sqrt();
    let mut scores = Vec::with_capacity(p);
    for j in 0..p {
        let row: Vec<f64> = (0..k)
            .map(|comp| gamma_all[comp][1 + j] * scale)
            .collect();
        scores.push(row);
    }
    scores
}
/// Fits a function-on-scalar regression by expanding `data` in `ncomp`
/// functional principal components and regressing the FPC scores on the
/// scalar predictors.
///
/// The returned `ncomp` field is the number of components the FPCA actually
/// produced, which may differ from the request.
///
/// # Errors
///
/// Returns `FdarError::InvalidDimension` for an empty grid, mismatched
/// predictor rows, or n < p + 2; `FdarError::InvalidParameter` when
/// `ncomp == 0`. Errors from the FPCA or the score regression propagate.
#[must_use = "expensive computation whose result should not be discarded"]
pub fn fosr_fpc(
    data: &FdMatrix,
    predictors: &FdMatrix,
    ncomp: usize,
) -> Result<FosrFpcResult, FdarError> {
    let (n, m) = data.shape();
    let p = predictors.ncols();
    if m == 0 {
        return Err(FdarError::InvalidDimension {
            parameter: "data",
            expected: "at least 1 column (grid points)".to_string(),
            actual: "0 columns".to_string(),
        });
    }
    if predictors.nrows() != n {
        return Err(FdarError::InvalidDimension {
            parameter: "predictors",
            expected: format!("{n} rows (matching data)"),
            actual: format!("{} rows", predictors.nrows()),
        });
    }
    if n < p + 2 {
        return Err(FdarError::InvalidDimension {
            parameter: "data",
            expected: format!("at least {} observations (p + 2)", p + 2),
            actual: format!("{n} observations"),
        });
    }
    if ncomp == 0 {
        return Err(FdarError::InvalidParameter {
            parameter: "ncomp",
            message: "number of FPC components must be at least 1".to_string(),
        });
    }
    // Uniform grid on [0, 1]; max(1) guards the divisor when m == 1.
    let argvals: Vec<f64> = (0..m).map(|j| j as f64 / (m - 1).max(1) as f64).collect();
    let fpca = fdata_to_pc_1d(data, ncomp, &argvals)?;
    // k is the component count the FPCA actually delivered.
    let k = fpca.scores.ncols();
    let p_total = p + 1;
    let design = build_fosr_design(predictors, n);
    let gamma_all = regress_scores_on_design(&design, &fpca.scores, n, k, p_total)?;
    let beta = reconstruct_beta_fpc(&gamma_all, &fpca.rotation, p, k, m);
    let intercept = compute_intercept_fpc(&fpca.mean, &gamma_all, &fpca.rotation, k, m);
    let (fitted, residuals) = compute_fosr_fpc_fitted(data, &intercept, &beta, predictors);
    let r_squared_t = pointwise_r_squared(data, &fitted);
    let r_squared = r_squared_t.iter().sum::<f64>() / m as f64;
    let beta_scores = extract_beta_scores(&gamma_all, p, k, m);
    Ok(FosrFpcResult {
        intercept,
        beta,
        fitted,
        residuals,
        r_squared_t,
        r_squared,
        beta_scores,
        ncomp: k,
    })
}
/// Fitted values ŷ_i(t) = intercept(t) + Σ_j z_ij·β_j(t) and the matching
/// residuals y − ŷ for the FPC-based model.
fn compute_fosr_fpc_fitted(
    data: &FdMatrix,
    intercept: &[f64],
    beta: &FdMatrix,
    predictors: &FdMatrix,
) -> (FdMatrix, FdMatrix) {
    let (n, m) = data.shape();
    let p = predictors.ncols();
    let mut fitted = FdMatrix::zeros(n, m);
    let mut residuals = FdMatrix::zeros(n, m);
    for row in 0..n {
        for t in 0..m {
            // Accumulate starting from the intercept to keep summation order.
            let mut prediction = intercept[t];
            for j in 0..p {
                prediction += predictors[(row, j)] * beta[(j, t)];
            }
            fitted[(row, t)] = prediction;
            residuals[(row, t)] = data[(row, t)] - prediction;
        }
    }
    (fitted, residuals)
}
/// Predicts response curves for new predictor rows from a fitted FOSR model:
/// ŷ_i(t) = intercept(t) + Σ_j z_ij·β_j(t).
#[must_use = "prediction result should not be discarded"]
pub fn predict_fosr(result: &FosrResult, new_predictors: &FdMatrix) -> FdMatrix {
    let n_new = new_predictors.nrows();
    let m = result.intercept.len();
    let p = result.beta.nrows();
    let mut predicted = FdMatrix::zeros(n_new, m);
    for row in 0..n_new {
        for t in 0..m {
            let mut acc = result.intercept[t];
            for j in 0..p {
                acc += new_predictors[(row, j)] * result.beta[(j, t)];
            }
            predicted[(row, t)] = acc;
        }
    }
    predicted
}
/// Computes the per-group mean curves (rows ordered as `labels`) and the
/// overall mean curve.
///
/// Rows whose group identifier is not in `labels` fall back to index 0;
/// callers build `labels` from `groups`, so every lookup should succeed.
fn compute_group_means(
    data: &FdMatrix,
    groups: &[usize],
    labels: &[usize],
) -> (FdMatrix, Vec<f64>) {
    let (n, m) = data.shape();
    let k = labels.len();
    let mut group_means = FdMatrix::zeros(k, m);
    let mut counts = vec![0usize; k];
    // Accumulate sums and sizes per group.
    for i in 0..n {
        let g = labels.iter().position(|&l| l == groups[i]).unwrap_or(0);
        counts[g] += 1;
        for t in 0..m {
            group_means[(g, t)] += data[(i, t)];
        }
    }
    // Normalize sums into means; empty groups stay all-zero.
    for (g, &count) in counts.iter().enumerate() {
        if count > 0 {
            for t in 0..m {
                group_means[(g, t)] /= count as f64;
            }
        }
    }
    let mut overall_mean = vec![0.0; m];
    for (t, slot) in overall_mean.iter_mut().enumerate() {
        for i in 0..n {
            *slot += data[(i, t)];
        }
        *slot /= n as f64;
    }
    (group_means, overall_mean)
}
/// One-way ANOVA F statistic at each grid point.
///
/// Between- and within-group sums of squares are formed from the
/// precomputed group and overall means; both mean-square denominators are
/// floored at 1 to avoid division by zero, and a (numerically) zero
/// within-group mean square yields F = 0 rather than infinity.
fn pointwise_f_statistic(
    data: &FdMatrix,
    groups: &[usize],
    labels: &[usize],
    group_means: &FdMatrix,
    overall_mean: &[f64],
) -> Vec<f64> {
    let (n, m) = data.shape();
    let k = labels.len();
    // Group sizes, indexed by each label's position in `labels`.
    let mut counts = vec![0usize; k];
    for &g in groups {
        let idx = labels.iter().position(|&l| l == g).unwrap_or(0);
        counts[idx] += 1;
    }
    (0..m)
        .map(|t| {
            let ss_between: f64 = (0..k)
                .map(|g| counts[g] as f64 * (group_means[(g, t)] - overall_mean[t]).powi(2))
                .sum();
            let ss_within: f64 = (0..n)
                .map(|i| {
                    let g = labels.iter().position(|&l| l == groups[i]).unwrap_or(0);
                    (data[(i, t)] - group_means[(g, t)]).powi(2)
                })
                .sum();
            let ms_between = ss_between / (k as f64 - 1.0).max(1.0);
            let ms_within = ss_within / (n as f64 - k as f64).max(1.0);
            if ms_within > 1e-15 {
                ms_between / ms_within
            } else {
                0.0
            }
        })
        .collect()
}
/// Global test statistic: the pointwise F statistics averaged over the grid.
///
/// Returns 0.0 for an empty slice instead of NaN (0/0); callers guarantee a
/// non-empty grid, but a NaN here would silently break the `>=` comparison
/// used by the permutation test.
fn global_f_statistic(f_t: &[f64]) -> f64 {
    if f_t.is_empty() {
        return 0.0;
    }
    f_t.iter().sum::<f64>() / f_t.len() as f64
}
/// Permutation-based functional one-way ANOVA.
///
/// Computes pointwise F statistics, averages them into a global statistic,
/// and estimates a p-value by `n_perm` random permutations of the group
/// labels. The shuffle is driven by a fixed-seed LCG, so results are
/// deterministic across runs.
///
/// # Errors
///
/// Returns `FdarError::InvalidDimension` when `data` has no columns, when
/// `groups` does not match the number of rows, or when fewer than 3
/// observations are supplied; `FdarError::InvalidParameter` when fewer than
/// 2 distinct groups are present.
#[must_use = "expensive computation whose result should not be discarded"]
pub fn fanova(data: &FdMatrix, groups: &[usize], n_perm: usize) -> Result<FanovaResult, FdarError> {
    let (n, m) = data.shape();
    if m == 0 {
        return Err(FdarError::InvalidDimension {
            parameter: "data",
            expected: "at least 1 column (grid points)".to_string(),
            actual: "0 columns".to_string(),
        });
    }
    if groups.len() != n {
        return Err(FdarError::InvalidDimension {
            parameter: "groups",
            expected: format!("{n} elements (matching data rows)"),
            actual: format!("{} elements", groups.len()),
        });
    }
    if n < 3 {
        return Err(FdarError::InvalidDimension {
            parameter: "data",
            expected: "at least 3 observations".to_string(),
            actual: format!("{n} observations"),
        });
    }
    // Distinct group identifiers, sorted; this fixes the row order of
    // `group_means` and the index space used by the helpers.
    let mut labels: Vec<usize> = groups.to_vec();
    labels.sort_unstable();
    labels.dedup();
    let n_groups = labels.len();
    if n_groups < 2 {
        return Err(FdarError::InvalidParameter {
            parameter: "groups",
            message: format!("at least 2 distinct groups required, but only {n_groups} found"),
        });
    }
    let (group_means, overall_mean) = compute_group_means(data, groups, &labels);
    let f_t = pointwise_f_statistic(data, groups, &labels, &group_means, &overall_mean);
    let observed_stat = global_f_statistic(&f_t);
    let n_perm = n_perm.max(1);
    let mut n_ge = 0usize;
    let mut perm_groups = groups.to_vec();
    // Fixed-seed 64-bit LCG; deterministic permutation stream.
    let mut rng_state: u64 = 42;
    for _ in 0..n_perm {
        // Fisher–Yates shuffle; the high bits of the LCG state are used
        // because an LCG's low bits have poor randomness.
        for i in (1..n).rev() {
            rng_state = rng_state
                .wrapping_mul(6_364_136_223_846_793_005)
                .wrapping_add(1);
            let j = (rng_state >> 33) as usize % (i + 1);
            perm_groups.swap(i, j);
        }
        let (perm_means, perm_overall) = compute_group_means(data, &perm_groups, &labels);
        let perm_f = pointwise_f_statistic(data, &perm_groups, &labels, &perm_means, &perm_overall);
        let perm_stat = global_f_statistic(&perm_f);
        if perm_stat >= observed_stat {
            n_ge += 1;
        }
    }
    // Add-one smoothing: the estimated p-value can never be exactly 0.
    let p_value = (n_ge as f64 + 1.0) / (n_perm as f64 + 1.0);
    Ok(FanovaResult {
        group_means,
        overall_mean,
        f_statistic_t: f_t,
        global_statistic: observed_stat,
        p_value,
        n_perm,
        n_groups,
        group_labels: labels,
    })
}
impl FosrResult {
    /// Predicts response curves for new predictor rows; convenience wrapper
    /// around [`predict_fosr`] for this fitted model.
    pub fn predict(&self, new_predictors: &FdMatrix) -> FdMatrix {
        predict_fosr(self, new_predictors)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_helpers::uniform_grid;
    use std::f64::consts::PI;

    /// Deterministic synthetic data: y_i(t) = sin(2πt) + age·t
    /// + group·cos(4πt) + small pseudo-noise, with predictors
    /// z = [age, group indicator].
    fn generate_fosr_data(n: usize, m: usize) -> (FdMatrix, FdMatrix) {
        let t = uniform_grid(m);
        let mut y = FdMatrix::zeros(n, m);
        let mut z = FdMatrix::zeros(n, 2);
        for i in 0..n {
            let age = (i as f64) / (n as f64);
            let group = if i % 2 == 0 { 1.0 } else { 0.0 };
            z[(i, 0)] = age;
            z[(i, 1)] = group;
            for j in 0..m {
                let mu = (2.0 * PI * t[j]).sin();
                let beta1 = t[j];
                let beta2 = (4.0 * PI * t[j]).cos();
                y[(i, j)] = mu
                    + age * beta1
                    + group * beta2
                    + 0.05 * ((i * 13 + j * 7) % 100) as f64 / 100.0;
            }
        }
        (y, z)
    }

    /// Unpenalized fit succeeds and all output shapes are consistent.
    #[test]
    fn test_fosr_basic() {
        let (y, z) = generate_fosr_data(30, 50);
        let result = fosr(&y, &z, 0.0);
        assert!(result.is_ok());
        let fit = result.unwrap();
        assert_eq!(fit.intercept.len(), 50);
        assert_eq!(fit.beta.shape(), (2, 50));
        assert_eq!(fit.fitted.shape(), (30, 50));
        assert_eq!(fit.residuals.shape(), (30, 50));
        assert!(fit.r_squared >= 0.0);
    }

    /// Penalized and unpenalized fits both produce correctly shaped betas.
    #[test]
    fn test_fosr_with_penalty() {
        let (y, z) = generate_fosr_data(30, 50);
        let fit0 = fosr(&y, &z, 0.0).unwrap();
        let fit1 = fosr(&y, &z, 1.0).unwrap();
        assert_eq!(fit0.beta.shape(), (2, 50));
        assert_eq!(fit1.beta.shape(), (2, 50));
    }

    /// Negative lambda triggers GCV selection, yielding a non-negative value.
    #[test]
    fn test_fosr_auto_lambda() {
        let (y, z) = generate_fosr_data(30, 50);
        let fit = fosr(&y, &z, -1.0).unwrap();
        assert!(fit.lambda >= 0.0);
    }

    /// Decomposition identity: fitted + residuals reconstructs the data.
    #[test]
    fn test_fosr_fitted_plus_residuals_equals_y() {
        let (y, z) = generate_fosr_data(30, 50);
        let fit = fosr(&y, &z, 0.0).unwrap();
        for i in 0..30 {
            for t in 0..50 {
                let reconstructed = fit.fitted[(i, t)] + fit.residuals[(i, t)];
                assert!(
                    (reconstructed - y[(i, t)]).abs() < 1e-10,
                    "ŷ + r should equal y at ({}, {})",
                    i,
                    t
                );
            }
        }
    }

    /// Pointwise R² stays within its valid range (small slack for round-off).
    #[test]
    fn test_fosr_pointwise_r_squared_valid() {
        let (y, z) = generate_fosr_data(30, 50);
        let fit = fosr(&y, &z, 0.0).unwrap();
        for &r2 in &fit.r_squared_t {
            assert!(
                (-0.01..=1.0 + 1e-10).contains(&r2),
                "R²(t) out of range: {}",
                r2
            );
        }
    }

    /// Standard errors are finite and non-negative for every coefficient.
    #[test]
    fn test_fosr_se_positive() {
        let (y, z) = generate_fosr_data(30, 50);
        let fit = fosr(&y, &z, 0.0).unwrap();
        for j in 0..2 {
            for t in 0..50 {
                assert!(
                    fit.beta_se[(j, t)] >= 0.0 && fit.beta_se[(j, t)].is_finite(),
                    "SE should be non-negative finite"
                );
            }
        }
    }

    /// Too few observations (n < p + 2) is rejected.
    #[test]
    fn test_fosr_invalid_input() {
        let y = FdMatrix::zeros(2, 50);
        let z = FdMatrix::zeros(2, 1);
        assert!(fosr(&y, &z, 0.0).is_err());
    }

    /// Predicting on the training design reproduces the fitted values.
    #[test]
    fn test_predict_fosr_on_training_data() {
        let (y, z) = generate_fosr_data(30, 50);
        let fit = fosr(&y, &z, 0.0).unwrap();
        let preds = predict_fosr(&fit, &z);
        assert_eq!(preds.shape(), (30, 50));
        for i in 0..30 {
            for t in 0..50 {
                assert!(
                    (preds[(i, t)] - fit.fitted[(i, t)]).abs() < 1e-8,
                    "Prediction on training data should match fitted"
                );
            }
        }
    }

    /// A clear two-group effect should be detected (small p-value).
    #[test]
    fn test_fanova_two_groups() {
        let n = 40;
        let m = 50;
        let t = uniform_grid(m);
        let mut data = FdMatrix::zeros(n, m);
        let mut groups = vec![0usize; n];
        for i in 0..n {
            groups[i] = if i < n / 2 { 0 } else { 1 };
            for j in 0..m {
                let base = (2.0 * PI * t[j]).sin();
                let effect = if groups[i] == 1 { 0.5 * t[j] } else { 0.0 };
                data[(i, j)] = base + effect + 0.01 * (i as f64 * 0.1).sin();
            }
        }
        let result = fanova(&data, &groups, 200);
        assert!(result.is_ok());
        let res = result.unwrap();
        assert_eq!(res.n_groups, 2);
        assert_eq!(res.group_means.shape(), (2, m));
        assert_eq!(res.f_statistic_t.len(), m);
        assert!(res.p_value >= 0.0 && res.p_value <= 1.0);
        assert!(
            res.p_value < 0.1,
            "Should detect group effect, got p={}",
            res.p_value
        );
    }

    /// Identical group distributions should yield a non-significant p-value.
    #[test]
    fn test_fanova_no_effect() {
        let n = 40;
        let m = 50;
        let t = uniform_grid(m);
        let mut data = FdMatrix::zeros(n, m);
        let mut groups = vec![0usize; n];
        for i in 0..n {
            groups[i] = if i < n / 2 { 0 } else { 1 };
            for j in 0..m {
                data[(i, j)] =
                    (2.0 * PI * t[j]).sin() + 0.1 * ((i * 7 + j * 3) % 100) as f64 / 100.0;
            }
        }
        let result = fanova(&data, &groups, 200);
        assert!(result.is_ok());
        let res = result.unwrap();
        assert!(
            res.p_value > 0.05,
            "Should not detect effect, got p={}",
            res.p_value
        );
    }

    /// Three groups with distinct effects are counted correctly.
    #[test]
    fn test_fanova_three_groups() {
        let n = 30;
        let m = 50;
        let t = uniform_grid(m);
        let mut data = FdMatrix::zeros(n, m);
        let mut groups = vec![0usize; n];
        for i in 0..n {
            groups[i] = i % 3;
            for j in 0..m {
                let effect = match groups[i] {
                    0 => 0.0,
                    1 => 0.5 * t[j],
                    _ => -0.3 * (2.0 * PI * t[j]).cos(),
                };
                data[(i, j)] = (2.0 * PI * t[j]).sin() + effect + 0.01 * (i as f64 * 0.1).sin();
            }
        }
        let result = fanova(&data, &groups, 200);
        assert!(result.is_ok());
        let res = result.unwrap();
        assert_eq!(res.n_groups, 3);
    }

    /// A single distinct group and a length-mismatched label vector are
    /// both rejected.
    #[test]
    fn test_fanova_invalid_input() {
        let data = FdMatrix::zeros(10, 50);
        let groups = vec![0; 10];
        assert!(fanova(&data, &groups, 100).is_err());
        let groups = vec![0; 5];
        assert!(fanova(&data, &groups, 100).is_err());
    }
}