use crate::error::{Result, TimeSeriesError};
use crate::functional::basis::{evaluate_basis_matrix, BasisSystem, BSplineBasis};
use crate::functional::smoothing::{FunctionalData, PenalizedLeastSquares, solve_linear_system};
use scirs2_core::ndarray::{s, Array1, Array2, Axis};
use scirs2_linalg::eigh;
/// Summary of how much variance each principal component explains.
#[derive(Debug, Clone)]
pub struct VarianceExplained {
    /// Eigenvalues in the order the components were extracted (descending).
    pub eigenvalues: Array1<f64>,
    /// Per-component share of total (non-negative) eigenvalue mass.
    pub proportion: Array1<f64>,
    /// Running sum of `proportion`; last entry is ~1.0 when all mass is kept.
    pub cumulative: Array1<f64>,
}
impl VarianceExplained {
    /// Build variance-explained summaries from a vector of eigenvalues.
    ///
    /// Negative eigenvalues (numerical noise) contribute zero to the total;
    /// when every eigenvalue is non-positive, proportions remain all zero.
    pub fn from_eigenvalues(eigenvalues: &Array1<f64>) -> Self {
        let total: f64 = eigenvalues.iter().map(|&v| v.max(0.0)).sum();
        let k = eigenvalues.len();
        let mut proportion = Array1::zeros(k);
        let mut cumulative = Array1::zeros(k);
        if total > 0.0 {
            let mut running = 0.0;
            for (idx, &ev) in eigenvalues.iter().enumerate() {
                let share = ev.max(0.0) / total;
                proportion[idx] = share;
                running += share;
                cumulative[idx] = running;
            }
        }
        Self {
            eigenvalues: eigenvalues.clone(),
            proportion,
            cumulative,
        }
    }

    /// Smallest number of components whose cumulative proportion reaches
    /// `threshold`; falls back to all components when it is never reached.
    pub fn n_components_for_threshold(&self, threshold: f64) -> usize {
        self.cumulative
            .iter()
            .position(|&c| c >= threshold)
            .map(|i| i + 1)
            .unwrap_or_else(|| self.eigenvalues.len())
    }
}
/// Fitted functional PCA model.
#[derive(Debug, Clone)]
pub struct FPCAResult<B: BasisSystem + Clone> {
    /// Retained eigenvalues, descending.
    pub eigenvalues: Array1<f64>,
    /// Eigenfunctions expressed in the fitting basis, one per component.
    pub eigenfunctions: Vec<FunctionalData<B>>,
    /// `(n_obs, n_components)` matrix of principal-component scores.
    pub scores: Array2<f64>,
    /// Mean curve coefficients (all zero when fitting was uncentered).
    pub mean_coefficients: Array1<f64>,
    /// Variance-explained summaries for the retained components.
    pub variance_explained: VarianceExplained,
    /// Number of retained components.
    pub n_components: usize,
}
impl<B: BasisSystem + Clone> FPCAResult<B> {
    /// Reconstruct observation `obs_idx` from its leading `n_comp` scores:
    /// `coeff = mean + sum_c score[obs, c] * eigenfunction_c`.
    ///
    /// `n_comp` is capped at the number of fitted components.
    ///
    /// # Errors
    /// - `InvalidInput` if `obs_idx` is out of range.
    /// - `DimensionMismatch` if `basis` has a different number of basis
    ///   functions than the fitted model (previously this panicked when
    ///   indexing past the coefficient vector).
    pub fn reconstruct(
        &self,
        obs_idx: usize,
        n_comp: usize,
        basis: &B,
    ) -> Result<FunctionalData<B>> {
        if obs_idx >= self.scores.nrows() {
            return Err(TimeSeriesError::InvalidInput(format!(
                "obs_idx {} out of range (n_obs={})",
                obs_idx,
                self.scores.nrows()
            )));
        }
        let k = basis.n_basis();
        // Guard against a basis that does not match the fitted coefficient
        // dimension; the indexed loop below would otherwise panic.
        if k != self.mean_coefficients.len() {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: self.mean_coefficients.len(),
                actual: k,
            });
        }
        let n_comp = n_comp.min(self.n_components);
        let mut coeff = self.mean_coefficients.clone();
        for c in 0..n_comp {
            let score = self.scores[[obs_idx, c]];
            let ef_coeff = &self.eigenfunctions[c].coefficients;
            for j in 0..k {
                coeff[j] += score * ef_coeff[j];
            }
        }
        FunctionalData::new(basis.clone(), coeff)
    }

    /// Project new, already basis-expanded curves onto the fitted
    /// eigenfunctions, returning an `(n_new, n_components)` score matrix.
    ///
    /// NOTE(review): scores here are plain Euclidean dot products of the
    /// centered coefficients, while `FPCA::fit` computes Gram-weighted
    /// inner products — confirm which inner product callers expect.
    ///
    /// # Errors
    /// Returns `DimensionMismatch` if `x_new_coeff` has a different number
    /// of columns than the fitted coefficient dimension.
    pub fn transform(&self, x_new_coeff: &Array2<f64>) -> Result<Array2<f64>> {
        let n_new = x_new_coeff.nrows();
        let k = x_new_coeff.ncols();
        if k != self.mean_coefficients.len() {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: self.mean_coefficients.len(),
                actual: k,
            });
        }
        let mut scores = Array2::zeros((n_new, self.n_components));
        for i in 0..n_new {
            let centered: Vec<f64> = (0..k)
                .map(|j| x_new_coeff[[i, j]] - self.mean_coefficients[j])
                .collect();
            let centered_arr = Array1::from(centered);
            for c in 0..self.n_components {
                scores[[i, c]] = centered_arr.dot(&self.eigenfunctions[c].coefficients);
            }
        }
        Ok(scores)
    }
}
/// Configuration shared by the FPCA estimators.
#[derive(Debug, Clone)]
pub struct FPCAConfig {
    /// Number of components to keep; `None` keeps every component with a
    /// strictly positive eigenvalue (at least one).
    pub n_components: Option<usize>,
    /// Smoothing penalty for the per-curve presmoothing step; `None` lets
    /// `PenalizedLeastSquares` select lambda from its grid.
    pub smooth_lambda: Option<f64>,
    /// Interior knot count of the shared B-spline basis.
    pub n_interior_knots: usize,
    /// B-spline order (4 = cubic).
    pub spline_order: usize,
    /// Ridge added to the Gram matrix diagonal before Cholesky.
    pub cov_regularization: f64,
    /// Whether to subtract the mean coefficient vector before the
    /// eigendecomposition.
    pub center: bool,
}
impl Default for FPCAConfig {
fn default() -> Self {
Self {
n_components: None,
smooth_lambda: None,
n_interior_knots: 15,
spline_order: 4,
cov_regularization: 1e-8,
center: true,
}
}
}
/// Functional principal component analysis estimator for independent curves.
#[derive(Debug, Clone)]
pub struct FPCA {
    /// Estimator configuration; see [`FPCAConfig`].
    pub config: FPCAConfig,
}
impl FPCA {
    /// Create an FPCA estimator with the default configuration.
    pub fn new() -> Self {
        Self {
            config: FPCAConfig::default(),
        }
    }

    /// Create an FPCA estimator with an explicit configuration.
    pub fn with_config(config: FPCAConfig) -> Self {
        Self { config }
    }

    /// Fit functional PCA to `n` observed curves.
    ///
    /// Pipeline:
    /// 1. Smooth each raw curve `(t_list[i], y_list[i])` onto a shared
    ///    B-spline basis via penalized least squares, giving an `(n, k)`
    ///    coefficient matrix.
    /// 2. Optionally center the coefficients (per `config.center`).
    /// 3. Form the coefficient covariance (divisor `n - 1`) and solve the
    ///    generalized eigenproblem `C v = lambda G v`, where `G` is the
    ///    basis Gram matrix.
    ///    NOTE(review): the Ramsay & Silverman basis-expansion FPCA
    ///    derivation leads to `C G b = lambda b` (equivalently
    ///    `G C G b = lambda G b`), which is a different problem from
    ///    `C v = lambda G v` — confirm the intended formulation.
    /// 4. Normalize each eigenvector to unit function-space norm
    ///    (`b^T G b = 1`) and compute scores as Gram-weighted inner
    ///    products of centered coefficients with the eigenfunctions.
    ///
    /// # Errors
    /// - `InsufficientData` if fewer than 2 observations are supplied.
    /// - `DimensionMismatch` if `t_list` and `y_list` differ in length.
    /// - Propagates basis-construction, smoothing, and eigensolver errors.
    pub fn fit(
        &self,
        t_list: &[Array1<f64>],
        y_list: &[Array1<f64>],
    ) -> Result<FPCAResult<BSplineBasis>> {
        let n = t_list.len();
        if n < 2 {
            return Err(TimeSeriesError::InsufficientData {
                message: "FPCA requires at least 2 observations".to_string(),
                required: 2,
                actual: n,
            });
        }
        if y_list.len() != n {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: n,
                actual: y_list.len(),
            });
        }
        let basis = BSplineBasis::uniform(self.config.n_interior_knots, self.config.spline_order)?;
        let k = basis.n_basis();
        // Per-curve presmoothing onto the shared basis.
        let pls = PenalizedLeastSquares {
            lambda: self.config.smooth_lambda,
            penalty_order: 2,
            n_lambda_grid: 30,
            lambda_min: 1e-10,
            lambda_max: 1e4,
        };
        let mut coeff_matrix = Array2::zeros((n, k));
        for (i, (t, y)) in t_list.iter().zip(y_list.iter()).enumerate() {
            let fd = pls.fit(basis.clone(), t, y)?;
            for j in 0..k {
                coeff_matrix[[i, j]] = fd.coefficients[j];
            }
        }
        // Column means of the coefficient matrix; left at zero when
        // centering is disabled so reconstruction adds nothing back.
        let mut mean_coeff = Array1::zeros(k);
        if self.config.center {
            for j in 0..k {
                let col_sum: f64 = (0..n).map(|i| coeff_matrix[[i, j]]).sum();
                mean_coeff[j] = col_sum / n as f64;
            }
        }
        let mut c_centered = coeff_matrix.clone();
        if self.config.center {
            for i in 0..n {
                for j in 0..k {
                    c_centered[[i, j]] -= mean_coeff[j];
                }
            }
        }
        // Sample covariance of basis coefficients (unbiased divisor n - 1).
        let n_f = if n > 1 { (n - 1) as f64 } else { 1.0 };
        let cov = c_centered.t().dot(&c_centered) / n_f;
        let gram = basis.gram_matrix()?;
        // Generalized eigenproblem; see NOTE(review) in the doc comment.
        let eig_result = solve_generalized_eig(&cov, &gram, self.config.cov_regularization)?;
        let (eigenvalues_all, eigenvecs_all) = eig_result;
        // Component count: explicit request (capped at the available
        // spectrum) or every strictly positive eigenvalue, at least one.
        let n_comp = match self.config.n_components {
            Some(nc) => nc.min(eigenvalues_all.len()),
            None => eigenvalues_all
                .iter()
                .filter(|&&ev| ev > 0.0)
                .count()
                .max(1),
        };
        let mut eigenfunctions = Vec::with_capacity(n_comp);
        for c in 0..n_comp {
            let eig_coeff = eigenvecs_all.column(c).to_owned();
            // Normalize to unit L2 norm in function space: b^T G b = 1.
            // A (near-)zero vector is left as-is rather than divided by ~0.
            let norm_sq = eig_coeff.dot(&gram.dot(&eig_coeff));
            let norm = norm_sq.max(0.0).sqrt();
            let normalized = if norm > 1e-15 {
                eig_coeff.mapv(|x| x / norm)
            } else {
                eig_coeff
            };
            eigenfunctions.push(FunctionalData::new(basis.clone(), normalized)?);
        }
        // Scores: Gram-weighted inner product <x_i - mean, xi_c>.
        let mut scores = Array2::zeros((n, n_comp));
        for i in 0..n {
            let ci: Array1<f64> = c_centered.row(i).to_owned();
            let g_ci = gram.dot(&ci);
            for c in 0..n_comp {
                scores[[i, c]] = g_ci.dot(&eigenfunctions[c].coefficients);
            }
        }
        let eigenvalues = Array1::from_vec(eigenvalues_all[..n_comp].to_vec());
        let variance_explained = VarianceExplained::from_eigenvalues(&eigenvalues);
        Ok(FPCAResult {
            eigenvalues,
            eigenfunctions,
            scores,
            mean_coefficients: mean_coeff,
            variance_explained,
            n_components: n_comp,
        })
    }
}
impl Default for FPCA {
fn default() -> Self {
Self::new()
}
}
/// Fitted bivariate (paired-curve) FPCA model.
#[derive(Debug, Clone)]
pub struct BivariateFPCAResult<B: BasisSystem + Clone> {
    /// Retained joint eigenvalues, descending.
    pub eigenvalues: Array1<f64>,
    /// First-variable halves of the joint eigenfunctions.
    pub eigenfunctions_1: Vec<FunctionalData<B>>,
    /// Second-variable halves of the joint eigenfunctions.
    pub eigenfunctions_2: Vec<FunctionalData<B>>,
    /// `(n_obs, n_components)` joint scores (sum of both halves' projections).
    pub scores: Array2<f64>,
    /// Mean coefficients of the first variable.
    pub mean_coeff_1: Array1<f64>,
    /// Mean coefficients of the second variable.
    pub mean_coeff_2: Array1<f64>,
    /// Variance-explained summaries for the retained components.
    pub variance_explained: VarianceExplained,
    /// Number of retained components.
    pub n_components: usize,
}
/// FPCA for pairs of curves observed on the same subjects, capturing
/// joint variation via a 2k x 2k block covariance.
#[derive(Debug, Clone)]
pub struct BivariateFPCA {
    /// Estimator configuration; see [`FPCAConfig`].
    pub config: FPCAConfig,
}
impl BivariateFPCA {
pub fn new() -> Self {
Self {
config: FPCAConfig::default(),
}
}
pub fn fit(
&self,
t_list_1: &[Array1<f64>],
y_list_1: &[Array1<f64>],
t_list_2: &[Array1<f64>],
y_list_2: &[Array1<f64>],
) -> Result<BivariateFPCAResult<BSplineBasis>> {
let n = t_list_1.len();
if n < 2 || t_list_2.len() != n {
return Err(TimeSeriesError::InsufficientData {
message: "BivariateFPCA requires at least 2 paired observations".to_string(),
required: 2,
actual: n,
});
}
let basis = BSplineBasis::uniform(self.config.n_interior_knots, self.config.spline_order)?;
let k = basis.n_basis();
let pls = PenalizedLeastSquares {
lambda: self.config.smooth_lambda,
penalty_order: 2,
n_lambda_grid: 30,
lambda_min: 1e-10,
lambda_max: 1e4,
};
let mut coeff1 = Array2::zeros((n, k));
let mut coeff2 = Array2::zeros((n, k));
for i in 0..n {
let fd1 = pls.fit(basis.clone(), &t_list_1[i], &y_list_1[i])?;
let fd2 = pls.fit(basis.clone(), &t_list_2[i], &y_list_2[i])?;
for j in 0..k {
coeff1[[i, j]] = fd1.coefficients[j];
coeff2[[i, j]] = fd2.coefficients[j];
}
}
let mut mean1 = Array1::zeros(k);
let mut mean2 = Array1::zeros(k);
for j in 0..k {
mean1[j] = (0..n).map(|i| coeff1[[i, j]]).sum::<f64>() / n as f64;
mean2[j] = (0..n).map(|i| coeff2[[i, j]]).sum::<f64>() / n as f64;
}
for i in 0..n {
for j in 0..k {
coeff1[[i, j]] -= mean1[j];
coeff2[[i, j]] -= mean2[j];
}
}
let n_f = if n > 1 { (n - 1) as f64 } else { 1.0 };
let c11 = coeff1.t().dot(&coeff1) / n_f;
let c12 = coeff1.t().dot(&coeff2) / n_f;
let c21 = coeff2.t().dot(&coeff1) / n_f;
let c22 = coeff2.t().dot(&coeff2) / n_f;
let mut block_cov = Array2::zeros((2 * k, 2 * k));
for i in 0..k {
for j in 0..k {
block_cov[[i, j]] = c11[[i, j]];
block_cov[[i, j + k]] = c12[[i, j]];
block_cov[[i + k, j]] = c21[[i, j]];
block_cov[[i + k, j + k]] = c22[[i, j]];
}
}
let gram = basis.gram_matrix()?;
let mut block_gram = Array2::zeros((2 * k, 2 * k));
for i in 0..k {
for j in 0..k {
block_gram[[i, j]] = gram[[i, j]];
block_gram[[i + k, j + k]] = gram[[i, j]];
}
}
let (eigenvalues_all, eigenvecs_all) =
solve_generalized_eig(&block_cov, &block_gram, self.config.cov_regularization)?;
let n_comp = match self.config.n_components {
Some(nc) => nc.min(eigenvalues_all.len()),
None => eigenvalues_all.iter().filter(|&&ev| ev > 0.0).count().max(1),
};
let mut eigenfunctions_1 = Vec::with_capacity(n_comp);
let mut eigenfunctions_2 = Vec::with_capacity(n_comp);
for c in 0..n_comp {
let ev = eigenvecs_all.column(c).to_owned();
let v1: Array1<f64> = ev.slice(s![..k]).to_owned();
let v2: Array1<f64> = ev.slice(s![k..]).to_owned();
let norm1_sq = v1.dot(&gram.dot(&v1));
let norm2_sq = v2.dot(&gram.dot(&v2));
let joint_norm = (norm1_sq + norm2_sq).sqrt().max(1e-15);
let nv1 = v1.mapv(|x| x / joint_norm);
let nv2 = v2.mapv(|x| x / joint_norm);
eigenfunctions_1.push(FunctionalData::new(basis.clone(), nv1)?);
eigenfunctions_2.push(FunctionalData::new(basis.clone(), nv2)?);
}
let mut scores = Array2::zeros((n, n_comp));
for i in 0..n {
let ci1: Array1<f64> = coeff1.row(i).to_owned();
let ci2: Array1<f64> = coeff2.row(i).to_owned();
let gci1 = gram.dot(&ci1);
let gci2 = gram.dot(&ci2);
for c in 0..n_comp {
let s1 = gci1.dot(&eigenfunctions_1[c].coefficients);
let s2 = gci2.dot(&eigenfunctions_2[c].coefficients);
scores[[i, c]] = s1 + s2;
}
}
let eigenvalues = Array1::from_vec(eigenvalues_all[..n_comp].to_vec());
let variance_explained = VarianceExplained::from_eigenvalues(&eigenvalues);
Ok(BivariateFPCAResult {
eigenvalues,
eigenfunctions_1,
eigenfunctions_2,
scores,
mean_coeff_1: mean1,
mean_coeff_2: mean2,
variance_explained,
n_components: n_comp,
})
}
}
impl Default for BivariateFPCA {
fn default() -> Self {
Self::new()
}
}
/// Fitted multilevel (two-level) FPCA model.
#[derive(Debug, Clone)]
pub struct MultilevelFPCAResult<B: BasisSystem + Clone> {
    /// Between-subject eigenvalues, descending.
    pub eigenvalues_between: Array1<f64>,
    /// Within-subject eigenvalues, descending.
    pub eigenvalues_within: Array1<f64>,
    /// Between-subject eigenfunctions in the fitting basis.
    pub eigenfunctions_between: Vec<FunctionalData<B>>,
    /// Within-subject eigenfunctions in the fitting basis.
    pub eigenfunctions_within: Vec<FunctionalData<B>>,
    /// `(n_subjects, n_comp_between)` subject-level scores.
    pub scores_between: Array2<f64>,
    /// `(n_total_observations, n_comp_within)` observation-level scores.
    pub scores_within: Array2<f64>,
    /// `(n_subjects, k)` per-subject mean coefficients.
    pub subject_mean_coefficients: Array2<f64>,
    /// Grand mean coefficients over all observations.
    pub grand_mean_coefficients: Array1<f64>,
}
/// Two-level FPCA for repeated curves per subject: variation is split into
/// between-subject and within-subject components.
#[derive(Debug, Clone)]
pub struct MultilevelFPCA {
    /// Shared estimator configuration; see [`FPCAConfig`].
    pub config: FPCAConfig,
    /// Between-level component count; `None` = all positive eigenvalues.
    pub n_comp_between: Option<usize>,
    /// Within-level component count; `None` = all positive eigenvalues.
    pub n_comp_within: Option<usize>,
}
impl MultilevelFPCA {
    /// Create a multilevel FPCA estimator with default configuration and
    /// automatic component selection at both levels.
    pub fn new() -> Self {
        Self {
            config: FPCAConfig::default(),
            n_comp_between: None,
            n_comp_within: None,
        }
    }

    /// Fit two-level FPCA to repeatedly observed subjects.
    ///
    /// `subjects[i]` holds the `(t, y)` curves observed for subject `i`.
    /// Each curve is presmoothed onto a shared B-spline basis; coefficient
    /// variation is then split into a between-subject part (subject mean
    /// minus grand mean) and a within-subject part (observation minus its
    /// subject mean), and a separate generalized eigenproblem is solved
    /// for each part.
    ///
    /// NOTE(review): as in `FPCA::fit`, the eigenproblem solved is
    /// `C v = lambda G v`; confirm against the basis-expansion FPCA
    /// formulation `C G b = lambda b`.
    ///
    /// # Errors
    /// - `InsufficientData` if fewer than 2 subjects are supplied.
    /// - `InvalidInput` if any subject has no observations.
    /// - Propagates basis, smoothing, and eigensolver errors.
    pub fn fit(
        &self,
        subjects: &[Vec<(Array1<f64>, Array1<f64>)>],
    ) -> Result<MultilevelFPCAResult<BSplineBasis>> {
        let n_subjects = subjects.len();
        if n_subjects < 2 {
            return Err(TimeSeriesError::InsufficientData {
                message: "MultilevelFPCA requires at least 2 subjects".to_string(),
                required: 2,
                actual: n_subjects,
            });
        }
        let basis =
            BSplineBasis::uniform(self.config.n_interior_knots, self.config.spline_order)?;
        let k = basis.n_basis();
        let pls = PenalizedLeastSquares {
            lambda: self.config.smooth_lambda,
            penalty_order: 2,
            n_lambda_grid: 20,
            lambda_min: 1e-10,
            lambda_max: 1e4,
        };
        // Smooth every observation of every subject; remember which subject
        // each row of the coefficient matrix belongs to.
        // (Fixed: removed a `n_per_subject` vector that was built but never
        // used.)
        let mut all_coeffs: Vec<Array1<f64>> = Vec::new();
        let mut subject_indices: Vec<usize> = Vec::new();
        for (i, subject_data) in subjects.iter().enumerate() {
            if subject_data.is_empty() {
                return Err(TimeSeriesError::InvalidInput(format!(
                    "Subject {} has no observations",
                    i
                )));
            }
            for (t, y) in subject_data.iter() {
                let fd = pls.fit(basis.clone(), t, y)?;
                all_coeffs.push(fd.coefficients);
                subject_indices.push(i);
            }
        }
        let n_total = all_coeffs.len();
        let mut coeff_matrix = Array2::zeros((n_total, k));
        for (i, c) in all_coeffs.iter().enumerate() {
            for j in 0..k {
                coeff_matrix[[i, j]] = c[j];
            }
        }
        // Grand mean over all observations (observation-weighted, not
        // subject-weighted).
        let grand_mean: Array1<f64> = coeff_matrix
            .mean_axis(Axis(0))
            .ok_or_else(|| TimeSeriesError::ComputationError("mean computation failed".to_string()))?;
        let mut subject_means = Array2::zeros((n_subjects, k));
        for i in 0..n_subjects {
            let indices: Vec<usize> = subject_indices
                .iter()
                .enumerate()
                .filter(|(_, &s)| s == i)
                .map(|(idx, _)| idx)
                .collect();
            for j in 0..k {
                let sum: f64 = indices.iter().map(|&idx| coeff_matrix[[idx, j]]).sum();
                subject_means[[i, j]] = sum / indices.len() as f64;
            }
        }
        // Between-subject deviations: subject mean minus grand mean.
        let mut between_devs = Array2::zeros((n_subjects, k));
        for i in 0..n_subjects {
            for j in 0..k {
                between_devs[[i, j]] = subject_means[[i, j]] - grand_mean[j];
            }
        }
        // Within-subject deviations: observation minus its subject mean.
        let mut within_devs = Array2::zeros((n_total, k));
        for (obs_idx, &subj_idx) in subject_indices.iter().enumerate() {
            for j in 0..k {
                within_devs[[obs_idx, j]] =
                    coeff_matrix[[obs_idx, j]] - subject_means[[subj_idx, j]];
            }
        }
        let gram = basis.gram_matrix()?;
        // Between-level covariance with n_subjects - 1 degrees of freedom.
        let ns_f = if n_subjects > 1 {
            (n_subjects - 1) as f64
        } else {
            1.0
        };
        let cov_between = between_devs.t().dot(&between_devs) / ns_f;
        let (ev_between, vecs_between) =
            solve_generalized_eig(&cov_between, &gram, self.config.cov_regularization)?;
        let n_comp_b = match self.n_comp_between {
            Some(nc) => nc.min(ev_between.len()),
            None => ev_between.iter().filter(|&&v| v > 0.0).count().max(1),
        };
        // Within-level covariance with n_total - n_subjects degrees of
        // freedom (subject means were estimated from the data).
        let nw_f = if n_total > n_subjects {
            (n_total - n_subjects) as f64
        } else {
            1.0
        };
        let cov_within = within_devs.t().dot(&within_devs) / nw_f;
        let (ev_within, vecs_within) =
            solve_generalized_eig(&cov_within, &gram, self.config.cov_regularization)?;
        let n_comp_w = match self.n_comp_within {
            Some(nc) => nc.min(ev_within.len()),
            None => ev_within.iter().filter(|&&v| v > 0.0).count().max(1),
        };
        // Normalize eigenfunctions to unit function-space norm (b^T G b = 1).
        let mut ef_between = Vec::with_capacity(n_comp_b);
        for c in 0..n_comp_b {
            let v = vecs_between.column(c).to_owned();
            let norm = v.dot(&gram.dot(&v)).max(0.0).sqrt().max(1e-15);
            let nv = v.mapv(|x| x / norm);
            ef_between.push(FunctionalData::new(basis.clone(), nv)?);
        }
        let mut ef_within = Vec::with_capacity(n_comp_w);
        for c in 0..n_comp_w {
            let v = vecs_within.column(c).to_owned();
            let norm = v.dot(&gram.dot(&v)).max(0.0).sqrt().max(1e-15);
            let nv = v.mapv(|x| x / norm);
            ef_within.push(FunctionalData::new(basis.clone(), nv)?);
        }
        // Scores are Gram-weighted projections of the deviations.
        let mut scores_between = Array2::zeros((n_subjects, n_comp_b));
        for i in 0..n_subjects {
            let dev: Array1<f64> = between_devs.row(i).to_owned();
            let gdev = gram.dot(&dev);
            for c in 0..n_comp_b {
                scores_between[[i, c]] = gdev.dot(&ef_between[c].coefficients);
            }
        }
        let mut scores_within = Array2::zeros((n_total, n_comp_w));
        for i in 0..n_total {
            let dev: Array1<f64> = within_devs.row(i).to_owned();
            let gdev = gram.dot(&dev);
            for c in 0..n_comp_w {
                scores_within[[i, c]] = gdev.dot(&ef_within[c].coefficients);
            }
        }
        Ok(MultilevelFPCAResult {
            eigenvalues_between: Array1::from_vec(ev_between[..n_comp_b].to_vec()),
            eigenvalues_within: Array1::from_vec(ev_within[..n_comp_w].to_vec()),
            eigenfunctions_between: ef_between,
            eigenfunctions_within: ef_within,
            scores_between,
            scores_within,
            subject_mean_coefficients: subject_means,
            grand_mean_coefficients: grand_mean,
        })
    }
}
impl Default for MultilevelFPCA {
fn default() -> Self {
Self::new()
}
}
/// Solve the symmetric generalized eigenproblem `A v = lambda B v`.
///
/// `B` is ridge-regularized by `reg` on the diagonal, factored as
/// `B = L L^T`, and the problem is reduced to the ordinary symmetric
/// problem `(L^-1 A L^-T) u = lambda u` with `v = L^-T u`. The reduced
/// matrix is explicitly symmetrized to absorb round-off before `eigh`.
/// Eigenpairs are returned sorted by descending eigenvalue.
///
/// # Errors
/// - `DimensionMismatch` when `a` and `b` are not square matrices of the
///   same size (previously this panicked via `assert_eq!` in a
///   `Result`-returning function).
/// - `ComputationError` if the dense eigendecomposition fails.
fn solve_generalized_eig(
    a: &Array2<f64>,
    b: &Array2<f64>,
    reg: f64,
) -> Result<(Vec<f64>, Array2<f64>)> {
    let k = a.nrows();
    if a.ncols() != k || b.nrows() != k || b.ncols() != k {
        return Err(TimeSeriesError::DimensionMismatch {
            expected: k,
            actual: b.nrows(),
        });
    }
    // Ridge the metric so the Cholesky factorization is well defined.
    let mut b_reg = b.clone();
    for i in 0..k {
        b_reg[[i, i]] += reg;
    }
    let l = cholesky_lower(&b_reg)?;
    let l_inv = lower_triangular_inv(&l)?;
    // M = L^-1 A L^-T is similar (in the generalized sense) to (A, B).
    let a_l_inv_t = a.dot(&l_inv.t());
    let m = l_inv.dot(&a_l_inv_t);
    let m_sym = (&m + &m.t()) / 2.0;
    let (eigenvalues_raw, eigenvecs_raw) = eigh(&m_sym.view(), None).map_err(|e| {
        TimeSeriesError::ComputationError(format!("Eigenvalue decomposition failed: {}", e))
    })?;
    // Back-transform eigenvectors: v = L^-T u.
    let eigenvecs = l_inv.t().dot(&eigenvecs_raw);
    // Sort eigenpairs by descending eigenvalue (eigh's order is ascending
    // or unspecified depending on backend).
    let mut indexed: Vec<(usize, f64)> = eigenvalues_raw
        .iter()
        .enumerate()
        .map(|(i, &ev)| (i, ev))
        .collect();
    indexed.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
    let sorted_eigenvalues: Vec<f64> = indexed.iter().map(|(_, ev)| *ev).collect();
    let mut sorted_eigenvecs = Array2::zeros((k, k));
    for (new_col, &(old_col, _)) in indexed.iter().enumerate() {
        let col = eigenvecs.column(old_col).to_owned();
        for i in 0..k {
            sorted_eigenvecs[[i, new_col]] = col[i];
        }
    }
    Ok((sorted_eigenvalues, sorted_eigenvecs))
}
/// Compute the lower-triangular Cholesky factor `L` of `a` (`a = L L^T`).
///
/// Non-positive pivots — which appear when `a` is only semi-definite up to
/// round-off — are clamped to `1e-12` before the square root, so the
/// factorization always succeeds for symmetric input.
fn cholesky_lower(a: &Array2<f64>) -> Result<Array2<f64>> {
    let n = a.nrows();
    let mut lower = Array2::zeros((n, n));
    for row in 0..n {
        for col in 0..=row {
            // Subtract the already-computed partial inner product from the
            // source entry (same left-to-right order as the textbook loop).
            let residual = (0..col)
                .fold(a[[row, col]], |acc, p| acc - lower[[row, p]] * lower[[col, p]]);
            lower[[row, col]] = if row == col {
                // Clamp a non-positive pivot so sqrt stays real.
                let pivot = if residual <= 0.0 { 1e-12 } else { residual };
                pivot.sqrt()
            } else {
                residual / lower[[col, col]]
            };
        }
    }
    Ok(lower)
}
/// Invert a lower-triangular matrix by forward substitution.
///
/// Diagonal entries with magnitude <= 1e-15 are treated as singular: the
/// corresponding inverse entries are set to 0 instead of producing
/// infinities (callers regularize the metric, so this is a last-resort
/// guard).
fn lower_triangular_inv(l: &Array2<f64>) -> Result<Array2<f64>> {
    let n = l.nrows();
    let mut inv = Array2::zeros((n, n));
    for j in 0..n {
        // Fixed: removed a dead first assignment to inv[[j, j]] whose
        // expression (`.max(1e-15).min(-1e-15).abs()...`) was nonsensical
        // and was immediately overwritten by the statement below.
        inv[[j, j]] = if l[[j, j]].abs() > 1e-15 {
            1.0 / l[[j, j]]
        } else {
            0.0
        };
        for i in (j + 1)..n {
            // Forward substitution for column j below the diagonal.
            let mut sum = 0.0;
            for p in j..i {
                sum += l[[i, p]] * inv[[p, j]];
            }
            inv[[i, j]] = if l[[i, i]].abs() > 1e-15 {
                -sum / l[[i, i]]
            } else {
                0.0
            };
        }
    }
    Ok(inv)
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::f64::consts::PI;

    /// Build `n_obs` sinusoidal curves on [0, 2*pi] with varying phase and
    /// amplitude, sampled on a common 50-point grid.
    fn make_fpca_data(n_obs: usize) -> (Vec<Array1<f64>>, Vec<Array1<f64>>) {
        let n_t = 50;
        let t: Array1<f64> = Array1::from_vec(
            (0..n_t).map(|i| i as f64 / (n_t - 1) as f64 * 2.0 * PI).collect(),
        );
        let mut t_list = Vec::new();
        let mut y_list = Vec::new();
        for i in 0..n_obs {
            let phase = i as f64 * 0.3;
            let amplitude = 1.0 + (i as f64 / n_obs as f64) * 0.5;
            let y: Array1<f64> = t.mapv(|ti| amplitude * (ti + phase).sin());
            t_list.push(t.clone());
            y_list.push(y);
        }
        (t_list, y_list)
    }

    /// Fit succeeds and the score matrix has the requested shape.
    #[test]
    fn test_fpca_basic() {
        let (t_list, y_list) = make_fpca_data(15);
        let fpca = FPCA::with_config(FPCAConfig {
            n_components: Some(3),
            smooth_lambda: Some(1e-4),
            n_interior_knots: 8,
            ..Default::default()
        });
        let result = fpca.fit(&t_list, &y_list).expect("FPCA fit failed");
        assert_eq!(result.n_components, 3);
        assert_eq!(result.scores.nrows(), 15);
        assert_eq!(result.scores.ncols(), 3);
    }

    /// Cumulative variance explained never exceeds 1 (up to round-off).
    #[test]
    fn test_fpca_variance_explained() {
        let (t_list, y_list) = make_fpca_data(20);
        let fpca = FPCA::with_config(FPCAConfig {
            n_components: Some(5),
            smooth_lambda: Some(1e-4),
            n_interior_knots: 8,
            ..Default::default()
        });
        let result = fpca.fit(&t_list, &y_list).expect("FPCA fit");
        let last = result.variance_explained.cumulative[result.n_components - 1];
        assert!(last <= 1.0 + 1e-10);
    }

    /// Reconstruction with a matching basis evaluates to a finite value.
    #[test]
    fn test_fpca_reconstruct() {
        let (t_list, y_list) = make_fpca_data(12);
        let fpca = FPCA::with_config(FPCAConfig {
            n_components: Some(3),
            smooth_lambda: Some(1e-4),
            n_interior_knots: 6,
            ..Default::default()
        });
        let result = fpca.fit(&t_list, &y_list).expect("FPCA fit");
        // Rebuild a basis with the same knots/order used in the fit above.
        let basis = BSplineBasis::uniform(6, 4).expect("basis");
        let recon = result.reconstruct(0, 3, &basis).expect("reconstruct");
        let val = recon.eval(1.0).expect("eval");
        assert!(val.is_finite());
    }

    /// Threshold search returns a count within the valid component range.
    #[test]
    fn test_variance_explained_threshold() {
        let ev = Array1::from_vec(vec![4.0, 2.0, 1.0, 0.5]);
        let ve = VarianceExplained::from_eigenvalues(&ev);
        let n = ve.n_components_for_threshold(0.8);
        assert!(n >= 1 && n <= 4);
    }
}