use scirs2_core::ndarray::{Array1, Array2, ArrayD, IxDyn};
use std::fmt;
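// Tensor decomposition toolkit: truncated SVD (power iteration with
// deflation), mode-n unfold/fold, Tucker-1, CP-ALS for 3-mode tensors, and
// HOSVD, all operating on `f64` ndarray types from `scirs2_core`.
/// Errors shared by every decomposition routine in this module.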
#[derive(Debug, Clone)]
pub enum DecompositionError {
ShapeError(String),
ConvergenceFailure { iterations: usize, residual: f64 },
SingularMatrix,
InvalidRank { rank: usize, max_rank: usize },
EmptyTensor,
NonMatrixInput { ndim: usize },
}
impl fmt::Display for DecompositionError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::ShapeError(msg) => write!(f, "Shape error: {msg}"),
Self::ConvergenceFailure {
iterations,
residual,
} => write!(
f,
"Convergence failure after {iterations} iterations (residual={residual:.3e})"
),
Self::SingularMatrix => write!(f, "Matrix is numerically singular"),
Self::InvalidRank { rank, max_rank } => {
write!(f, "Invalid rank {rank}: must be in 1..={max_rank}")
}
Self::EmptyTensor => write!(f, "Tensor has at least one zero-length dimension"),
Self::NonMatrixInput { ndim } => {
write!(f, "Expected a 2-D matrix, got a {ndim}-D tensor")
}
}
}
}
impl std::error::Error for DecompositionError {}
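/// Rank-`k` truncated SVD factors: `u` is m×k, `s` holds the `k` leading
/// singular values in (approximately) descending order, `vt` is k×n, and the
/// low-rank approximation is `u · diag(s) · vt`.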
#[derive(Debug, Clone)]
pub struct TruncatedSvd {
pub u: Array2<f64>,
pub s: Array1<f64>,
pub vt: Array2<f64>,
pub rank: usize,
pub explained_variance_ratio: f64,
}
impl TruncatedSvd {
pub fn reconstruct(&self) -> Array2<f64> {
let m = self.u.nrows();
let n = self.vt.ncols();
let mut result = Array2::<f64>::zeros((m, n));
for i in 0..self.rank {
let u_col = self.u.column(i);
let vt_row = self.vt.row(i);
let s_i = self.s[i];
for r in 0..m {
for c in 0..n {
result[[r, c]] += s_i * u_col[r] * vt_row[c];
}
}
}
result
}
pub fn reconstruction_error(&self, original: &Array2<f64>) -> f64 {
let approx = self.reconstruct();
        let diff = original - &approx;
frobenius_norm_2d(&diff)
}
}
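/// Rank-`k` truncated SVD via sequential power iteration with deflation.
/// For each component, iterate `v <- normalize(Aᵀ(A·v))` until both the
/// singular-value estimate and `v` move by less than `tol` (or `n_iter`
/// sweeps elapse), then deflate `A <- A - σ·u·vᵀ` and continue. Deflation
/// can degrade accuracy when singular values cluster, so trailing
/// components should be treated as approximate.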
pub fn truncated_svd(
matrix: &Array2<f64>,
k: usize,
n_iter: usize,
tol: f64,
) -> Result<TruncatedSvd, DecompositionError> {
let (m, n) = (matrix.nrows(), matrix.ncols());
if m == 0 || n == 0 {
return Err(DecompositionError::EmptyTensor);
}
let max_rank = m.min(n);
if k == 0 || k > max_rank {
return Err(DecompositionError::InvalidRank { rank: k, max_rank });
}
let total_sq: f64 = matrix.iter().map(|x| x * x).sum();
let mut u_vecs: Vec<Vec<f64>> = Vec::with_capacity(k);
let mut s_vals: Vec<f64> = Vec::with_capacity(k);
let mut v_vecs: Vec<Vec<f64>> = Vec::with_capacity(k);
let mut work = matrix.to_owned();
    for comp in 0..k {
        let mut v = init_vector(n, comp);
normalize_vec(&mut v);
let mut prev_sigma = f64::INFINITY;
for _iter in 0..n_iter {
let u = mat_vec_mul(&work, &v);
let mut v_new = mat_t_vec_mul(&work, &u);
normalize_vec(&mut v_new);
let diff: f64 = v_new
.iter()
.zip(v.iter())
.map(|(a, b)| (a - b).abs())
.sum::<f64>();
v = v_new;
let sigma_est = {
let u_tmp = mat_vec_mul(&work, &v);
dot_product(&u_tmp, &u_tmp).sqrt()
};
if (sigma_est - prev_sigma).abs() < tol && diff < tol {
break;
}
prev_sigma = sigma_est;
}
let mut u = mat_vec_mul(&work, &v);
let sigma = vec_norm(&u);
if sigma < 1e-14 {
u = vec![0.0; m];
} else {
u.iter_mut().for_each(|x| *x /= sigma);
}
for r in 0..m {
for c in 0..n {
work[[r, c]] -= sigma * u[r] * v[c];
}
}
u_vecs.push(u);
s_vals.push(sigma);
v_vecs.push(v);
}
let u_arr = Array2::from_shape_fn((m, k), |(r, c)| u_vecs[c][r]);
let s_arr = Array1::from_vec(s_vals.clone());
let vt_arr = Array2::from_shape_fn((k, n), |(r, c)| v_vecs[r][c]);
let captured_sq: f64 = s_vals.iter().map(|s| s * s).sum();
let explained_variance_ratio = if total_sq < 1e-30 {
1.0
} else {
(captured_sq / total_sq).min(1.0)
};
Ok(TruncatedSvd {
u: u_arr,
s: s_arr,
vt: vt_arr,
rank: k,
explained_variance_ratio,
})
}
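/// Mode-`mode` unfolding: rows index the chosen mode and columns enumerate
/// the remaining modes in their original order, row-major. For a tensor of
/// shape (I, J, K), mode-0 sends element (i, j, k) to row `i`,
/// column `j*K + k`.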
pub fn unfold(tensor: &ArrayD<f64>, mode: usize) -> Result<Array2<f64>, DecompositionError> {
let ndim = tensor.ndim();
if ndim == 0 {
return Err(DecompositionError::ShapeError(
"Cannot unfold a 0-D scalar tensor".into(),
));
}
if mode >= ndim {
return Err(DecompositionError::ShapeError(format!(
"mode {mode} out of range for {ndim}-D tensor"
)));
}
if tensor.shape().contains(&0) {
return Err(DecompositionError::EmptyTensor);
}
let shape = tensor.shape();
let n_rows = shape[mode];
let n_cols: usize = shape
.iter()
.enumerate()
.filter(|&(i, _)| i != mode)
.map(|(_i, &d)| d)
.product();
let mut perm: Vec<usize> = Vec::with_capacity(ndim);
perm.push(mode);
for i in 0..ndim {
if i != mode {
perm.push(i);
}
}
let permuted = tensor.view().permuted_axes(perm);
let data: Vec<f64> = permuted.iter().copied().collect();
Array2::from_shape_vec((n_rows, n_cols), data)
.map_err(|e| DecompositionError::ShapeError(e.to_string()))
}
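/// Inverse of [`unfold`]: reshapes `matrix` back into a tensor of `shape`,
/// assuming the same mode-first, row-major column ordering that [`unfold`]
/// produces.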
pub fn fold(
matrix: &Array2<f64>,
mode: usize,
shape: &[usize],
) -> Result<ArrayD<f64>, DecompositionError> {
let ndim = shape.len();
if ndim == 0 {
return Err(DecompositionError::ShapeError(
"Cannot fold to a 0-D tensor".into(),
));
}
if mode >= ndim {
return Err(DecompositionError::ShapeError(format!(
"mode {mode} out of range for {ndim}-D shape"
)));
}
let n_rows = shape[mode];
let n_cols: usize = shape
.iter()
.enumerate()
.filter(|&(i, _)| i != mode)
.map(|(_i, &d)| d)
.product();
if matrix.nrows() != n_rows || matrix.ncols() != n_cols {
return Err(DecompositionError::ShapeError(format!(
"matrix shape {}×{} does not match expected {}×{} for mode={mode}, shape={shape:?}",
matrix.nrows(),
matrix.ncols(),
n_rows,
n_cols
)));
}
let mut perm_shape: Vec<usize> = Vec::with_capacity(ndim);
perm_shape.push(shape[mode]);
for (i, &d) in shape.iter().enumerate() {
if i != mode {
perm_shape.push(d);
}
}
let data: Vec<f64> = matrix.iter().copied().collect();
let permuted = ArrayD::from_shape_vec(IxDyn(&perm_shape), data)
.map_err(|e| DecompositionError::ShapeError(e.to_string()))?;
let mut inv_perm = vec![0usize; ndim];
inv_perm[mode] = 0;
let mut pos = 1usize;
for (i, slot) in inv_perm.iter_mut().enumerate().take(ndim) {
if i != mode {
*slot = pos;
pos += 1;
}
}
let result = permuted.view().permuted_axes(inv_perm).to_owned();
Ok(result)
}
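/// Tucker-1 result: `core` is the tensor compressed along `mode` and
/// `factor` (original_dim × rank) maps the core back to the original size.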
#[derive(Debug, Clone)]
pub struct Tucker1Result {
pub core: ArrayD<f64>,
pub factor: Array2<f64>,
pub mode: usize,
pub rank: usize,
pub compression_ratio: f64,
}
impl Tucker1Result {
pub fn reconstruct(&self) -> ArrayD<f64> {
let core_unfolded = match unfold(&self.core, self.mode) {
Ok(m) => m,
Err(_) => return self.core.clone(),
};
let reconstructed_mat = self.factor.dot(&core_unfolded);
let mut orig_shape: Vec<usize> = self.core.shape().to_vec();
orig_shape[self.mode] = self.factor.nrows();
match fold(&reconstructed_mat, self.mode, &orig_shape) {
Ok(t) => t,
Err(_) => ArrayD::zeros(IxDyn(&orig_shape)),
}
}
pub fn reconstruction_error(&self, original: &ArrayD<f64>) -> f64 {
let approx = self.reconstruct();
        let diff = original - &approx;
frobenius_norm_nd(&diff)
}
}
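/// Single-mode Tucker (Tucker-1) decomposition. The factor `U` comes from a
/// rank-`rank` truncated SVD of the mode-`mode` unfolding, and the core is
/// `G = fold(Uᵀ · X_(mode))`, giving the model `X ≈ G ×_mode U`.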
pub fn tucker1(
tensor: &ArrayD<f64>,
mode: usize,
rank: usize,
) -> Result<Tucker1Result, DecompositionError> {
let ndim = tensor.ndim();
if ndim == 0 {
return Err(DecompositionError::ShapeError("0-D tensor".into()));
}
if mode >= ndim {
return Err(DecompositionError::ShapeError(format!(
"mode {mode} out of range for {ndim}-D tensor"
)));
}
let orig_dim = tensor.shape()[mode];
if orig_dim == 0 {
return Err(DecompositionError::EmptyTensor);
}
if rank == 0 || rank > orig_dim {
return Err(DecompositionError::InvalidRank {
rank,
max_rank: orig_dim,
});
}
    let unfolded = unfold(tensor, mode)?;
    let svd = truncated_svd(&unfolded, rank, 30, 1e-10)?;
let factor = svd.u.clone();
    let u_t = svd.u.t().to_owned();
    let core_unfolded = u_t.dot(&unfolded);
let mut core_shape: Vec<usize> = tensor.shape().to_vec();
core_shape[mode] = rank;
let core = fold(&core_unfolded, mode, &core_shape)?;
let original_elements: usize = tensor.shape().iter().product();
let core_elements: usize = core_shape.iter().product();
let compression_ratio = if core_elements == 0 {
1.0
} else {
original_elements as f64 / core_elements as f64
};
Ok(Tucker1Result {
core,
factor,
mode,
rank,
compression_ratio,
})
}
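/// Rank-R CP (CANDECOMP/PARAFAC) model with unit-norm factor columns:
/// `X[i, j, k] ≈ Σ_r weights[r] · A[i, r] · B[j, r] · C[k, r]`.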
#[derive(Debug, Clone)]
pub struct CpDecomposition {
pub factors: Vec<Array2<f64>>,
pub weights: Array1<f64>,
pub rank: usize,
pub num_modes: usize,
pub iterations: usize,
pub final_residual: f64,
pub converged: bool,
}
impl CpDecomposition {
pub fn reconstruct(&self) -> ArrayD<f64> {
if self.factors.is_empty() {
return ArrayD::zeros(IxDyn(&[]));
}
let shape: Vec<usize> = self.factors.iter().map(|f| f.nrows()).collect();
let total: usize = shape.iter().product();
let ndim = self.num_modes;
let mut data = vec![0.0f64; total];
for r in 0..self.rank {
let w = self.weights[r];
            for (flat, slot) in data.iter_mut().enumerate() {
                // Decode the flat row-major offset into one index per mode.
                let mut indices = vec![0usize; ndim];
                let mut remaining = flat;
                for d in (0..ndim).rev() {
                    indices[d] = remaining % shape[d];
                    remaining /= shape[d];
                }
                let contrib: f64 = self
                    .factors
                    .iter()
                    .zip(indices.iter())
                    .map(|(f, &i)| f[[i, r]])
                    .product();
*slot += w * contrib;
}
}
ArrayD::from_shape_vec(IxDyn(&shape), data).unwrap_or_else(|_| ArrayD::zeros(IxDyn(&shape)))
}
pub fn reconstruction_error(&self, original: &ArrayD<f64>) -> f64 {
let approx = self.reconstruct();
        let diff = original - &approx;
frobenius_norm_nd(&diff)
}
pub fn explained_variance(&self, original: &ArrayD<f64>) -> f64 {
let original_sq: f64 = original.iter().map(|x| x * x).sum();
if original_sq < 1e-30 {
return 1.0;
}
let residual = self.reconstruction_error(original);
let explained = 1.0 - (residual * residual) / original_sq;
explained.clamp(0.0, 1.0)
}
}
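/// Column-wise Khatri-Rao product: for each column `r`,
/// `(A ⊙ B)[i*n + j, r] = A[i, r] * B[j, r]` where `n = B.nrows()`.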
fn khatri_rao(a: &Array2<f64>, b: &Array2<f64>) -> Array2<f64> {
let (m, r_a) = (a.nrows(), a.ncols());
let (n, r_b) = (b.nrows(), b.ncols());
debug_assert_eq!(
r_a, r_b,
"Khatri-Rao: both matrices must have same number of columns"
);
let r = r_a.min(r_b);
let mut result = Array2::<f64>::zeros((m * n, r));
for col in 0..r {
for i in 0..m {
for j in 0..n {
result[[i * n + j, col]] = a[[i, col]] * b[[j, col]];
}
}
}
result
}
fn hadamard(a: &Array2<f64>, b: &Array2<f64>) -> Array2<f64> {
a * b
}
fn gram(a: &Array2<f64>) -> Array2<f64> {
a.t().dot(a)
}
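/// Inverts a small square Gram matrix after adding `lambda` to the diagonal
/// (Tikhonov regularization), via Gauss-Jordan elimination with partial
/// pivoting on the augmented system `[M + λI | I]`.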
fn pinv_small(m: &Array2<f64>, lambda: f64) -> Result<Array2<f64>, DecompositionError> {
let n = m.nrows();
debug_assert_eq!(n, m.ncols());
let mut reg = m.to_owned();
for i in 0..n {
reg[[i, i]] += lambda;
}
let mut aug: Vec<Vec<f64>> = (0..n)
.map(|i| {
let mut row: Vec<f64> = reg.row(i).to_vec();
for j in 0..n {
row.push(if i == j { 1.0 } else { 0.0 });
}
row
})
.collect();
for col in 0..n {
let pivot = (col..n).max_by(|&a, &b| {
aug[a][col]
.abs()
.partial_cmp(&aug[b][col].abs())
.unwrap_or(std::cmp::Ordering::Equal)
});
let pivot = pivot.ok_or(DecompositionError::SingularMatrix)?;
aug.swap(col, pivot);
let diag = aug[col][col];
if diag.abs() < 1e-15 {
return Err(DecompositionError::SingularMatrix);
}
let inv_diag = 1.0 / diag;
for elem in aug[col].iter_mut().take(2 * n) {
*elem *= inv_diag;
}
for row in 0..n {
if row != col {
let factor = aug[row][col];
let col_row: Vec<f64> = aug[col][..2 * n].to_vec();
for (j, &cv) in col_row.iter().enumerate().take(2 * n) {
aug[row][j] -= factor * cv;
}
}
}
}
let inv = Array2::from_shape_fn((n, n), |(i, j)| aug[i][n + j]);
Ok(inv)
}
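/// CP decomposition of a 3-mode tensor by alternating least squares (ALS).
/// Each sweep solves the regularized normal equations one mode at a time,
/// e.g. `A <- X_(0)(B ⊙ C)[(BᵀB) ∗ (CᵀC) + λI]⁻¹` (∗ is the element-wise
/// Hadamard product), then rescales factor columns to unit norm, folding the
/// norms into `weights`. The residual `‖X − X̂‖` is tracked without
/// materializing `X̂`, using `‖X − X̂‖² = ‖X‖² − 2⟨X, X̂⟩ + ‖X̂‖²`. Note
/// `rank` is capped here at the smallest mode dimension because factors are
/// initialized from SVD bases.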
pub fn cp_als(
tensor: &ArrayD<f64>,
rank: usize,
max_iter: usize,
tol: f64,
) -> Result<CpDecomposition, DecompositionError> {
if tensor.ndim() != 3 {
return Err(DecompositionError::ShapeError(format!(
"cp_als currently supports only 3-mode tensors, got {}-mode",
tensor.ndim()
)));
}
let shape = tensor.shape();
let (i_dim, j_dim, k_dim) = (shape[0], shape[1], shape[2]);
if i_dim == 0 || j_dim == 0 || k_dim == 0 {
return Err(DecompositionError::EmptyTensor);
}
let max_rank = i_dim.min(j_dim).min(k_dim);
if rank == 0 || rank > max_rank {
return Err(DecompositionError::InvalidRank { rank, max_rank });
}
    let x0 = unfold(tensor, 0)?;
    let x1 = unfold(tensor, 1)?;
    let x2 = unfold(tensor, 2)?;
let tensor_norm_sq: f64 = tensor.iter().map(|x| x * x).sum();
    let mut a = init_factor_svd(&x0, rank)?;
    let mut b = init_factor_svd(&x1, rank)?;
    let mut c = init_factor_svd(&x2, rank)?;
let mut weights = Array1::<f64>::ones(rank);
let mut prev_residual = f64::INFINITY;
let mut converged = false;
let mut iter = 0usize;
let regularization = 1e-10;
    for it in 0..max_iter {
        iter = it + 1;
        // Mode-0 update: A <- X0 (B ⊙ C) [(BᵀB) ∗ (CᵀC) + λI]⁻¹.
        {
            let kr_bc = khatri_rao(&b, &c);
            let gram_prod = hadamard(&gram(&b), &gram(&c));
            let gram_inv = pinv_small(&gram_prod, regularization)?;
            let rhs = kr_bc.dot(&gram_inv);
            a = x0.dot(&rhs);
        }
        // Mode-1 update: B <- X1 (A ⊙ C) [(AᵀA) ∗ (CᵀC) + λI]⁻¹.
        {
            let kr_ac = khatri_rao(&a, &c);
            let gram_prod = hadamard(&gram(&a), &gram(&c));
            let gram_inv = pinv_small(&gram_prod, regularization)?;
            let rhs = kr_ac.dot(&gram_inv);
            b = x1.dot(&rhs);
        }
        // Mode-2 update: C <- X2 (A ⊙ B) [(AᵀA) ∗ (BᵀB) + λI]⁻¹.
        {
            let kr_ab = khatri_rao(&a, &b);
            let gram_prod = hadamard(&gram(&a), &gram(&b));
            let gram_inv = pinv_small(&gram_prod, regularization)?;
            let rhs = kr_ab.dot(&gram_inv);
            c = x2.dot(&rhs);
        }
for r in 0..rank {
let norm_a = col_norm(&a, r);
let norm_b = col_norm(&b, r);
let norm_c = col_norm(&c, r);
let w = norm_a * norm_b * norm_c;
weights[r] = w;
if norm_a > 1e-14 {
a.column_mut(r).mapv_inplace(|x| x / norm_a);
}
if norm_b > 1e-14 {
b.column_mut(r).mapv_inplace(|x| x / norm_b);
}
if norm_c > 1e-14 {
c.column_mut(r).mapv_inplace(|x| x / norm_c);
}
}
let approx_norm_sq = compute_cp_norm_sq(&a, &b, &c, &weights, rank);
let inner_xhat = compute_inner_x_xhat(&x0, &a, &b, &c, &weights, rank);
let residual_sq = (tensor_norm_sq - 2.0 * inner_xhat + approx_norm_sq).max(0.0);
let residual = residual_sq.sqrt();
let rel_change = if prev_residual.is_finite() && prev_residual > 1e-30 {
(prev_residual - residual).abs() / prev_residual
} else {
f64::INFINITY
};
        if rel_change < tol && it > 0 {
converged = true;
prev_residual = residual;
break;
}
prev_residual = residual;
}
Ok(CpDecomposition {
factors: vec![a, b, c],
weights,
rank,
num_modes: 3,
iterations: iter,
final_residual: prev_residual,
converged,
})
}
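/// HOSVD output: a `core` of shape `ranks` plus one factor per mode;
/// `reconstruct` re-expands the core through every factor in turn.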
#[derive(Debug, Clone)]
pub struct HosvdResult {
pub core: ArrayD<f64>,
pub factors: Vec<Array2<f64>>,
pub ranks: Vec<usize>,
pub compression_ratio: f64,
}
impl HosvdResult {
pub fn reconstruct(&self) -> ArrayD<f64> {
let mut current = self.core.clone();
for (mode, factor) in self.factors.iter().enumerate() {
            current = match tucker_product(&current, factor, mode) {
Ok(t) => t,
Err(_) => return self.core.clone(),
};
}
current
}
pub fn reconstruction_error(&self, original: &ArrayD<f64>) -> f64 {
let approx = self.reconstruct();
        let diff = original - &approx;
frobenius_norm_nd(&diff)
}
}
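/// Higher-order SVD (HOSVD): each factor `U_m` is the rank-`ranks[m]`
/// truncated SVD basis of the mode-`m` unfolding, and the core contracts
/// every mode, `G = X ×₀ U₀ᵀ ×₁ U₁ᵀ ⋯`. This is the plain (non-iterated)
/// HOSVD; unlike HOOI it does not re-optimize the factors, so the fit at
/// truncated ranks is quasi-optimal rather than optimal.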
pub fn hosvd(tensor: &ArrayD<f64>, ranks: &[usize]) -> Result<HosvdResult, DecompositionError> {
let ndim = tensor.ndim();
if ndim == 0 {
return Err(DecompositionError::ShapeError("0-D tensor".into()));
}
if ranks.len() != ndim {
return Err(DecompositionError::ShapeError(format!(
"ranks length {} must match tensor ndim {}",
ranks.len(),
ndim
)));
}
if tensor.shape().contains(&0) {
return Err(DecompositionError::EmptyTensor);
}
for (mode, &r) in ranks.iter().enumerate() {
let dim = tensor.shape()[mode];
if r == 0 || r > dim {
return Err(DecompositionError::InvalidRank {
rank: r,
max_rank: dim,
});
}
}
let mut factors: Vec<Array2<f64>> = Vec::with_capacity(ndim);
    for (mode, &rank_m) in ranks.iter().enumerate() {
        let unfolded = unfold(tensor, mode)?;
        let svd = truncated_svd(&unfolded, rank_m, 30, 1e-10)?;
        factors.push(svd.u);
    }
let mut core = tensor.to_owned();
for (mode, factor) in factors.iter().enumerate() {
        let unfolded_core = unfold(&core, mode)?;
        let u_t = factor.t().to_owned();
        let compressed = u_t.dot(&unfolded_core);
let mut new_shape: Vec<usize> = core.shape().to_vec();
new_shape[mode] = ranks[mode];
core = fold(&compressed, mode, &new_shape)?;
}
let original_elements: usize = tensor.shape().iter().product();
let core_elements: usize = ranks.iter().product();
let compression_ratio = if core_elements == 0 {
1.0
} else {
original_elements as f64 / core_elements as f64
};
Ok(HosvdResult {
core,
factors,
ranks: ranks.to_vec(),
compression_ratio,
})
}
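/// Mode-`mode` tensor-times-matrix product: folds `factor · X_(mode)` back
/// into a tensor whose mode-`mode` dimension is `factor.nrows()`.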
fn tucker_product(
tensor: &ArrayD<f64>,
factor: &Array2<f64>,
mode: usize,
) -> Result<ArrayD<f64>, DecompositionError> {
    let unfolded = unfold(tensor, mode)?;
    let result_mat = factor.dot(&unfolded);
let mut new_shape: Vec<usize> = tensor.shape().to_vec();
new_shape[mode] = factor.nrows();
fold(&result_mat, mode, &new_shape)
}
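/// Initializes a CP factor from the truncated SVD of the corresponding
/// unfolding, padding with deterministic pseudo-random unit columns if the
/// requested rank exceeds the matrix's maximum possible rank.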
fn init_factor_svd(matrix: &Array2<f64>, rank: usize) -> Result<Array2<f64>, DecompositionError> {
let effective_rank = rank.min(matrix.nrows().min(matrix.ncols()));
if effective_rank == 0 {
return Err(DecompositionError::EmptyTensor);
}
let svd = truncated_svd(matrix, effective_rank, 20, 1e-10)?;
if effective_rank == rank {
return Ok(svd.u);
}
let m = matrix.nrows();
let mut factor = Array2::<f64>::zeros((m, rank));
for c in 0..effective_rank {
for r in 0..m {
factor[[r, c]] = svd.u[[r, c]];
}
}
for c in effective_rank..rank {
let mut v = init_vector(m, c);
normalize_vec(&mut v);
for r in 0..m {
factor[[r, c]] = v[r];
}
}
Ok(factor)
}
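/// Deterministic pseudo-random values in [-1, 1] from an LCG-style hash,
/// so results are reproducible without pulling in an RNG dependency.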
fn init_vector(n: usize, seed: usize) -> Vec<f64> {
(0..n)
.map(|i| {
let x = i
.wrapping_mul(6364136223846793005usize)
.wrapping_add(seed.wrapping_mul(1442695040888963407usize))
as u64;
(x as f64 / u64::MAX as f64) * 2.0 - 1.0
})
.collect()
}
fn normalize_vec(v: &mut [f64]) {
let norm = vec_norm(v);
if norm > 1e-14 {
v.iter_mut().for_each(|x| *x /= norm);
}
}
fn vec_norm(v: &[f64]) -> f64 {
dot_product(v, v).sqrt()
}
fn dot_product(a: &[f64], b: &[f64]) -> f64 {
a.iter().zip(b.iter()).map(|(x, y)| x * y).sum()
}
fn mat_vec_mul(a: &Array2<f64>, v: &[f64]) -> Vec<f64> {
let m = a.nrows();
let n = a.ncols();
let mut result = vec![0.0f64; m];
for i in 0..m {
let mut s = 0.0;
for j in 0..n {
s += a[[i, j]] * v[j];
}
result[i] = s;
}
result
}
fn mat_t_vec_mul(a: &Array2<f64>, v: &[f64]) -> Vec<f64> {
let m = a.nrows();
let n = a.ncols();
let mut result = vec![0.0f64; n];
for j in 0..n {
let mut s = 0.0;
for i in 0..m {
s += a[[i, j]] * v[i];
}
result[j] = s;
}
result
}
fn frobenius_norm_2d(m: &Array2<f64>) -> f64 {
m.iter().map(|x| x * x).sum::<f64>().sqrt()
}
fn frobenius_norm_nd(t: &ArrayD<f64>) -> f64 {
t.iter().map(|x| x * x).sum::<f64>().sqrt()
}
fn col_norm(a: &Array2<f64>, col: usize) -> f64 {
a.column(col).iter().map(|x| x * x).sum::<f64>().sqrt()
}
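/// `‖X̂‖²` for a weighted CP model via the Gram identity
/// `‖X̂‖² = Σ_{r,s} w_r w_s (AᵀA)[r,s] (BᵀB)[r,s] (CᵀC)[r,s]`.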
fn compute_cp_norm_sq(
a: &Array2<f64>,
b: &Array2<f64>,
c: &Array2<f64>,
weights: &Array1<f64>,
rank: usize,
) -> f64 {
    let ga = gram(a);
    let gb = gram(b);
let gc = gram(c);
let mut norm_sq = 0.0f64;
for r1 in 0..rank {
for r2 in 0..rank {
let val = ga[[r1, r2]] * gb[[r1, r2]] * gc[[r1, r2]] * weights[r1] * weights[r2];
norm_sq += val;
}
}
norm_sq
}
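/// `⟨X, X̂⟩` via the mode-0 MTTKRP: `Σ_r w_r ⟨a_r, (X_(0)(B ⊙ C))_r⟩`.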
fn compute_inner_x_xhat(
    x0: &Array2<f64>,
    a: &Array2<f64>,
    b: &Array2<f64>,
    c: &Array2<f64>,
    weights: &Array1<f64>,
    rank: usize,
) -> f64 {
    let kr = khatri_rao(b, c);
    let mttkrp = x0.dot(&kr);
    let mut inner = 0.0f64;
for r in 0..rank {
let dot: f64 = a
.column(r)
.iter()
.zip(mttkrp.column(r).iter())
.map(|(x, y)| x * y)
.sum();
inner += weights[r] * dot;
}
inner
}
#[cfg(test)]
mod tests {
use super::*;
use scirs2_core::ndarray::IxDyn;
const TOL: f64 = 1e-6;
const ALS_TOL: f64 = 1e-5;
fn make_tensor(shape: &[usize], seed: usize) -> ArrayD<f64> {
let n: usize = shape.iter().product();
let data: Vec<f64> = (0..n)
.map(|i| {
let x = i
.wrapping_mul(6364136223846793005usize)
.wrapping_add(seed.wrapping_mul(1442695040888963407usize))
as u64;
(x as f64 / u64::MAX as f64) * 2.0 - 1.0
})
.collect();
ArrayD::from_shape_vec(IxDyn(shape), data).expect("shape ok")
}
fn make_matrix(rows: usize, cols: usize, seed: usize) -> Array2<f64> {
let n = rows * cols;
let data: Vec<f64> = (0..n)
.map(|i| {
let x = i
.wrapping_mul(6364136223846793005usize)
.wrapping_add(seed.wrapping_mul(1442695040888963407usize))
as u64;
(x as f64 / u64::MAX as f64) * 2.0 - 1.0
})
.collect();
Array2::from_shape_vec((rows, cols), data).expect("shape ok")
}
#[test]
fn test_truncated_svd_rank1() {
let u_true: Vec<f64> = (0..5).map(|i| (i + 1) as f64).collect();
let v_true: Vec<f64> = (0..4).map(|i| (i + 1) as f64).collect();
let mut mat = Array2::<f64>::zeros((5, 4));
for i in 0..5 {
for j in 0..4 {
mat[[i, j]] = u_true[i] * v_true[j];
}
}
let svd = truncated_svd(&mat, 1, 40, 1e-12).expect("svd ok");
let recon = svd.reconstruct();
let err = (mat - recon).iter().map(|x| x * x).sum::<f64>().sqrt();
assert!(err < 1e-6, "rank-1 reconstruction error too large: {err}");
}
#[test]
fn test_truncated_svd_reconstruction_error_decreases_with_rank() {
let mat = make_matrix(8, 6, 42);
let err1 = truncated_svd(&mat, 1, 30, 1e-12)
.expect("ok")
.reconstruction_error(&mat);
let err3 = truncated_svd(&mat, 3, 30, 1e-12)
.expect("ok")
.reconstruction_error(&mat);
let err6 = truncated_svd(&mat, 6, 30, 1e-12)
.expect("ok")
.reconstruction_error(&mat);
assert!(
err3 <= err1 + 1e-8,
"rank-3 err ({err3}) should be <= rank-1 err ({err1})"
);
assert!(
err6 <= err3 + 1e-8,
"rank-6 err ({err6}) should be <= rank-3 err ({err3})"
);
}
#[test]
fn test_truncated_svd_singular_values_descending() {
let mat = make_matrix(7, 5, 99);
let svd = truncated_svd(&mat, 4, 30, 1e-12).expect("ok");
for i in 0..svd.rank - 1 {
assert!(
svd.s[i] >= svd.s[i + 1] - 1e-8,
"singular values not descending: s[{i}]={} < s[{}]={}",
svd.s[i],
i + 1,
svd.s[i + 1]
);
}
}
#[test]
fn test_truncated_svd_explained_variance_full_rank() {
let mat = make_matrix(4, 4, 7);
let max_rank = 4;
let svd = truncated_svd(&mat, max_rank, 50, 1e-14).expect("ok");
assert!(
svd.explained_variance_ratio > 0.99,
"full-rank EVR should be ≈1, got {}",
svd.explained_variance_ratio
);
}
#[test]
fn test_truncated_svd_invalid_rank() {
let mat = make_matrix(3, 4, 1);
let result = truncated_svd(&mat, 0, 10, 1e-10);
assert!(result.is_err(), "rank=0 should fail");
let result2 = truncated_svd(&mat, 5, 10, 1e-10);
assert!(result2.is_err(), "rank > min(m,n) should fail");
}
#[test]
fn test_unfold_mode0_shape() {
let tensor = make_tensor(&[3, 4, 5], 1);
let mat = unfold(&tensor, 0).expect("ok");
assert_eq!(mat.nrows(), 3, "mode-0 rows should be dim-0");
assert_eq!(mat.ncols(), 4 * 5, "mode-0 cols should be dim-1 * dim-2");
}
#[test]
fn test_unfold_mode1_shape() {
let tensor = make_tensor(&[3, 4, 5], 2);
let mat = unfold(&tensor, 1).expect("ok");
assert_eq!(mat.nrows(), 4, "mode-1 rows should be dim-1");
assert_eq!(mat.ncols(), 3 * 5, "mode-1 cols should be dim-0 * dim-2");
}
#[test]
fn test_fold_roundtrip() {
let original = make_tensor(&[3, 4, 5], 3);
for mode in 0..3usize {
let mat = unfold(&original, mode).expect("unfold ok");
let recovered = fold(&mat, mode, &[3, 4, 5]).expect("fold ok");
let err = (&original - &recovered)
.iter()
.map(|x| x * x)
.sum::<f64>()
.sqrt();
assert!(err < TOL, "fold(unfold(x, {mode})) != x, error={err}");
}
}
#[test]
fn test_tucker1_compression_ratio() {
let tensor = make_tensor(&[10, 8, 6], 5);
let result = tucker1(&tensor, 0, 3).expect("ok");
assert!(
result.compression_ratio > 1.0,
"compressed core should be smaller than original, ratio={}",
result.compression_ratio
);
assert_eq!(
result.core.shape()[0],
3,
"core mode-0 dim should equal rank"
);
}
#[test]
fn test_tucker1_reconstruction_error_small() {
        // Build an exactly rank-2 (mode 0) tensor: fold(factor_true · G_(0)).
        let factor_true =
            Array2::from_shape_vec((4, 2), vec![1.0, 0.0, 0.0, 1.0, 0.5, 0.5, 0.3, 0.7])
                .expect("ok");
        let core_true = make_tensor(&[2, 3, 3], 11);
        let core_unfolded = unfold(&core_true, 0).expect("ok");
        let t_unfolded = factor_true.dot(&core_unfolded);
        let tensor = fold(&t_unfolded, 0, &[4, 3, 3]).expect("ok");
let result = tucker1(&tensor, 0, 2).expect("ok");
let err = result.reconstruction_error(&tensor);
assert!(
err < 1e-5,
"Tucker-1 error too large for rank-2 tensor: {err}"
);
}
#[test]
fn test_tucker1_invalid_rank() {
let tensor = make_tensor(&[3, 4, 5], 6);
        let result = tucker1(&tensor, 0, 10);
        assert!(result.is_err(), "rank > dim should return error");
}
#[test]
fn test_tucker1_reconstruct_shape() {
let tensor = make_tensor(&[5, 4, 3], 8);
let result = tucker1(&tensor, 1, 2).expect("ok");
let recon = result.reconstruct();
assert_eq!(
recon.shape(),
tensor.shape(),
"reconstructed shape must match original"
);
}
#[test]
fn test_cp_als_3mode() {
let tensor = make_tensor(&[4, 3, 3], 20);
let result = cp_als(&tensor, 2, 200, ALS_TOL).expect("ok");
assert_eq!(result.num_modes, 3);
assert_eq!(result.rank, 2);
assert!(result.iterations > 0);
}
#[test]
fn test_cp_als_reconstruction_error() {
let a = make_matrix(4, 2, 30);
let b = make_matrix(3, 2, 31);
let c = make_matrix(3, 2, 32);
let mut data = vec![0.0f64; 4 * 3 * 3];
for i in 0..4 {
for j in 0..3 {
for k in 0..3 {
let v: f64 = (0..2).map(|r| a[[i, r]] * b[[j, r]] * c[[k, r]]).sum();
data[i * 9 + j * 3 + k] = v;
}
}
}
let tensor = ArrayD::from_shape_vec(IxDyn(&[4, 3, 3]), data).expect("ok");
let result = cp_als(&tensor, 2, 300, 1e-8).expect("ok");
let err = result.reconstruction_error(&tensor);
assert!(
err < 0.5,
"CP-ALS reconstruction error too large for rank-2 tensor: {err}"
);
}
#[test]
fn test_cp_als_factors_shape() {
let tensor = make_tensor(&[5, 4, 3], 40);
let result = cp_als(&tensor, 2, 100, ALS_TOL).expect("ok");
assert_eq!(result.factors.len(), 3);
assert_eq!(result.factors[0].shape(), &[5, 2]);
assert_eq!(result.factors[1].shape(), &[4, 2]);
assert_eq!(result.factors[2].shape(), &[3, 2]);
}
#[test]
fn test_cp_als_weights_positive() {
let tensor = make_tensor(&[4, 3, 3], 50);
let result = cp_als(&tensor, 2, 100, ALS_TOL).expect("ok");
for (i, &w) in result.weights.iter().enumerate() {
assert!(w >= 0.0, "weight[{i}] should be non-negative, got {w}");
}
}
#[test]
fn test_hosvd_shape() {
let tensor = make_tensor(&[6, 5, 4], 60);
let ranks = vec![3, 2, 2];
let result = hosvd(&tensor, &ranks).expect("ok");
assert_eq!(result.core.shape(), &[3usize, 2, 2][..]);
}
#[test]
fn test_hosvd_factors_shape() {
let tensor = make_tensor(&[6, 5, 4], 61);
let ranks = vec![3, 2, 2];
let result = hosvd(&tensor, &ranks).expect("ok");
assert_eq!(result.factors.len(), 3);
assert_eq!(result.factors[0].shape(), &[6, 3]);
assert_eq!(result.factors[1].shape(), &[5, 2]);
assert_eq!(result.factors[2].shape(), &[4, 2]);
}
#[test]
fn test_hosvd_reconstruction_error_small() {
let tensor = make_tensor(&[3, 3, 3], 62);
let ranks = vec![3, 3, 3];
let result = hosvd(&tensor, &ranks).expect("ok");
let err = result.reconstruction_error(&tensor);
assert!(err < 1e-5, "Full-rank HOSVD error should be near 0: {err}");
}
#[test]
fn test_decomposition_error_display() {
let errors = vec![
DecompositionError::ShapeError("bad shape".into()),
DecompositionError::ConvergenceFailure {
iterations: 100,
residual: 1e-3,
},
DecompositionError::SingularMatrix,
DecompositionError::InvalidRank {
rank: 10,
max_rank: 5,
},
DecompositionError::EmptyTensor,
DecompositionError::NonMatrixInput { ndim: 3 },
];
for e in &errors {
let s = format!("{e}");
assert!(!s.is_empty(), "Display for {e:?} should not be empty");
}
}
#[test]
fn test_cp_decomp_explained_variance() {
let tensor = make_tensor(&[4, 3, 3], 70);
let result = cp_als(&tensor, 2, 100, ALS_TOL).expect("ok");
let ev = result.explained_variance(&tensor);
assert!(
(0.0..=1.0).contains(&ev),
"explained variance must be in [0,1], got {ev}"
);
}
}