//! Tucker decomposition of dense tensors: higher-order SVD (HOSVD),
//! higher-order orthogonal iteration (HOOI), and energy-based automatic
//! rank truncation.

use crate::decomposition::svd;
use crate::error::{LinalgError, LinalgResult};
use crate::tensor::core::{Tensor, TensorScalar};
use scirs2_core::ndarray::Array2;
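/// Result of a Tucker decomposition: a core tensor plus one factor matrix
/// per mode, whose columns are leading mode-n left singular vectors.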
#[derive(Debug, Clone)]
pub struct HOSVDResult<F> {
pub core: Tensor<F>,
pub factors: Vec<Array2<F>>,
}
impl<F: TensorScalar> HOSVDResult<F> {
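    /// Reconstructs the (approximate) tensor by multiplying the core along
    /// each mode with its factor matrix: X̂ = G ×₁ U₁ ×₂ U₂ ⋯.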
pub fn reconstruct(&self) -> LinalgResult<Tensor<F>> {
let mut result = self.core.clone();
for (n, factor) in self.factors.iter().enumerate() {
result = result.mode_product(factor, n)?;
}
Ok(result)
}
    /// Relative reconstruction error ‖X − X̂‖_F / ‖X‖_F. For a zero tensor
    /// the ratio is undefined: this returns zero when the reconstruction is
    /// also zero, and infinity otherwise.
    pub fn relative_error(&self, original: &Tensor<F>) -> LinalgResult<F> {
        let reconstructed = self.reconstruct()?;
        // Frobenius norm of the elementwise difference.
        let diff_norm = {
            let diff_sq: F = original
                .data
                .iter()
                .zip(reconstructed.data.iter())
                .map(|(&a, &b)| {
                    let d = a - b;
                    d * d
                })
                .fold(F::zero(), |acc, x| acc + x);
            diff_sq.sqrt()
        };
        let orig_norm = original.frobenius_norm();
        if orig_norm == F::zero() {
            return Ok(if diff_norm == F::zero() {
                F::zero()
            } else {
                F::infinity()
            });
        }
        Ok(diff_norm / orig_norm)
    }
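    /// Ratio of the original element count to the stored element count
    /// (core plus all factor matrices); values above one mean the Tucker
    /// representation is smaller than the dense tensor.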
pub fn compression_ratio(&self, original_shape: &[usize]) -> F {
let orig: usize = original_shape.iter().product();
let core_n: usize = self.core.shape.iter().product();
let factors_n: usize = self.factors.iter().map(|f| f.len()).sum();
let compressed = core_n + factors_n;
if compressed == 0 {
return F::infinity();
}
F::from(orig).unwrap_or(F::zero()) / F::from(compressed).unwrap_or(F::one())
}
}
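/// Higher-order SVD (HOSVD): for each mode `n`, the factor is the leading
/// `ranks[n]` left singular vectors of the mode-n unfolding, and the core is
/// the tensor projected onto those factors. The truncated result is
/// quasi-optimal rather than optimal; see [`hooi`] for iterative refinement.
///
/// # Example
///
/// ```ignore
/// // Sketch mirroring the unit tests below; paths depend on crate layout.
/// let data: Vec<f64> = (0..24).map(|x| x as f64 + 1.0).collect();
/// let t = Tensor::new(data, vec![2, 3, 4])?;
/// let r = hosvd(&t, &[2, 2, 3])?;
/// assert_eq!(r.core.shape, vec![2, 2, 3]);
/// ```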
pub fn hosvd<F: TensorScalar>(tensor: &Tensor<F>, ranks: &[usize]) -> LinalgResult<HOSVDResult<F>> {
validate_ranks(tensor, ranks)?;
let ndim = tensor.ndim();
let mut factors: Vec<Array2<F>> = Vec::with_capacity(ndim);
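    // One factor per mode: the leading `ranks[n]` left singular vectors of
    // the mode-n unfolding.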
for n in 0..ndim {
let unfolded = tensor.unfold(n)?;
let u = truncated_left_singular_vectors(&unfolded, ranks[n])?;
factors.push(u);
}
let core = compute_core(tensor, &factors)?;
Ok(HOSVDResult { core, factors })
}
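/// Higher-order orthogonal iteration (HOOI): starts from the HOSVD factors
/// and alternately refits each factor from the tensor projected onto all the
/// other factors, for at most `max_iter` sweeps or until the factor
/// subspaces stop changing. Typically matches or improves on plain HOSVD.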
pub fn hooi<F: TensorScalar>(
tensor: &Tensor<F>,
ranks: &[usize],
max_iter: usize,
) -> LinalgResult<HOSVDResult<F>> {
validate_ranks(tensor, ranks)?;
let ndim = tensor.ndim();
let init = hosvd(tensor, ranks)?;
let mut factors = init.factors;
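    // Convergence tolerance for the per-sweep change in factor subspaces.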
let tol = F::from(1e-10_f64).unwrap_or(F::zero());
for _iter in 0..max_iter {
let mut max_change = F::zero();
for n in 0..ndim {
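            // Fix every other factor, project the tensor onto them, and
            // refit the mode-n factor from that projection's unfolding.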
let y = project_all_but_mode(tensor, &factors, n)?;
let y_unfold = y.unfold(n)?;
let u_new = truncated_left_singular_vectors(&y_unfold, ranks[n])?;
let u_old = &factors[n];
let change = subspace_change(u_old, &u_new);
if change > max_change {
max_change = change;
}
factors[n] = u_new;
}
if max_change < tol {
break;
}
}
let core = compute_core(tensor, &factors)?;
Ok(HOSVDResult { core, factors })
}
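/// HOSVD with per-mode ranks chosen automatically: each mode keeps the
/// smallest rank whose singular values retain at least a (1 − eps²) fraction
/// of that unfolding's squared Frobenius energy.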
pub fn truncated_hosvd<F: TensorScalar>(tensor: &Tensor<F>, eps: F) -> LinalgResult<HOSVDResult<F>> {
let ndim = tensor.ndim();
let mut ranks = Vec::with_capacity(ndim);
for n in 0..ndim {
let unfolded = tensor.unfold(n)?;
let rank = auto_rank_from_energy(&unfolded, eps)?;
ranks.push(rank.max(1));
}
hosvd(tensor, &ranks)
}
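/// Checks that `ranks` supplies one positive entry per mode, none exceeding
/// the corresponding tensor dimension.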
fn validate_ranks<F: TensorScalar>(tensor: &Tensor<F>, ranks: &[usize]) -> LinalgResult<()> {
if ranks.len() != tensor.ndim() {
return Err(LinalgError::DimensionError(format!(
"ranks length {} != tensor ndim {}",
ranks.len(),
tensor.ndim()
)));
}
for (n, (&r, &s)) in ranks.iter().zip(tensor.shape.iter()).enumerate() {
if r == 0 {
return Err(LinalgError::ValueError(format!("ranks[{n}] must be > 0")));
}
if r > s {
return Err(LinalgError::ValueError(format!(
"ranks[{n}]={r} exceeds tensor shape[{n}]={s}"
)));
}
}
Ok(())
}
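/// First `k` left singular vectors of `matrix`, as columns. If the SVD
/// yields fewer than `k` singular vectors (the matrix has fewer columns than
/// rows), the remaining columns are left as zeros.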
fn truncated_left_singular_vectors<F: TensorScalar>(
matrix: &Array2<F>,
k: usize,
) -> LinalgResult<Array2<F>> {
let (u_full, _s, _vt) = svd(&matrix.view(), false, None)?;
let m = u_full.nrows();
let avail = u_full.ncols().min(k);
let mut u = Array2::<F>::zeros((m, k));
for i in 0..m {
for j in 0..avail {
u[[i, j]] = u_full[[i, j]];
}
}
Ok(u)
}
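/// Projects the tensor onto the factor subspaces: G = X ×₁ U₁ᵀ ×₂ U₂ᵀ ⋯.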
fn compute_core<F: TensorScalar>(
tensor: &Tensor<F>,
factors: &[Array2<F>],
) -> LinalgResult<Tensor<F>> {
let mut g = tensor.clone();
for (n, u) in factors.iter().enumerate() {
let ut = u.t().to_owned();
g = g.mode_product(&ut, n)?;
}
Ok(g)
}
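/// Applies Uₘᵀ along every mode except `skip_mode`; this is the projection
/// used by the HOOI inner update.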
fn project_all_but_mode<F: TensorScalar>(
tensor: &Tensor<F>,
factors: &[Array2<F>],
skip_mode: usize,
) -> LinalgResult<Tensor<F>> {
let mut y = tensor.clone();
for (m, u) in factors.iter().enumerate() {
if m == skip_mode {
continue;
}
let ut = u.t().to_owned();
y = y.mode_product(&ut, m)?;
}
Ok(y)
}
/// Distance between the column spaces of the old and new factors. Singular
/// vectors are determined only up to sign, so an elementwise difference need
/// not shrink as HOOI converges; for factors with k orthonormal columns, the
/// projector distance ‖U_old U_oldᵀ − U_new U_newᵀ‖_F =
/// √(2k − 2‖U_oldᵀ U_new‖_F²) is invariant to those sign flips.
fn subspace_change<F: TensorScalar>(u_old: &Array2<F>, u_new: &Array2<F>) -> F {
    let rows = u_old.nrows().min(u_new.nrows());
    let mut cross_sq = F::zero(); // accumulates ‖U_oldᵀ U_new‖_F²
    for p in 0..u_old.ncols() {
        for q in 0..u_new.ncols() {
            let mut dot = F::zero();
            for i in 0..rows {
                dot = dot + u_old[[i, p]] * u_new[[i, q]];
            }
            cross_sq = cross_sq + dot * dot;
        }
    }
    let k = F::from(u_old.ncols().min(u_new.ncols())).unwrap_or(F::zero());
    let two = F::one() + F::one();
    let val = two * (k - cross_sq);
    // Clamp tiny negatives from round-off before taking the square root.
    if val > F::zero() { val.sqrt() } else { F::zero() }
}
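/// Smallest rank whose leading singular values hold at least a (1 − eps²)
/// fraction of the total squared singular-value energy of `matrix`.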
fn auto_rank_from_energy<F: TensorScalar>(matrix: &Array2<F>, eps: F) -> LinalgResult<usize> {
let (_u, s, _vt) = svd(&matrix.view(), false, None)?;
let total_energy: F = s.iter().map(|&sv| sv * sv).fold(F::zero(), |a, b| a + b);
if total_energy == F::zero() {
return Ok(1);
}
let threshold = (F::one() - eps * eps) * total_energy;
let mut cumulative = F::zero();
for (k, &sv) in s.iter().enumerate() {
cumulative = cumulative + sv * sv;
if cumulative >= threshold {
return Ok(k + 1);
}
}
Ok(s.len().max(1))
}
#[cfg(test)]
mod tests {
use super::*;
fn make_tensor_234() -> Tensor<f64> {
let data: Vec<f64> = (0..24).map(|x| x as f64 + 1.0).collect();
Tensor::new(data, vec![2, 3, 4]).expect("valid")
}
#[test]
fn test_hosvd_shape() {
let t = make_tensor_234();
let r = hosvd(&t, &[2, 2, 3]).expect("ok");
assert_eq!(r.core.shape, vec![2, 2, 3]);
assert_eq!(r.factors[0].shape(), &[2, 2]);
assert_eq!(r.factors[1].shape(), &[3, 2]);
assert_eq!(r.factors[2].shape(), &[4, 3]);
}
#[test]
fn test_hosvd_full_rank_lossless() {
let t = make_tensor_234();
let r = hosvd(&t, &[2, 3, 4]).expect("full rank");
let err = r.relative_error(&t).expect("err ok");
assert!(err < 1e-8, "full rank HOSVD error={err}");
}
#[test]
fn test_hosvd_factor_orthogonality() {
let t = make_tensor_234();
let r = hosvd(&t, &[2, 2, 3]).expect("ok");
for (n, factor) in r.factors.iter().enumerate() {
let utu = factor.t().dot(factor);
let rank = factor.ncols();
for i in 0..rank {
for j in 0..rank {
let expected = if i == j { 1.0 } else { 0.0 };
let diff = (utu[[i, j]] - expected).abs();
                    assert!(
                        diff < 1e-8,
                        "factor[{n}] not orthonormal at ({i},{j}): diff={diff}"
                    );
                }
}
}
}
#[test]
fn test_hooi_shape() {
let t = make_tensor_234();
let r = hooi(&t, &[2, 2, 3], 50).expect("ok");
assert_eq!(r.core.shape, vec![2, 2, 3]);
}
#[test]
fn test_hooi_better_than_hosvd() {
let t = make_tensor_234();
let r_hosvd = hosvd(&t, &[1, 2, 3]).expect("hosvd");
let r_hooi = hooi(&t, &[1, 2, 3], 100).expect("hooi");
let err_hosvd = r_hosvd.relative_error(&t).expect("e1");
let err_hooi = r_hooi.relative_error(&t).expect("e2");
assert!(
err_hooi <= err_hosvd + 1e-6,
"HOOI err {err_hooi} > HOSVD err {err_hosvd}"
);
}
#[test]
fn test_truncated_hosvd_auto_rank() {
let t = make_tensor_234();
        let r = truncated_hosvd(&t, 0.0_f64).expect("ok");
        let err = r.relative_error(&t).expect("err");
assert!(err < 1e-6, "truncated (eps=0) err={err}");
}
#[test]
fn test_compression_ratio() {
let data: Vec<f64> = (0..120).map(|x| x as f64 + 1.0).collect();
let t = Tensor::new(data, vec![4, 5, 6]).expect("valid");
let r = hosvd(&t, &[2, 2, 2]).expect("ok");
let ratio = r.compression_ratio(&[4, 5, 6]);
assert!(ratio > 1.0, "should compress: ratio={ratio}");
}
#[test]
fn test_invalid_ranks_length() {
let t = make_tensor_234();
assert!(hosvd(&t, &[2, 2]).is_err());
}
#[test]
fn test_invalid_rank_too_large() {
let t = make_tensor_234();
        assert!(hosvd(&t, &[3, 3, 4]).is_err());
    }
}