use crate::error::{LinalgError, LinalgResult};
use crate::tensor_contractions::{tensor_mode_product_view, unfold_tensor_view};
use scirs2_core::ndarray::{Array2, Array3, ArrayView2, ArrayView3};
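
/// Tucker decomposition of a third-order tensor: a small core tensor plus
/// one orthonormal factor matrix per mode, so that
/// `X ≈ core x_1 U_0 x_2 U_1 x_3 U_2` (mode-n products).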
#[derive(Debug, Clone)]
pub struct TuckerDecomp {
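    /// Core tensor of shape `(r0, r1, r2)`.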
pub core: Array3<f64>,
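    /// Factor matrices; `factors[n]` has shape `(dim_n, r_n)` with
    /// orthonormal columns.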
pub factors: [Array2<f64>; 3],
}
impl TuckerDecomp {
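    /// Reconstructs the approximated tensor
    /// `core x_1 factors[0] x_2 factors[1] x_3 factors[2]`.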
pub fn reconstruct(&self) -> LinalgResult<Array3<f64>> {
tucker_reconstruct(self)
}
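
    /// Relative reconstruction error `||X - X_hat||_F / ||X||_F`.
    ///
    /// Returns `f64::INFINITY` if reconstruction fails or the shapes
    /// disagree; a zero `original` yields `0.0` only when reproduced exactly.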
    pub fn relative_error(&self, original: &Array3<f64>) -> f64 {
        let reconstructed = match self.reconstruct() {
            Ok(r) => r,
            Err(_) => return f64::INFINITY,
        };
        // Guard against out-of-bounds indexing when the shapes disagree.
        if reconstructed.shape() != original.shape() {
            return f64::INFINITY;
        }
        let mut diff_sq = 0.0_f64;
        let mut orig_sq = 0.0_f64;
        for (&o, &r) in original.iter().zip(reconstructed.iter()) {
            let diff = o - r;
            diff_sq += diff * diff;
            orig_sq += o * o;
        }
        if orig_sq == 0.0 {
            // A zero tensor is either reproduced exactly or not at all.
            if diff_sq == 0.0 { 0.0 } else { f64::INFINITY }
        } else {
            (diff_sq / orig_sq).sqrt()
        }
    }
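
    /// Ratio of stored elements: the original tensor's element count divided
    /// by the combined element count of the core and the three factor
    /// matrices. Returns `f64::INFINITY` for an empty decomposition.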
pub fn compression_ratio(&self, original_shape: [usize; 3]) -> f64 {
let original_elements: usize = original_shape.iter().product();
let core_elements: usize = self.core.shape().iter().product();
let factor_elements: usize = self.factors.iter().map(|f| f.len()).sum();
let compressed_elements = core_elements + factor_elements;
if compressed_elements == 0 {
return f64::INFINITY;
}
original_elements as f64 / compressed_elements as f64
}
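
    /// Absolute reconstruction error `||X - X_hat||_F` with respect to
    /// `original`, i.e. [`Self::relative_error`] scaled by `||X||_F`.
    /// Returns `f64::INFINITY` if reconstruction fails or the shapes disagree.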
    pub fn error_bound(&self, original: &Array3<f64>) -> f64 {
        let reconstructed = match self.reconstruct() {
            Ok(r) => r,
            Err(_) => return f64::INFINITY,
        };
        // Guard against out-of-bounds indexing when the shapes disagree.
        if reconstructed.shape() != original.shape() {
            return f64::INFINITY;
        }
        let diff_sq: f64 = original
            .iter()
            .zip(reconstructed.iter())
            .map(|(&o, &r)| (o - r) * (o - r))
            .sum();
        diff_sq.sqrt()
    }
}
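
/// Computes a Tucker decomposition with multilinear ranks `ranks` via
/// truncated higher-order SVD; a thin wrapper over [`tucker_hosvd`].
///
/// # Examples
///
/// A minimal usage sketch (marked `ignore` because the import path depends
/// on how the crate exposes this module):
///
/// ```ignore
/// use scirs2_core::ndarray::Array3;
///
/// let t = Array3::from_shape_fn((4, 5, 6), |(i, j, k)| (i + j + k) as f64);
/// let d = tucker_decomp(&t, [2, 3, 4]).unwrap();
/// assert_eq!(d.core.shape(), &[2, 3, 4]);
/// assert!(d.relative_error(&t).is_finite());
/// ```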
pub fn tucker_decomp(tensor: &Array3<f64>, ranks: [usize; 3]) -> LinalgResult<TuckerDecomp> {
tucker_hosvd(&tensor.view(), ranks)
}
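
/// Truncated higher-order SVD (HOSVD): `factors[n]` holds the leading
/// `ranks[n]` left singular vectors of the mode-n unfolding of `tensor`,
/// and the core is the projection of `tensor` onto those subspaces.
///
/// # Errors
///
/// Returns `LinalgError::ValueError` if any rank is zero or exceeds the
/// corresponding tensor dimension.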
pub fn tucker_hosvd(tensor: &ArrayView3<f64>, ranks: [usize; 3]) -> LinalgResult<TuckerDecomp> {
let shape = tensor.shape();
for n in 0..3 {
if ranks[n] == 0 {
return Err(LinalgError::ValueError(format!(
"ranks[{n}] must be positive; got 0"
)));
}
        if ranks[n] > shape[n] {
            return Err(LinalgError::ValueError(format!(
                "ranks[{n}]={} exceeds tensor dimension shape[{n}]={}",
                ranks[n], shape[n]
            )));
        }
}
let factors = compute_hosvd_factors(tensor, ranks)?;
let core = project_to_core(tensor, &factors)?;
Ok(TuckerDecomp { core, factors })
}
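
/// Higher-order orthogonal iteration (HOOI): refines an HOSVD initialization
/// with alternating least-squares sweeps over the three modes, stopping
/// after `max_iter` sweeps or once the fit `||core||_F / ||tensor||_F`
/// changes by less than `tol` between sweeps.
///
/// # Examples
///
/// A minimal usage sketch (marked `ignore`; the import path is illustrative):
///
/// ```ignore
/// use scirs2_core::ndarray::Array3;
///
/// let t = Array3::from_shape_fn((4, 5, 6), |(i, j, k)| (i * 30 + j * 6 + k) as f64);
/// let hooi = tucker_hooi(&t, [2, 2, 2], 30, 1e-10).unwrap();
/// let hosvd = tucker_decomp(&t, [2, 2, 2]).unwrap();
/// // HOOI should fit at least as well as its HOSVD initialization.
/// assert!(hooi.relative_error(&t) <= hosvd.relative_error(&t) + 1e-6);
/// ```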
pub fn tucker_hooi(
tensor: &Array3<f64>,
ranks: [usize; 3],
max_iter: usize,
tol: f64,
) -> LinalgResult<TuckerDecomp> {
tucker_hooi_view(&tensor.view(), ranks, max_iter, tol)
}
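
/// View-based variant of [`tucker_hooi`]; see that function for the
/// algorithm and parameters.
///
/// # Errors
///
/// Returns `LinalgError::ValueError` if any rank is zero or exceeds the
/// corresponding tensor dimension.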
pub fn tucker_hooi_view(
tensor: &ArrayView3<f64>,
ranks: [usize; 3],
max_iter: usize,
tol: f64,
) -> LinalgResult<TuckerDecomp> {
let shape = tensor.shape();
for n in 0..3 {
if ranks[n] == 0 {
return Err(LinalgError::ValueError(format!(
"ranks[{n}] must be positive; got 0"
)));
}
        if ranks[n] > shape[n] {
            return Err(LinalgError::ValueError(format!(
                "ranks[{n}]={} exceeds tensor dimension shape[{n}]={}",
                ranks[n], shape[n]
            )));
        }
}
let mut factors = compute_hosvd_factors(tensor, ranks)?;
let tensor_norm = frobenius_norm_3d(tensor);
let mut prev_fit = 0.0_f64;
for _iter in 0..max_iter {
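        // One ALS sweep: for each mode, project the tensor onto the other two
        // factor subspaces and refit this mode's factor from the dominant
        // left singular subspace of the resulting unfolding.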
for mode in 0..3 {
let y = project_except_mode(tensor, &factors, mode)?;
let y_unf = unfold_tensor_view(&y.view(), mode)?;
let (u_new, _) = truncated_svd_left(&y_unf, ranks[mode])?;
factors[mode] = u_new;
}
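        // Convergence check: with orthonormal factors, ||core||_F / ||X||_F
        // measures the fit of the current model; stop once it stabilizes.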
let core = project_to_core(tensor, &factors)?;
let core_norm = frobenius_norm_3d(&core.view());
let fit = if tensor_norm > 0.0 {
core_norm / tensor_norm
} else {
1.0
};
if (fit - prev_fit).abs() < tol {
let decomp = TuckerDecomp { core, factors };
return Ok(decomp);
}
prev_fit = fit;
}
let core = project_to_core(tensor, &factors)?;
Ok(TuckerDecomp { core, factors })
}
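
/// Reconstructs the full tensor from a [`TuckerDecomp`] by applying each
/// factor matrix to the core with successive mode-n products.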
pub fn tucker_reconstruct(decomp: &TuckerDecomp) -> LinalgResult<Array3<f64>> {
let core = &decomp.core;
let tmp0 = tensor_mode_product_view(&core.view(), &decomp.factors[0].view(), 0)?;
let tmp1 = tensor_mode_product_view(&tmp0.view(), &decomp.factors[1].view(), 1)?;
let result = tensor_mode_product_view(&tmp1.view(), &decomp.factors[2].view(), 2)?;
Ok(result)
}
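
/// Computes the HOSVD factors: for each mode, the leading `ranks[mode]`
/// left singular vectors of that mode's unfolding.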
fn compute_hosvd_factors(
    tensor: &ArrayView3<f64>,
    ranks: [usize; 3],
) -> LinalgResult<[Array2<f64>; 3]> {
    let mut factors_vec: Vec<Array2<f64>> = Vec::with_capacity(3);
    for mode in 0..3 {
        // Factor U_n: leading left singular vectors of the mode-n unfolding.
        let x_n = unfold_tensor_view(tensor, mode)?;
        let (u, _) = truncated_svd_left(&x_n, ranks[mode])?;
        factors_vec.push(u);
    }
    let factors: [Array2<f64>; 3] = factors_vec
        .try_into()
        .expect("exactly three factors are pushed above");
    Ok(factors)
}
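
/// Projects `tensor` onto the factor subspaces:
/// `core = tensor x_1 U_0^T x_2 U_1^T x_3 U_2^T`.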
fn project_to_core(
tensor: &ArrayView3<f64>,
factors: &[Array2<f64>; 3],
) -> LinalgResult<Array3<f64>> {
let ut0 = factors[0].t().to_owned();
let ut1 = factors[1].t().to_owned();
let ut2 = factors[2].t().to_owned();
let tmp0 = tensor_mode_product_view(tensor, &ut0.view(), 0)?;
let tmp1 = tensor_mode_product_view(&tmp0.view(), &ut1.view(), 1)?;
let core = tensor_mode_product_view(&tmp1.view(), &ut2.view(), 2)?;
Ok(core)
}
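
/// Applies `U_n^T` along every mode except `skip_mode`; HOOI then refits
/// the skipped mode's factor from this projected tensor.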
fn project_except_mode(
tensor: &ArrayView3<f64>,
factors: &[Array2<f64>; 3],
skip_mode: usize,
) -> LinalgResult<Array3<f64>> {
let modes: Vec<usize> = (0..3).filter(|&m| m != skip_mode).collect();
let ut = factors[modes[0]].t().to_owned();
let tmp = tensor_mode_product_view(tensor, &ut.view(), modes[0])?;
let ut2 = factors[modes[1]].t().to_owned();
let result = tensor_mode_product_view(&tmp.view(), &ut2.view(), modes[1])?;
Ok(result)
}
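
/// Returns the leading `r` left singular vectors of `matrix` together with
/// the corresponding singular values, truncating `r` to the number of
/// columns actually available.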
fn truncated_svd_left(matrix: &Array2<f64>, r: usize) -> LinalgResult<(Array2<f64>, Vec<f64>)> {
use crate::decomposition::svd;
let (u, s, _vt) = svd(&matrix.view(), true, None)?;
let actual_r = r.min(u.ncols());
let u_trunc = u
.slice(scirs2_core::ndarray::s![.., ..actual_r])
.to_owned();
let s_trunc: Vec<f64> = s.iter().take(actual_r).copied().collect();
Ok((u_trunc, s_trunc))
}
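
/// Frobenius norm of a third-order tensor (square root of the sum of
/// squared entries).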
fn frobenius_norm_3d(tensor: &ArrayView3<f64>) -> f64 {
    tensor.iter().map(|&v| v * v).sum::<f64>().sqrt()
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_abs_diff_eq;
use scirs2_core::ndarray::Array3;
fn make_test_tensor() -> Array3<f64> {
Array3::from_shape_fn((4, 5, 6), |(i, j, k)| (i * 30 + j * 6 + k) as f64)
}
#[test]
fn test_tucker_decomp_shapes() {
let t = make_test_tensor();
let d = tucker_decomp(&t, [2, 3, 4]).expect("tucker decomp ok");
assert_eq!(d.core.shape(), &[2, 3, 4]);
assert_eq!(d.factors[0].shape(), &[4, 2]);
assert_eq!(d.factors[1].shape(), &[5, 3]);
assert_eq!(d.factors[2].shape(), &[6, 4]);
}
#[test]
fn test_tucker_decomp_full_rank_lossless() {
let t = Array3::from_shape_fn((3, 3, 3), |(i, j, k)| (i + j + k) as f64 + 1.0);
let d = tucker_decomp(&t, [3, 3, 3]).expect("full rank ok");
let err = d.relative_error(&t);
assert!(err < 1e-8, "full rank error = {err}");
}
#[test]
fn test_tucker_decomp_low_rank() {
let t = make_test_tensor();
let d = tucker_decomp(&t, [2, 2, 2]).expect("low rank ok");
assert_eq!(d.core.shape(), &[2, 2, 2]);
let err = d.relative_error(&t);
assert!(err.is_finite(), "error must be finite");
}
#[test]
fn test_tucker_decomp_error_rank0() {
let t = make_test_tensor();
assert!(tucker_decomp(&t, [0, 2, 2]).is_err());
}
#[test]
fn test_tucker_decomp_error_rank_exceeds_dim() {
        let t = make_test_tensor();
        assert!(tucker_decomp(&t, [5, 3, 4]).is_err());
    }
#[test]
fn test_compression_ratio() {
        let t = make_test_tensor();
        let d = tucker_decomp(&t, [2, 3, 4]).expect("ok");
let ratio = d.compression_ratio([4, 5, 6]);
assert!((ratio - 120.0 / 71.0).abs() < 1e-10, "ratio = {ratio}");
}
#[test]
fn test_tucker_reconstruct_roundtrip() {
let t = Array3::from_shape_fn((3, 4, 5), |(i, j, k)| {
(i as f64 + 1.0) * (j as f64 + 1.0) + k as f64
});
let d = tucker_decomp(&t, [3, 4, 5]).expect("full rank ok");
let t2 = tucker_reconstruct(&d).expect("reconstruct ok");
for i in 0..3 {
for j in 0..4 {
for k in 0..5 {
assert_abs_diff_eq!(t[[i, j, k]], t2[[i, j, k]], epsilon = 1e-8);
}
}
}
}
#[test]
fn test_tucker_hooi_shapes() {
let t = make_test_tensor();
let d = tucker_hooi(&t, [2, 3, 4], 20, 1e-8).expect("hooi ok");
assert_eq!(d.core.shape(), &[2, 3, 4]);
assert_eq!(d.factors[0].shape(), &[4, 2]);
assert_eq!(d.factors[1].shape(), &[5, 3]);
assert_eq!(d.factors[2].shape(), &[6, 4]);
}
#[test]
fn test_tucker_hooi_better_than_hosvd() {
let t = make_test_tensor();
let hosvd = tucker_decomp(&t, [2, 2, 2]).expect("hosvd ok");
let hooi = tucker_hooi(&t, [2, 2, 2], 30, 1e-10).expect("hooi ok");
let err_hosvd = hosvd.relative_error(&t);
let err_hooi = hooi.relative_error(&t);
assert!(
err_hooi <= err_hosvd + 1e-6,
"HOOI error {err_hooi} > HOSVD error {err_hosvd}"
);
}
#[test]
fn test_tucker_hooi_full_rank_lossless() {
let t = Array3::from_shape_fn((3, 3, 3), |(i, j, k)| (i + j + k) as f64 + 1.0);
let d = tucker_hooi(&t, [3, 3, 3], 10, 1e-12).expect("hooi full rank ok");
let err = d.relative_error(&t);
assert!(err < 1e-7, "full rank hooi error = {err}");
}
#[test]
fn test_error_bound_consistency() {
let t = make_test_tensor();
let d = tucker_decomp(&t, [2, 3, 4]).expect("ok");
let eb = d.error_bound(&t);
let re = d.relative_error(&t);
let tensor_norm = frobenius_norm_3d(&t.view());
assert_abs_diff_eq!(eb, re * tensor_norm, epsilon = 1e-8);
}
#[test]
fn test_factor_orthogonality() {
let t = make_test_tensor();
let d = tucker_decomp(&t, [2, 3, 4]).expect("ok");
        for (mode, factor) in d.factors.iter().enumerate() {
            // HOSVD factors come from an SVD, so their columns must be
            // orthonormal: U^T U = I.
            let utu = factor.t().dot(factor);
            let r = factor.ncols();
            for i in 0..r {
                for j in 0..r {
                    let expected = if i == j { 1.0 } else { 0.0 };
                    assert!(
                        (utu[[i, j]] - expected).abs() < 1e-8,
                        "mode {mode}: (U^T U)[[{i}, {j}]] = {}, expected {expected}",
                        utu[[i, j]]
                    );
                }
            }
        }
}
}