use crate::decomposition::svd;
use crate::error::{LinalgError, LinalgResult};
use crate::tensor::core::{Tensor, TensorScalar};
use scirs2_core::ndarray::{Array2, Array3};
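/// A tensor in tensor-train (TT) format: a `d`-dimensional tensor
/// represented by `d` third-order cores, where `cores[k]` has shape
/// `(ranks[k], mode_sizes[k], ranks[k + 1])` and the boundary ranks are
/// `ranks[0] = ranks[d] = 1`. An entry is a product of core slices:
/// `A[i_1, …, i_d] = G_1[:, i_1, :] · G_2[:, i_2, :] · … · G_d[:, i_d, :]`.
/// Storage is O(d·n·r²) versus O(n^d) for the dense tensor.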
#[derive(Debug, Clone)]
pub struct TTCore<F> {
    /// The `d` third-order cores; `cores[k]` has shape
    /// `(ranks[k], mode_sizes[k], ranks[k + 1])`.
    pub cores: Vec<Array3<F>>,
    /// Mode sizes `n_1, …, n_d`.
    pub mode_sizes: Vec<usize>,
    /// TT ranks `r_0, …, r_d` (length `d + 1`), with `r_0 = r_d = 1`.
    pub ranks: Vec<usize>,
}
impl<F: TensorScalar> TTCore<F> {
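    /// Build a TT tensor from its cores, validating the rank chain: the
    /// first core's left rank and the last core's right rank must be 1,
    /// and adjacent cores must agree on their shared rank.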
pub fn new(cores: Vec<Array3<F>>) -> LinalgResult<Self> {
if cores.is_empty() {
return Err(LinalgError::ShapeError(
"TT tensor requires at least one core".to_string(),
));
}
let d = cores.len();
if cores[0].shape()[0] != 1 {
return Err(LinalgError::ShapeError(format!(
"First core left rank must be 1, got {}",
cores[0].shape()[0]
)));
}
if cores[d - 1].shape()[2] != 1 {
return Err(LinalgError::ShapeError(format!(
"Last core right rank must be 1, got {}",
cores[d - 1].shape()[2]
)));
}
for k in 0..(d - 1) {
if cores[k].shape()[2] != cores[k + 1].shape()[0] {
return Err(LinalgError::ShapeError(format!(
"Core {} right rank {} != core {} left rank {}",
k,
cores[k].shape()[2],
k + 1,
cores[k + 1].shape()[0]
)));
}
}
let mode_sizes: Vec<usize> = cores.iter().map(|c| c.shape()[1]).collect();
let mut ranks = Vec::with_capacity(d + 1);
ranks.push(1usize);
for core in &cores {
ranks.push(core.shape()[2]);
}
Ok(Self { cores, mode_sizes, ranks })
}
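    /// Number of modes (the order `d` of the represented tensor).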
pub fn ndim(&self) -> usize {
self.cores.len()
}
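    /// Evaluate a single entry `A[i_1, …, i_d]` by the chain of
    /// vector–matrix products `v ← v · G_k[:, i_k, :]`, starting from
    /// `v = [1]`. Each entry costs O(Σ_k r_k·r_{k+1}) operations,
    /// independent of the dense tensor size.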
pub fn get(&self, indices: &[usize]) -> LinalgResult<F> {
let d = self.ndim();
if indices.len() != d {
return Err(LinalgError::IndexError(format!(
"Index rank {} != tensor rank {}",
indices.len(),
d
)));
}
for (k, (&idx, &n)) in indices.iter().zip(self.mode_sizes.iter()).enumerate() {
if idx >= n {
return Err(LinalgError::IndexError(format!(
"Index {} out of bounds for mode {} (size {})",
idx, k, n
)));
}
}
let mut v: Vec<F> = vec![F::one()];
let mut left_rank = 1usize;
for (k, &idx) in indices.iter().enumerate() {
            let core = &self.cores[k];
            let r_right = core.shape()[2];
let mut v_new = vec![F::zero(); r_right];
for l in 0..left_rank {
for r in 0..r_right {
v_new[r] = v_new[r] + v[l] * core[[l, idx, r]];
}
}
v = v_new;
left_rank = r_right;
}
Ok(v[0])
}
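    /// Materialize the dense tensor by evaluating every entry. The output
    /// has Π_k n_k elements, so this is only practical for small tensors;
    /// it is used here mainly for testing.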
pub fn to_full(&self) -> LinalgResult<Tensor<F>> {
let shape = self.mode_sizes.clone();
let total: usize = shape.iter().product();
let d = self.ndim();
let strides = crate::tensor::core::compute_row_major_strides(&shape);
let mut data = vec![F::zero(); total];
for flat in 0..total {
let mut multi = vec![0usize; d];
let mut rem = flat;
for dim in (0..d).rev() {
multi[dim] = rem % shape[dim];
rem /= shape[dim];
}
            // `multi` was decoded in row-major order above, so its strided
            // offset equals `flat`; write the entry directly.
            data[flat] = self.get(&multi)?;
}
Ok(Tensor { data, shape, strides })
}
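    /// Frobenius norm, computed via dense reconstruction. For large
    /// tensors the same quantity is the square root of `tt_dot` of the
    /// tensor with itself, which never leaves the TT format.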
pub fn frobenius_norm(&self) -> LinalgResult<F> {
let full = self.to_full()?;
Ok(full.frobenius_norm())
}
}
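/// TT-SVD (Oseledets, SIAM J. Sci. Comput. 2011): compress a dense tensor
/// into TT format by `d − 1` sequential truncated SVDs of unfoldings. Each
/// SVD is truncated at `delta = eps·‖A‖_F / √(d − 1)`, which guarantees an
/// overall relative error of `‖A − TT‖_F ≤ eps·‖A‖_F`.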
pub fn tt_svd<F: TensorScalar>(tensor: &Tensor<F>, eps: F) -> LinalgResult<TTCore<F>> {
let d = tensor.ndim();
if d == 0 {
return Err(LinalgError::ShapeError("tensor must have at least 1 mode".to_string()));
}
let shape = &tensor.shape;
let tensor_norm = tensor.frobenius_norm();
let delta = if d > 1 {
eps * tensor_norm / F::from((d - 1) as f64).unwrap_or(F::one()).sqrt()
} else {
eps * tensor_norm
};
let mut cores: Vec<Array3<F>> = Vec::with_capacity(d);
let numel = tensor.numel();
let mut c_rows = 1usize;
let mut c_cols = numel;
let mut c_data: Vec<F> = tensor.data.clone();
    // Sweep left to right: at step k, reshape the remainder into an
    // (r_{k-1}·n_k) × (n_{k+1}···n_d) unfolding and split it with a
    // truncated SVD.
    for k in 0..(d - 1) {
        let n_k = shape[k];
        let new_rows = c_rows * n_k;
        let new_cols = c_cols / n_k;
        let mat = vec_to_array2(&c_data, new_rows, new_cols)?;
        let (u, s, vt) = svd(&mat.view(), false, None)?;
let r_k = choose_rank_threshold(&s, delta);
let r_k = r_k.max(1);
let r_prev = c_rows;
let mut core = Array3::<F>::zeros((r_prev, n_k, r_k));
for alpha in 0..r_prev {
for i in 0..n_k {
let row_idx = alpha * n_k + i;
for beta in 0..r_k {
core[[alpha, i, beta]] = u[[row_idx, beta]];
}
}
}
cores.push(core);
        // Carry Σ·Vᵀ forward as the remainder for the next step.
        let mut new_c = vec![F::zero(); r_k * new_cols];
for beta in 0..r_k {
for j in 0..new_cols {
new_c[beta * new_cols + j] = s[beta] * vt[[beta, j]];
}
}
c_rows = r_k;
c_cols = new_cols;
c_data = new_c;
}
let r_prev = c_rows;
let n_last = shape[d - 1];
if c_data.len() != r_prev * n_last {
return Err(LinalgError::ShapeError(format!(
"TT-SVD last core data size mismatch: {} != {} * {}",
c_data.len(), r_prev, n_last
)));
}
    // After d − 1 splits the remainder is exactly the last core (right rank 1).
    let mut last_core = Array3::<F>::zeros((r_prev, n_last, 1));
for alpha in 0..r_prev {
for i in 0..n_last {
last_core[[alpha, i, 0]] = c_data[alpha * n_last + i];
}
}
cores.push(last_core);
TTCore::new(cores)
}
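/// TT cross approximation: build a TT tensor from a black-box element
/// evaluator without materializing the full tensor. This implementation
/// runs a few alternating index-set sweeps (a simplified scheme, not full
/// maxvol-based cross interpolation), validates the result at a handful of
/// probe points, and falls back to dense evaluation plus `tt_svd` when the
/// sampled relative error exceeds 1%.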
pub fn tt_cross<F, Eval>(
shape: &[usize],
eval: Eval,
max_rank: usize,
eps: F,
) -> LinalgResult<TTCore<F>>
where
F: TensorScalar,
Eval: Fn(&[usize]) -> F,
{
let d = shape.len();
if d == 0 {
return Err(LinalgError::ShapeError("shape must be non-empty".to_string()));
}
let max_rank = max_rank.max(1);
    // Dense fallback: materialize the full tensor from `eval` and compress
    // it with TT-SVD. Exponential in d, so only acceptable for small shapes.
    let build_full = |shape: &[usize], eval: &dyn Fn(&[usize]) -> F, eps: F| -> LinalgResult<TTCore<F>> {
let total: usize = shape.iter().product();
let strides = crate::tensor::core::compute_row_major_strides(shape);
let mut data = vec![F::zero(); total];
let d = shape.len();
for flat in 0..total {
let mut multi = vec![0usize; d];
let mut rem = flat;
for dim in (0..d).rev() {
multi[dim] = rem % shape[dim];
rem /= shape[dim];
}
let val = eval(&multi);
let idx: usize = multi.iter().zip(strides.iter()).map(|(i, s)| i * s).sum();
data[idx] = val;
}
let tensor = Tensor { data, shape: shape.to_vec(), strides };
tt_svd(&tensor, eps)
};
    // Seed each boundary with a single all-zeros pivot index; the sweeps
    // below grow these nested index sets up to `max_rank`.
    let mut left_sets: Vec<Vec<Vec<usize>>> = (0..=d)
        .map(|k| if k == 0 { vec![vec![]] } else { vec![vec![0usize; k]] })
        .collect();
    let mut right_sets: Vec<Vec<Vec<usize>>> = (0..=d)
        .map(|k| vec![vec![0usize; d - k]])
        .collect();
    right_sets[d] = vec![vec![]];
let mut cores: Vec<Array3<F>> = (0..d)
.map(|k| Array3::<F>::zeros((1, shape[k], 1)))
.collect();
    // A small fixed number of alternating sweeps; the probe-point
    // validation below decides whether the result is acceptable.
    let n_sweeps = 3usize;
    let tol = eps;
for _sweep in 0..n_sweeps {
        // Forward sweep: grow the left index sets and update cores from the left.
        for k in 0..d {
let r_left = left_sets[k].len().max(1);
let r_right = right_sets[k + 1].len().max(1);
let n_k = shape[k];
let rows = r_left * n_k;
let cols = r_right;
let mut c_data = vec![F::zero(); rows * cols];
for (alpha, left_idx) in left_sets[k].iter().enumerate() {
for i_k in 0..n_k {
for (beta, right_idx) in right_sets[k + 1].iter().enumerate() {
let mut full_idx = left_idx.clone();
full_idx.push(i_k);
full_idx.extend_from_slice(right_idx);
let val = eval(&full_idx);
c_data[(alpha * n_k + i_k) * cols + beta] = val;
}
}
}
let mat = vec_to_array2(&c_data, rows, cols)?;
let (u, s, _vt) = svd(&mat.view(), false, None)?;
let new_rank = choose_rank_threshold(&s, tol).max(1).min(max_rank);
            // Heuristic pivot selection: keep the first `new_rank` expanded
            // indices in enumeration order (not maxvol pivoting); the
            // probe-point check at the end of this function guards against
            // a poor choice.
            let mut new_left: Vec<Vec<usize>> = Vec::with_capacity(new_rank);
'outer: for alpha in 0..r_left {
for i_k in 0..n_k {
if new_left.len() >= new_rank { break 'outer; }
let mut idx = left_sets[k][alpha].clone();
idx.push(i_k);
new_left.push(idx);
}
}
while new_left.len() < new_rank {
new_left.push(vec![0usize; k + 1]);
}
left_sets[k + 1] = new_left;
let r_prev = r_left;
let mut core = Array3::<F>::zeros((r_prev, n_k, new_rank));
for alpha in 0..r_prev {
for i_k in 0..n_k {
for beta in 0..new_rank.min(u.ncols()) {
core[[alpha, i_k, beta]] = u[[alpha * n_k + i_k, beta]];
}
}
}
cores[k] = core;
}
        // Backward sweep: mirror of the forward pass, growing the right
        // index sets and updating cores from the right.
        for k in (0..d).rev() {
let r_left = left_sets[k].len().max(1);
let r_right = right_sets[k + 1].len().max(1);
let n_k = shape[k];
let rows = r_left;
let cols = n_k * r_right;
let mut c_data = vec![F::zero(); rows * cols];
for (alpha, left_idx) in left_sets[k].iter().enumerate() {
for i_k in 0..n_k {
for (beta, right_idx) in right_sets[k + 1].iter().enumerate() {
let mut full_idx = left_idx.clone();
full_idx.push(i_k);
full_idx.extend_from_slice(right_idx);
let val = eval(&full_idx);
c_data[alpha * cols + i_k * r_right + beta] = val;
}
}
}
let mat = vec_to_array2(&c_data, rows, cols)?;
let (_u, s, vt) = svd(&mat.view(), false, None)?;
let new_rank = choose_rank_threshold(&s, tol).max(1).min(max_rank);
            // Same enumeration-order pivot heuristic as in the forward sweep.
            let mut new_right: Vec<Vec<usize>> = Vec::with_capacity(new_rank);
'outer2: for i_k in 0..n_k {
for beta in 0..r_right {
if new_right.len() >= new_rank { break 'outer2; }
let mut idx = vec![i_k];
idx.extend_from_slice(&right_sets[k + 1][beta]);
new_right.push(idx);
}
}
while new_right.len() < new_rank {
new_right.push(vec![0usize; d - k]);
}
right_sets[k] = new_right;
            let r_new_right = new_rank;
            let mut core = Array3::<F>::zeros((r_left, n_k, r_new_right));
            for alpha in 0..r_left {
                for i_k in 0..n_k {
                    for beta in 0..r_new_right.min(vt.nrows()) {
                        // Defensive clamps: when `new_rank` exceeds the old
                        // `r_right` the column index saturates. This is part
                        // of the heuristic and is backstopped by the
                        // probe-point validation below.
                        let col = i_k * r_right + beta.min(r_right - 1);
                        if col < vt.ncols() {
                            core[[alpha, i_k, beta]] = vt[[beta, col]];
                        }
                    }
                }
            }
cores[k] = core;
}
}
let cross_result = TTCore::new(cores);
match cross_result {
Ok(tt) => {
            let mut max_rel_err = F::zero();
            // Spot-check the cross result at three probe points: the centre
            // index, the all-zeros corner, and the all-max corner.
            let samples: Vec<Vec<usize>> = vec![
                (0..d).map(|k| shape[k] / 2).collect(),
                vec![0usize; d],
                (0..d).map(|k| shape[k] - 1).collect(),
            ];
            for sample in &samples {
                let expected = eval(sample);
                let got = tt.get(sample).unwrap_or(F::zero());
                if expected.abs() > F::from(1e-12_f64).unwrap_or(F::zero()) {
                    let rel = ((got - expected) / expected).abs();
                    if rel > max_rel_err {
                        max_rel_err = rel;
                    }
                } else if got.abs() > F::from(1e-10_f64).unwrap_or(F::zero()) {
                    // Expected ≈ 0 but got a noticeably nonzero value:
                    // count the probe as a failure.
                    max_rel_err = F::one();
                }
            }
            // Accept the cross result only if all probes agree to within 1%;
            // otherwise fall back to the dense construction.
            let threshold = F::from(0.01_f64).unwrap_or(F::one());
            if max_rel_err < threshold {
                Ok(tt)
            } else {
                build_full(shape, &eval, eps)
            }
}
Err(_) => {
build_full(shape, &eval, eps)
}
}
}
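/// TT rounding (rank truncation): re-compress a TT tensor to relative
/// accuracy `eps` without going through the dense tensor. A left-to-right
/// sweep orthogonalizes the cores, then a right-to-left sweep truncates
/// each core with an SVD at threshold `eps·‖A‖_F / √(d − 1)`.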
pub fn tt_round<F: TensorScalar>(tt: &TTCore<F>, eps: F) -> LinalgResult<TTCore<F>> {
let d = tt.ndim();
if d == 0 {
return Err(LinalgError::ShapeError("empty TT tensor".to_string()));
}
let mut cores = tt.cores.clone();
    // Pass 1 (left to right): orthogonalize each core via a thin QR-like
    // factorization, pushing the non-orthogonal factor R into the next core.
    for k in 0..(d - 1) {
let (r_left, n_k, r_right) = (
cores[k].shape()[0],
cores[k].shape()[1],
cores[k].shape()[2],
);
let mut mat_data = vec![F::zero(); r_left * n_k * r_right];
for alpha in 0..r_left {
for i in 0..n_k {
for beta in 0..r_right {
mat_data[(alpha * n_k + i) * r_right + beta] = cores[k][[alpha, i, beta]];
}
}
}
let mat = vec_to_array2(&mat_data, r_left * n_k, r_right)?;
let (q, r_mat) = qr_thin(&mat)?;
let q_cols = q.ncols();
let mut new_core_k = Array3::<F>::zeros((r_left, n_k, q_cols));
for alpha in 0..r_left {
for i in 0..n_k {
for beta in 0..q_cols {
new_core_k[[alpha, i, beta]] = q[[alpha * n_k + i, beta]];
}
}
}
cores[k] = new_core_k;
let r_next_left = cores[k + 1].shape()[0];
let n_next = cores[k + 1].shape()[1];
let r_next_right = cores[k + 1].shape()[2];
        // Absorb R into the next core:
        // G_{k+1}[α, i, β] ← Σ_γ R[α, γ]·G_{k+1}[γ, i, β].
        // `r_mat` has shape (q_cols, r_next_left), so every index below is
        // in bounds and no defensive clamping is needed.
        let mut new_core_k1 = Array3::<F>::zeros((q_cols, n_next, r_next_right));
        for alpha in 0..q_cols {
            for i in 0..n_next {
                for beta in 0..r_next_right {
                    let mut s = F::zero();
                    for gamma in 0..r_next_left {
                        s = s + r_mat[[alpha, gamma]] * cores[k + 1][[gamma, i, beta]];
                    }
                    new_core_k1[[alpha, i, beta]] = s;
                }
            }
        }
cores[k + 1] = new_core_k1;
}
    // After left-orthogonalization every core except the last has
    // orthonormal columns, so the tensor's Frobenius norm equals the norm
    // of the last core alone.
    let tensor_sq_norm: F = {
        let last = &cores[d - 1];
        last.iter().map(|&x| x * x).fold(F::zero(), |a, b| a + b)
    };
    let tensor_norm = tensor_sq_norm.sqrt();
let delta = if d > 1 {
eps * tensor_norm / F::from((d - 1) as f64).unwrap_or(F::one()).sqrt()
} else {
eps * tensor_norm
};
    // Pass 2 (right to left): truncate each core with an SVD at threshold
    // `delta`, absorbing U·diag(σ) into the neighbour on the left.
    for k in (1..d).rev() {
let (r_left, n_k, r_right) = (
cores[k].shape()[0],
cores[k].shape()[1],
cores[k].shape()[2],
);
let mut mat_data = vec![F::zero(); r_left * n_k * r_right];
for alpha in 0..r_left {
for i in 0..n_k {
for beta in 0..r_right {
mat_data[alpha * (n_k * r_right) + i * r_right + beta] =
cores[k][[alpha, i, beta]];
}
}
}
let mat = vec_to_array2(&mat_data, r_left, n_k * r_right)?;
let (u, s, vt) = svd(&mat.view(), false, None)?;
let new_rank = choose_rank_threshold(&s, delta).max(1);
let mut new_core_k = Array3::<F>::zeros((new_rank, n_k, r_right));
for beta in 0..new_rank {
for i in 0..n_k {
for gamma in 0..r_right {
new_core_k[[beta, i, gamma]] = vt[[beta, i * r_right + gamma]];
}
}
}
cores[k] = new_core_k;
let r_prev_left = cores[k - 1].shape()[0];
let n_prev = cores[k - 1].shape()[1];
let r_prev_right = cores[k - 1].shape()[2];
let mut new_core_km1 = Array3::<F>::zeros((r_prev_left, n_prev, new_rank));
        for alpha in 0..r_prev_left {
            for i in 0..n_prev {
                for beta in 0..new_rank {
                    // Absorb U·diag(σ) into the previous core:
                    // G_{k-1}[α, i, β] ← (Σ_γ G_{k-1}[α, i, γ]·U[γ, β])·σ_β.
                    // γ < r_prev_right = u.nrows() and β < new_rank ≤
                    // u.ncols(), so no defensive clamping is needed.
                    let mut sum = F::zero();
                    for gamma in 0..r_prev_right {
                        sum = sum + cores[k - 1][[alpha, i, gamma]] * u[[gamma, beta]];
                    }
                    new_core_km1[[alpha, i, beta]] = sum * s[beta];
                }
            }
        }
cores[k - 1] = new_core_km1;
}
TTCore::new(cores)
}
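/// Elementwise sum of two TT tensors with matching mode sizes. The cores
/// are combined block-wise, so the resulting TT ranks are the sums of the
/// operands' ranks; apply `tt_round` afterwards to re-compress.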
pub fn tt_add<F: TensorScalar>(a: &TTCore<F>, b: &TTCore<F>) -> LinalgResult<TTCore<F>> {
if a.mode_sizes != b.mode_sizes {
return Err(LinalgError::ShapeError(format!(
"TT add: mode sizes differ {:?} vs {:?}",
a.mode_sizes, b.mode_sizes
)));
}
let d = a.ndim();
let mut cores: Vec<Array3<F>> = Vec::with_capacity(d);
for k in 0..d {
        let ca = &a.cores[k];
        let cb = &b.cores[k];
        let (ra_l, n, ra_r) = (ca.shape()[0], ca.shape()[1], ca.shape()[2]);
        let (rb_l, _, rb_r) = (cb.shape()[0], cb.shape()[1], cb.shape()[2]);
        if k == 0 {
            // First core: concatenate along the right rank, [A₁ | B₁].
            let mut core = Array3::<F>::zeros((1, n, ra_r + rb_r));
for i in 0..n {
for beta in 0..ra_r {
core[[0, i, beta]] = ca[[0, i, beta]];
}
for beta in 0..rb_r {
core[[0, i, ra_r + beta]] = cb[[0, i, beta]];
}
}
cores.push(core);
        } else if k == d - 1 {
            // Last core: stack along the left rank, [A_d; B_d].
            let mut core = Array3::<F>::zeros((ra_l + rb_l, n, 1));
for alpha in 0..ra_l {
for i in 0..n {
core[[alpha, i, 0]] = ca[[alpha, i, 0]];
}
}
for alpha in 0..rb_l {
for i in 0..n {
core[[ra_l + alpha, i, 0]] = cb[[alpha, i, 0]];
}
}
cores.push(core);
        } else {
            // Middle cores: block-diagonal, diag(A_k, B_k).
            let mut core = Array3::<F>::zeros((ra_l + rb_l, n, ra_r + rb_r));
for alpha in 0..ra_l {
for i in 0..n {
for beta in 0..ra_r {
core[[alpha, i, beta]] = ca[[alpha, i, beta]];
}
}
}
for alpha in 0..rb_l {
for i in 0..n {
for beta in 0..rb_r {
core[[ra_l + alpha, i, ra_r + beta]] = cb[[alpha, i, beta]];
}
}
}
cores.push(core);
}
}
TTCore::new(cores)
}
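/// Elementwise (Hadamard) product of two TT tensors. Each result core is
/// the Kronecker product of the operands' cores in the rank indices, so
/// the TT ranks multiply; apply `tt_round` afterwards to re-compress.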
pub fn tt_hadamard<F: TensorScalar>(a: &TTCore<F>, b: &TTCore<F>) -> LinalgResult<TTCore<F>> {
if a.mode_sizes != b.mode_sizes {
return Err(LinalgError::ShapeError(format!(
"TT hadamard: mode sizes differ {:?} vs {:?}",
a.mode_sizes, b.mode_sizes
)));
}
let d = a.ndim();
let mut cores: Vec<Array3<F>> = Vec::with_capacity(d);
for k in 0..d {
        let ca = &a.cores[k];
        let cb = &b.cores[k];
        let (ra_l, n, ra_r) = (ca.shape()[0], ca.shape()[1], ca.shape()[2]);
        let (rb_l, _, rb_r) = (cb.shape()[0], cb.shape()[1], cb.shape()[2]);
let new_l = ra_l * rb_l;
let new_r = ra_r * rb_r;
        // Kronecker product of the cores in the rank indices:
        // C[(αa, αb), i, (βa, βb)] = A[αa, i, βa]·B[αb, i, βb].
        let mut core = Array3::<F>::zeros((new_l, n, new_r));
for alpha_a in 0..ra_l {
for alpha_b in 0..rb_l {
for i in 0..n {
for beta_a in 0..ra_r {
for beta_b in 0..rb_r {
core[[alpha_a * rb_l + alpha_b, i, beta_a * rb_r + beta_b]] =
ca[[alpha_a, i, beta_a]] * cb[[alpha_b, i, beta_b]];
}
}
}
}
}
cores.push(core);
}
TTCore::new(cores)
}
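/// Multiply a TT tensor by a scalar. Scaling any single core scales the
/// whole tensor, so only the first core is touched and the ranks stay
/// unchanged.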
pub fn tt_scale<F: TensorScalar>(tt: &TTCore<F>, scalar: F) -> LinalgResult<TTCore<F>> {
if tt.ndim() == 0 {
return Err(LinalgError::ShapeError("empty TT".to_string()));
}
let mut cores = tt.cores.clone();
for val in cores[0].iter_mut() {
*val = *val * scalar;
}
TTCore::new(cores)
}
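/// Inner product `⟨A, B⟩ = Σ_i A[i]·B[i]` of two TT tensors, computed by
/// contracting the cores mode by mode through a transfer matrix of shape
/// `(r_k^A, r_k^B)`, never forming the dense tensors.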
pub fn tt_dot<F: TensorScalar>(a: &TTCore<F>, b: &TTCore<F>) -> LinalgResult<F> {
if a.mode_sizes != b.mode_sizes {
return Err(LinalgError::ShapeError(
"tt_dot: mode sizes differ".to_string(),
));
}
let d = a.ndim();
let mut m: Vec<F> = vec![F::one()];
let mut m_rows = 1usize;
let mut m_cols = 1usize;
for k in 0..d {
        let ca = &a.cores[k];
        let cb = &b.cores[k];
        let (ra_l, n_k, ra_r) = (ca.shape()[0], ca.shape()[1], ca.shape()[2]);
        let (rb_l, _, rb_r) = (cb.shape()[0], cb.shape()[1], cb.shape()[2]);
let new_rows = ra_r;
let new_cols = rb_r;
let mut m_new = vec![F::zero(); new_rows * new_cols];
for beta_a in 0..ra_r {
for beta_b in 0..rb_r {
let mut val = F::zero();
for alpha_a in 0..ra_l {
for alpha_b in 0..rb_l {
                        // By the rank-chain property `m` has shape
                        // (ra_l, rb_l), so this index is always in bounds.
                        let m_val = m[alpha_a * m_cols + alpha_b];
                        if m_val == F::zero() {
                            // Cheap shortcut: skip exact zeros in the
                            // transfer matrix.
                            continue;
                        }
for i_k in 0..n_k {
val = val + m_val * ca[[alpha_a, i_k, beta_a]] * cb[[alpha_b, i_k, beta_b]];
}
}
}
m_new[beta_a * new_cols + beta_b] = val;
}
}
m = m_new;
m_rows = new_rows;
m_cols = new_cols;
}
Ok(m[0])
}
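/// Copy a row-major slice into an `Array2`, checking that the length
/// matches `rows × cols`.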
fn vec_to_array2<F: TensorScalar>(
data: &[F],
rows: usize,
cols: usize,
) -> LinalgResult<Array2<F>> {
if data.len() != rows * cols {
return Err(LinalgError::ShapeError(format!(
"data length {} != {}×{}={}",
data.len(),
rows,
cols,
rows * cols
)));
}
let mut mat = Array2::<F>::zeros((rows, cols));
for i in 0..rows {
for j in 0..cols {
mat[[i, j]] = data[i * cols + j];
}
}
Ok(mat)
}
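/// Smallest truncation rank `r` (at least 1) such that the discarded
/// singular values satisfy `√(Σ_{i≥r} σ_i²) ≤ delta`.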
fn choose_rank_threshold<F: TensorScalar>(s: &scirs2_core::ndarray::Array1<F>, delta: F) -> usize {
let total_sq: F = s.iter().map(|&x| x * x).fold(F::zero(), |a, b| a + b);
let mut tail_sq = total_sq;
    for (k, &sv) in s.iter().enumerate() {
        // `tail_sq` holds Σ_{i≥k} σ_i², the squared error of keeping only
        // the first k singular values; accept the smallest admissible k.
        if tail_sq.sqrt() <= delta {
            return k.max(1);
        }
        tail_sq = tail_sq - sv * sv;
    }
s.len().max(1)
}
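/// Thin orthogonal factorization `mat = Q·R` built from an SVD: `Q = U`
/// has orthonormal columns and `R = Σ·Vᵀ`. `R` is not upper triangular, so
/// this is not a true QR decomposition, but `tt_round` only relies on the
/// orthonormality of `Q`, for which this factorization suffices.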
fn qr_thin<F: TensorScalar>(mat: &Array2<F>) -> LinalgResult<(Array2<F>, Array2<F>)> {
    let (u, s, vt) = svd(&mat.view(), false, None)?;
    let k = s.len();
    let n = vt.ncols();
    let m = u.nrows();
    let q_cols = k.min(m);
    // R = Σ·Vᵀ.
    let mut r_mat = Array2::<F>::zeros((q_cols, n));
    for i in 0..q_cols {
        for j in 0..n {
            r_mat[[i, j]] = s[i] * vt[[i, j]];
        }
    }
    // Q = U (orthonormal columns).
    let mut q = Array2::<F>::zeros((m, q_cols));
    for i in 0..m {
        for j in 0..q_cols {
            q[[i, j]] = u[[i, j]];
        }
    }
    Ok((q, r_mat))
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_abs_diff_eq;
fn make_tensor_234() -> Tensor<f64> {
let data: Vec<f64> = (0..24).map(|x| x as f64 + 1.0).collect();
Tensor::new(data, vec![2, 3, 4]).expect("valid")
}
#[test]
fn test_tt_svd_shape() {
let t = make_tensor_234();
let tt = tt_svd(&t, 1e-10_f64).expect("tt_svd ok");
assert_eq!(tt.ndim(), 3);
assert_eq!(tt.mode_sizes, vec![2, 3, 4]);
assert_eq!(tt.ranks[0], 1);
assert_eq!(*tt.ranks.last().expect("last rank"), 1);
}
#[test]
fn test_tt_svd_lossless_reconstruction() {
let t = make_tensor_234();
let tt = tt_svd(&t, 1e-12_f64).expect("tt_svd");
let full = tt.to_full().expect("to_full");
for (a, b) in t.data.iter().zip(full.data.iter()) {
assert_abs_diff_eq!(a, b, epsilon = 1e-8);
}
}
#[test]
fn test_tt_get_element() {
let t = make_tensor_234();
let tt = tt_svd(&t, 1e-12_f64).expect("tt_svd");
for i in 0..2 {
for j in 0..3 {
for k in 0..4 {
let expected = t.get(&[i, j, k]).expect("get");
let got = tt.get(&[i, j, k]).expect("tt_get");
assert!(
(expected - got).abs() < 1e-8,
"mismatch at ({i},{j},{k}): expected={expected}, got={got}"
);
}
}
}
}
#[test]
fn test_tt_add() {
let t = make_tensor_234();
let tt = tt_svd(&t, 1e-12_f64).expect("tt_svd");
let tt2 = tt_add(&tt, &tt).expect("tt_add");
let full2 = tt2.to_full().expect("to_full");
let full1 = tt.to_full().expect("orig");
for (a, b) in full2.data.iter().zip(full1.data.iter()) {
assert_abs_diff_eq!(*a, 2.0 * b, epsilon = 1e-7);
}
}
#[test]
fn test_tt_hadamard() {
let t = make_tensor_234();
let tt = tt_svd(&t, 1e-12_f64).expect("tt_svd");
let tt_sq = tt_hadamard(&tt, &tt).expect("tt_hadamard");
let full_sq = tt_sq.to_full().expect("to_full");
for (a, b) in full_sq.data.iter().zip(t.data.iter()) {
assert_abs_diff_eq!(*a, b * b, epsilon = 1e-6);
}
}
#[test]
fn test_tt_scale() {
let t = make_tensor_234();
let tt = tt_svd(&t, 1e-12_f64).expect("tt_svd");
let tt3 = tt_scale(&tt, 3.0_f64).expect("scale");
let full3 = tt3.to_full().expect("to_full");
for (a, b) in full3.data.iter().zip(t.data.iter()) {
assert_abs_diff_eq!(*a, 3.0 * b, epsilon = 1e-7);
}
}
#[test]
fn test_tt_dot() {
let t = make_tensor_234();
let tt = tt_svd(&t, 1e-12_f64).expect("tt_svd");
let inner = tt_dot(&tt, &tt).expect("tt_dot");
let norm_sq: f64 = t.data.iter().map(|&x| x * x).sum();
assert_abs_diff_eq!(inner, norm_sq, epsilon = 1e-5);
}
#[test]
fn test_tt_round_does_not_increase_ranks() {
let t = make_tensor_234();
let tt = tt_svd(&t, 0.0_f64).expect("tt_svd");
let tt_r = tt_round(&tt, 0.1_f64).expect("tt_round");
for (&r_orig, &r_comp) in tt.ranks.iter().zip(tt_r.ranks.iter()) {
assert!(
r_comp <= r_orig + 1,
"compressed rank {r_comp} > original {r_orig}"
);
}
}
#[test]
fn test_tt_cross_simple() {
let tt = tt_cross(
&[3, 3, 3],
|idx: &[usize]| -> f64 {
((idx[0] + 1) * (idx[1] + 1) * (idx[2] + 1)) as f64
},
10,
1e-8_f64,
).expect("tt_cross ok");
assert_eq!(tt.ndim(), 3);
assert_eq!(tt.mode_sizes, vec![3, 3, 3]);
assert_eq!(tt.ranks[0], 1, "left boundary rank must be 1");
assert_eq!(*tt.ranks.last().expect("last rank"), 1, "right boundary rank must be 1");
let full = tt.to_full().expect("to_full");
assert_eq!(full.shape, vec![3, 3, 3]);
for i in 0..3 {
for j in 0..3 {
for k in 0..3 {
let expected = ((i + 1) * (j + 1) * (k + 1)) as f64;
let idx_flat = i * 9 + j * 3 + k;
let got = full.data[idx_flat];
assert!(
(got - expected).abs() < 0.1,
"coarse check at ({i},{j},{k}): got={got}, expected={expected}"
);
}
}
}
}
}