tenrso_decomp/
lib.rs

1//! # tenrso-decomp - Tensor Decomposition Methods
2//!
3//! Production-grade tensor decomposition algorithms for scientific computing,
4//! machine learning, and data analysis.
5//!
6//! **Version:** 0.1.0-alpha.2
7//! **Tests:** 158 passing (100%)
8//! **Status:** M2 Complete - All algorithms implemented and tested
9//!
10//! ## Overview
11//!
12//! This crate provides high-performance implementations of three major tensor
13//! decomposition families:
14//!
15//! ### CP Decomposition (Canonical Polyadic / CANDECOMP/PARAFAC)
16//!
17//! Factorizes a tensor into a sum of rank-1 components:
18//!
19//! ```text
20//! X ≈ Σᵣ λᵣ (a₁ᵣ ⊗ a₂ᵣ ⊗ ... ⊗ aₙᵣ)
21//! ```
22//!
23//! **Use cases:**
24//! - Factor analysis and dimensionality reduction
25//! - Signal separation and blind source separation
26//! - Chemometrics and spectroscopy
27//! - Neuroscience (EEG/fMRI analysis)
28//!
29//! **Algorithms:**
30//! - `cp_als`: Alternating least squares with convergence detection
31//! - `cp_als_constrained`: With non-negativity, L2 regularization, orthogonality
32//! - `cp_als_accelerated`: Line search optimization for faster convergence
33//! - `cp_randomized`: Randomized sketching for large-scale tensors (NEW!)
34//! - `cp_completion`: Tensor completion with missing data (CP-WOPT)
35//!
36//! ### Tucker Decomposition (Higher-Order SVD)
37//!
38//! Factorizes a tensor into a core tensor and orthogonal factor matrices:
39//!
40//! ```text
41//! X ≈ G ×₁ U₁ ×₂ U₂ ×₃ ... ×ₙ Uₙ
42//! ```
43//!
44//! **Use cases:**
45//! - Image/video compression
46//! - Feature extraction from multi-way data
47//! - Gait recognition and motion analysis
48//! - Hyperspectral imaging
49//!
50//! **Algorithms:**
51//! - `tucker_hosvd`: Fast one-pass SVD-based decomposition
52//! - `tucker_hooi`: Iterative refinement for better approximation
53//! - `tucker_hosvd_auto`: Automatic rank selection (NEW!)
54//!   - Energy-based: Preserve X% of singular value energy
55//!   - Threshold-based: Keep σ > threshold × σ_max
56//!
57//! ### Tensor Train (TT) Decomposition
58//!
59//! Represents a tensor as a sequence of 3-way cores:
60//!
61//! ```text
62//! X(i₁,...,iₙ) = G₁[i₁] × G₂[i₂] × ... × Gₙ[iₙ]
63//! ```
64//!
65//! **Use cases:**
66//! - High-order tensor compression (6D+)
67//! - Quantum many-body systems
68//! - Stochastic PDEs
69//! - Tensor networks in machine learning
70//!
71//! **Algorithms:**
72//! - `tt_svd`: Sequential SVD with rank truncation
73//! - `tt_round`: Post-decomposition rank reduction
74//!
75//! **TT Operations (NEW!):**
76//! - `tt_add`: Addition of two TT decompositions
77//! - `tt_dot`: Inner product without reconstruction
78//! - `tt_hadamard`: Element-wise (Hadamard) product
79//!
80//! ## Quick Start
81//!
82//! ### CP Decomposition
83//!
84//! ```
85//! use tenrso_core::DenseND;
86//! use tenrso_decomp::{cp_als, InitStrategy};
87//!
88//! // Create a 50×50×50 tensor
89//! let tensor = DenseND::<f64>::random_uniform(&[50, 50, 50], 0.0, 1.0);
90//!
91//! // Decompose into rank-10 CP
92//! let cp = cp_als(&tensor, 10, 100, 1e-4, InitStrategy::Random, None)?;
93//!
94//! println!("Converged in {} iterations", cp.iters);
95//! println!("Final fit: {:.4}", cp.fit);
96//!
97//! // Reconstruct approximation
98//! let approx = cp.reconstruct(tensor.shape())?;
99//! # Ok::<(), anyhow::Error>(())
100//! ```
101//!
102//! ### Tucker Decomposition
103//!
104//! ```
105//! use tenrso_core::DenseND;
106//! use tenrso_decomp::tucker_hosvd;
107//!
108//! let tensor = DenseND::<f64>::random_uniform(&[30, 30, 30], 0.0, 1.0);
109//! let ranks = vec![15, 15, 15];
110//!
111//! // Tucker-HOSVD decomposition
112//! let tucker = tucker_hosvd(&tensor, &ranks)?;
113//!
114//! println!("Core shape: {:?}", tucker.core.shape());
115//! println!("Compression: {:.2}x", tucker.compression_ratio());
116//!
117//! let approx = tucker.reconstruct()?;
118//! # Ok::<(), anyhow::Error>(())
119//! ```
120//!
121//! ### Tensor Completion (NEW!)
122//!
123//! ```
124//! use scirs2_core::ndarray_ext::Array;
125//! use tenrso_core::DenseND;
126//! use tenrso_decomp::{cp_completion, InitStrategy};
127//!
128//! // Create tensor with missing entries
129//! let mut data = Array::<f64, _>::zeros(vec![20, 20, 20]);
130//! let mut mask = Array::<f64, _>::zeros(vec![20, 20, 20]);
131//!
132//! // Mark some entries as observed (1 = observed, 0 = missing)
133//! for i in 0..10 {
134//!     for j in 0..10 {
135//!         for k in 0..10 {
136//!             data[[i, j, k]] = (i + j + k) as f64 * 0.1;
137//!             mask[[i, j, k]] = 1.0;
138//!         }
139//!     }
140//! }
141//!
142//! let tensor = DenseND::from_array(data.into_dyn());
143//! let mask_tensor = DenseND::from_array(mask.into_dyn());
144//!
145//! // Complete the tensor (predict missing values)
146//! let cp = cp_completion(&tensor, &mask_tensor, 5, 100, 1e-4, InitStrategy::Random)?;
147//!
148//! // Get predictions for all entries (including missing ones)
149//! let completed = cp.reconstruct(tensor.shape())?;
150//! # Ok::<(), anyhow::Error>(())
151//! ```
152//!
153//! ### Tensor Train Decomposition
154//!
155//! ```
156//! use tenrso_core::DenseND;
157//! use tenrso_decomp::tt::{tt_svd, tt_round};
158//!
159//! let tensor = DenseND::<f64>::random_uniform(&[6, 6, 6, 6, 6], 0.0, 1.0);
160//!
161//! // TT-SVD with max ranks [5, 5, 5, 5]
162//! let tt = tt_svd(&tensor, &[5, 5, 5, 5], 1e-6)?;
163//! println!("TT-ranks: {:?}", tt.ranks);
164//!
165//! // Round to smaller ranks
166//! let tt_small = tt_round(&tt, &[3, 3, 3, 3], 1e-4)?;
167//! println!("Reduced ranks: {:?}", tt_small.ranks);
168//! # Ok::<(), anyhow::Error>(())
169//! ```
170//!
171//! ## Feature Flags
172//!
173//! Currently all features are enabled by default. Future versions may add:
174//! - `parallel`: Parallel tensor operations via Rayon
175//! - `gpu`: GPU acceleration via cuBLAS/ROCm
176//!
177//! ## SciRS2 Integration
178//!
179//! All linear algebra operations use `scirs2_linalg` for SVD, QR, and least-squares.
180//! Random number generation uses `scirs2_core::random`.
181//! Direct use of `ndarray` or `rand` is forbidden per project policy.
182//!
183//! ## Performance
184//!
185//! Typical performance on modern CPUs (single-threaded):
186//! - **CP-ALS**: 256³ tensor, rank 64, 10 iters → ~2s
187//! - **Tucker-HOOI**: 512×512×128, ranks \[64,64,32\], 10 iters → ~3s
188//! - **TT-SVD**: 32⁶ tensor, ε=1e-6 → ~2s
189//!
190//! ## References
191//!
192//! - Kolda & Bader (2009), "Tensor Decompositions and Applications"
193//! - De Lathauwer et al. (2000), "Multilinear Singular Value Decomposition"
194//! - Oseledets (2011), "Tensor-Train Decomposition"
195//! - Vervliet et al. (2016), "Tensorlab 3.0"
196
197#![deny(warnings)]
198
199pub mod cp;
200pub mod rank_selection;
201pub mod tt;
202pub mod tucker;
203pub mod utils;
204
205#[cfg(test)]
206mod property_tests;
207
208// Re-exports
209pub use cp::*;
210pub use rank_selection::*;
211pub use tt::*;
212pub use tucker::*;