ruvector_math/tensor_networks/tucker.rs

use super::DenseTensor;

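/// Configuration options for the Tucker (HOSVD) decomposition.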
#[derive(Debug, Clone)]
pub struct TuckerConfig {
    /// Target rank per mode; entries are clamped to the mode size, and modes
    /// without an entry keep their full size.
    pub ranks: Vec<usize>,
    /// Convergence tolerance; currently not consumed by the one-pass `hosvd`.
    pub tolerance: f64,
    /// Number of power-iteration steps used per singular vector.
    pub max_iters: usize,
}

impl Default for TuckerConfig {
    fn default() -> Self {
        Self {
            ranks: vec![],
            tolerance: 1e-10,
            max_iters: 20,
        }
    }
}

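/// Tucker decomposition of a dense tensor: a small core tensor plus one factor
/// matrix per mode.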
#[derive(Debug, Clone)]
pub struct TuckerDecomposition {
    /// Core tensor with shape `core_shape`.
    pub core: DenseTensor,
    /// Factor matrices, one per mode, each stored row-major as `shape[k] x core_shape[k]`.
    pub factors: Vec<Vec<f64>>,
    /// Shape of the original tensor.
    pub shape: Vec<usize>,
    /// Shape of the core tensor (the chosen ranks).
    pub core_shape: Vec<usize>,
}

impl TuckerDecomposition {
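    /// Higher-order SVD (HOSVD): for each mode k, takes the leading left singular
    /// vectors of the mode-k unfolding as the factor matrix, then projects the
    /// tensor onto those bases to form the core.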
    pub fn hosvd(tensor: &DenseTensor, config: &TuckerConfig) -> Self {
        let d = tensor.order();
        let mut factors = Vec::new();
        let mut core_shape = Vec::new();

        for k in 0..d {
            // Unfold along mode k into an n_k x (product of remaining dims) matrix.
            let unfolding = mode_k_unfold(tensor, k);
            let (n_k, cols) = (tensor.shape[k], unfolding.len() / tensor.shape[k]);

            // Use the requested rank if given, otherwise keep the full mode size.
            let rank = if k < config.ranks.len() {
                config.ranks[k].min(n_k)
            } else {
                n_k
            };

            let u_k = compute_left_singular_vectors(&unfolding, n_k, cols, rank, config.max_iters);

            factors.push(u_k);
            core_shape.push(rank);
        }

        // Core = tensor contracted with the transpose of every factor matrix.
        let core = compute_core(tensor, &factors, &core_shape);

        Self {
            core,
            factors,
            shape: tensor.shape.clone(),
            core_shape,
        }
    }

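    /// Reconstructs the full tensor by applying each factor matrix back to the core.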
    pub fn to_dense(&self) -> DenseTensor {
        let mut result = self.core.data.clone();
        let mut current_shape = self.core_shape.clone();

        for (k, factor) in self.factors.iter().enumerate() {
            let n_k = self.shape[k];
            let r_k = self.core_shape[k];

            // Mode-k product with U_k expands dimension k from r_k back to n_k.
            result = apply_mode_product(&result, &current_shape, factor, n_k, r_k, k);
            current_shape[k] = n_k;
        }

        DenseTensor::new(result, self.shape.clone())
    }

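    /// Number of elements in the original tensor divided by the number of values
    /// stored by the decomposition (core plus factor matrices).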
    pub fn compression_ratio(&self) -> f64 {
        let original: usize = self.shape.iter().product();
        let core_size: usize = self.core_shape.iter().product();
        let factor_size: usize = self.factors.iter()
            .enumerate()
            .map(|(k, _)| self.shape[k] * self.core_shape[k])
            .sum();

        original as f64 / (core_size + factor_size) as f64
    }
}

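/// Mode-k unfolding: lays the tensor out as an `n_k x (product of the other dims)`
/// matrix, assuming row-major storage with the last index varying fastest.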
fn mode_k_unfold(tensor: &DenseTensor, k: usize) -> Vec<f64> {
    let d = tensor.order();
    let n_k = tensor.shape[k];
    let cols: usize = tensor.shape.iter().enumerate()
        .filter(|&(i, _)| i != k)
        .map(|(_, &s)| s)
        .product();

    let mut result = vec![0.0; n_k * cols];

    let total_size: usize = tensor.shape.iter().product();
    let mut indices = vec![0usize; d];

    for flat_idx in 0..total_size {
        let val = tensor.data[flat_idx];
        let i_k = indices[k];

        // Column index is the row-major offset over all modes except k.
        let mut col = 0;
        let mut stride = 1;
        for m in (0..d).rev() {
            if m != k {
                col += indices[m] * stride;
                stride *= tensor.shape[m];
            }
        }

        result[i_k * cols + col] = val;

        // Advance the multi-index in row-major order (last index fastest).
        for m in (0..d).rev() {
            indices[m] += 1;
            if indices[m] < tensor.shape[m] {
                break;
            }
            indices[m] = 0;
        }
    }

    result
}

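/// Approximates the top `rank` left singular vectors of the row-major `rows x cols`
/// matrix `a` via power iteration on A*Aᵀ, with Gram-Schmidt deflation against the
/// vectors already found. Returns a row-major `rows x rank` matrix whose columns are
/// approximately orthonormal.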
fn compute_left_singular_vectors(a: &[f64], rows: usize, cols: usize, rank: usize, max_iters: usize) -> Vec<f64> {
    let mut u = vec![0.0; rows * rank];

    for r in 0..rank {
        // Deterministic pseudo-random starting vector for this component.
        let mut v: Vec<f64> = (0..rows)
            .map(|i| ((i * 2654435769 + r * 1103515245) as f64 / 4294967296.0) * 2.0 - 1.0)
            .collect();
        normalize(&mut v);

        for _ in 0..max_iters {
            // av = Aᵀ v
            let mut av = vec![0.0; cols];
            for i in 0..rows {
                for j in 0..cols {
                    av[j] += a[i * cols + j] * v[i];
                }
            }

            // aatv = A Aᵀ v
            let mut aatv = vec![0.0; rows];
            for i in 0..rows {
                for j in 0..cols {
                    aatv[i] += a[i * cols + j] * av[j];
                }
            }

            // Deflate: remove the components along previously found singular vectors.
            for prev in 0..r {
                let mut dot = 0.0;
                for i in 0..rows {
                    dot += aatv[i] * u[i * rank + prev];
                }
                for i in 0..rows {
                    aatv[i] -= dot * u[i * rank + prev];
                }
            }

            v = aatv;
            normalize(&mut v);
        }

        // Store the converged vector as column r of U.
        for i in 0..rows {
            u[i * rank + r] = v[i];
        }
    }

    u
}

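/// Normalizes `v` to unit Euclidean length; near-zero vectors are left untouched.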
fn normalize(v: &mut [f64]) {
    let norm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
    if norm > 1e-15 {
        for x in v.iter_mut() {
            *x /= norm;
        }
    }
}

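/// Computes the Tucker core by contracting the tensor with the transpose of every
/// factor matrix, one mode at a time.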
fn compute_core(tensor: &DenseTensor, factors: &[Vec<f64>], core_shape: &[usize]) -> DenseTensor {
    let mut result = tensor.data.clone();
    let mut current_shape = tensor.shape.clone();

    for (k, factor) in factors.iter().enumerate() {
        let n_k = tensor.shape[k];
        let r_k = core_shape[k];

        // Mode-k product with U_kᵀ shrinks dimension k from n_k down to r_k.
        result = apply_mode_product_transpose(&result, &current_shape, factor, n_k, r_k, k);
        current_shape[k] = r_k;
    }

    DenseTensor::new(result, core_shape.to_vec())
}

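/// Mode-k product with Uᵀ: contracts dimension k (size `n_k`) down to `r_k`,
/// where `u` is a row-major `n_k x r_k` factor matrix.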
fn apply_mode_product_transpose(data: &[f64], shape: &[usize], u: &[f64], n_k: usize, r_k: usize, k: usize) -> Vec<f64> {
    let d = shape.len();
    debug_assert_eq!(shape[k], n_k);

    let mut new_shape = shape.to_vec();
    new_shape[k] = r_k;

    let new_size: usize = new_shape.iter().product();
    let mut result = vec![0.0; new_size];

    let old_size: usize = shape.iter().product();
    let mut old_indices = vec![0usize; d];

    for _ in 0..old_size {
        let old_idx = compute_linear_index(&old_indices, shape);
        let val = data[old_idx];
        let i_k = old_indices[k];

        // result[..., r, ...] += U[i_k][r] * data[..., i_k, ...]
        for r in 0..r_k {
            let mut new_indices = old_indices.clone();
            new_indices[k] = r;
            let new_idx = compute_linear_index(&new_indices, &new_shape);

            result[new_idx] += val * u[i_k * r_k + r];
        }

        // Advance the multi-index in row-major order.
        for m in (0..d).rev() {
            old_indices[m] += 1;
            if old_indices[m] < shape[m] {
                break;
            }
            old_indices[m] = 0;
        }
    }

    result
}

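/// Mode-k product with U: expands dimension k (size `r_k`) up to `n_k`,
/// where `u` is a row-major `n_k x r_k` factor matrix.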
fn apply_mode_product(data: &[f64], shape: &[usize], u: &[f64], n_k: usize, r_k: usize, k: usize) -> Vec<f64> {
    let d = shape.len();
    debug_assert_eq!(shape[k], r_k);

    let mut new_shape = shape.to_vec();
    new_shape[k] = n_k;

    let new_size: usize = new_shape.iter().product();
    let mut result = vec![0.0; new_size];

    let old_size: usize = shape.iter().product();
    let mut old_indices = vec![0usize; d];

    for _ in 0..old_size {
        let old_idx = compute_linear_index(&old_indices, shape);
        let val = data[old_idx];
        let r = old_indices[k];

        // result[..., i, ...] += U[i][r] * data[..., r, ...]
        for i in 0..n_k {
            let mut new_indices = old_indices.clone();
            new_indices[k] = i;
            let new_idx = compute_linear_index(&new_indices, &new_shape);

            result[new_idx] += val * u[i * r_k + r];
        }

        // Advance the multi-index in row-major order.
        for m in (0..d).rev() {
            old_indices[m] += 1;
            if old_indices[m] < shape[m] {
                break;
            }
            old_indices[m] = 0;
        }
    }

    result
}

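/// Row-major flat index of `indices` within a tensor of the given `shape`
/// (last dimension varies fastest).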
fn compute_linear_index(indices: &[usize], shape: &[usize]) -> usize {
    let mut idx = 0;
    let mut stride = 1;
    for i in (0..shape.len()).rev() {
        idx += indices[i] * stride;
        stride *= shape[i];
    }
    idx
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_tucker_hosvd() {
        let tensor = DenseTensor::random(vec![4, 5, 3], 42);

        let config = TuckerConfig {
            ranks: vec![2, 3, 2],
            ..Default::default()
        };

        let tucker = TuckerDecomposition::hosvd(&tensor, &config);

        assert_eq!(tucker.core_shape, vec![2, 3, 2]);
        assert!(tucker.compression_ratio() > 1.0);
    }

    #[test]
    fn test_mode_unfold() {
        let tensor = DenseTensor::new(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]);

        let unfold0 = mode_k_unfold(&tensor, 0);
        assert_eq!(unfold0.len(), 6);
    }
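
    // Illustrative extra checks (not in the original file): they pin down the
    // row-major unfolding layout and the reconstruction shape, assuming
    // DenseTensor::new / DenseTensor::random and the public `shape` / `data`
    // fields behave as they are used in the tests above.
    #[test]
    fn test_mode_unfold_values() {
        let tensor = DenseTensor::new(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]);

        // Mode-0 unfolding of a row-major 2x3 matrix is the matrix itself.
        let unfold0 = mode_k_unfold(&tensor, 0);
        assert_eq!(unfold0, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);

        // Mode-1 unfolding is 3x2; each row is a column of the original matrix.
        let unfold1 = mode_k_unfold(&tensor, 1);
        assert_eq!(unfold1, vec![1.0, 4.0, 2.0, 5.0, 3.0, 6.0]);
    }

    #[test]
    fn test_to_dense_shape() {
        let tensor = DenseTensor::random(vec![4, 5, 3], 7);
        let config = TuckerConfig {
            ranks: vec![2, 2, 2],
            ..Default::default()
        };

        let tucker = TuckerDecomposition::hosvd(&tensor, &config);
        let reconstructed = tucker.to_dense();

        // Reconstruction restores the original shape and element count.
        assert_eq!(reconstructed.shape, vec![4, 5, 3]);
        assert_eq!(reconstructed.data.len(), 4 * 5 * 3);
    }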
}