ruvector_math/tensor_networks/tucker.rs

use super::DenseTensor;
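
// Tucker decomposition of a dense tensor via Higher-Order SVD (HOSVD).
//
// Outline of `hosvd`: for each mode k the tensor is unfolded into an
// n_k x (total / n_k) matrix, the top left singular vectors of that
// unfolding are estimated by deflated power iteration, and they become the
// mode-k factor matrix. The core is the tensor contracted with every factor
// transposed, so `to_dense` (the core multiplied back by every factor)
// yields a rank-(r_1, ..., r_d) approximation of the input.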

/// Configuration for Tucker decomposition via HOSVD.
#[derive(Debug, Clone)]
pub struct TuckerConfig {
    /// Target rank per mode; modes beyond this list keep their full size.
    pub ranks: Vec<usize>,
    /// Convergence tolerance (reserved; the HOSVD path currently runs a
    /// fixed number of power-iteration steps instead).
    pub tolerance: f64,
    /// Maximum power-iteration steps per singular vector.
    pub max_iters: usize,
}

impl Default for TuckerConfig {
    fn default() -> Self {
        Self {
            ranks: vec![],
            tolerance: 1e-10,
            max_iters: 20,
        }
    }
}

/// Result of a Tucker (HOSVD) decomposition.
#[derive(Debug, Clone)]
pub struct TuckerDecomposition {
    /// Core tensor with shape `core_shape`.
    pub core: DenseTensor,
    /// One factor matrix per mode; factor k is `shape[k]` x `core_shape[k]`, row-major.
    pub factors: Vec<Vec<f64>>,
    /// Shape of the original tensor.
    pub shape: Vec<usize>,
    /// Shape of the core tensor (the effective ranks).
    pub core_shape: Vec<usize>,
}

impl TuckerDecomposition {
    /// Higher-Order SVD (HOSVD): for each mode, take the leading left
    /// singular vectors of the mode-k unfolding as that mode's factor,
    /// then form the core by projecting the tensor onto all factors.
    pub fn hosvd(tensor: &DenseTensor, config: &TuckerConfig) -> Self {
        let d = tensor.order();
        let mut factors = Vec::new();
        let mut core_shape = Vec::new();

        for k in 0..d {
            // Unfold along mode k into an n_k x (total / n_k) matrix.
            let unfolding = mode_k_unfold(tensor, k);
            let (n_k, cols) = (tensor.shape[k], unfolding.len() / tensor.shape[k]);

            // Requested rank, clamped to the mode size; modes without a
            // requested rank keep full rank.
            let rank = if k < config.ranks.len() {
                config.ranks[k].min(n_k)
            } else {
                n_k
            };

            let u_k = compute_left_singular_vectors(&unfolding, n_k, cols, rank, config.max_iters);

            factors.push(u_k);
            core_shape.push(rank);
        }

        let core = compute_core(tensor, &factors, &core_shape);

        Self {
            core,
            factors,
            shape: tensor.shape.clone(),
            core_shape,
        }
    }

    /// Reconstruct the (approximated) dense tensor: core x_k U_k for every mode.
    pub fn to_dense(&self) -> DenseTensor {
        let mut result = self.core.data.clone();
        let mut current_shape = self.core_shape.clone();

        for (k, factor) in self.factors.iter().enumerate() {
            let n_k = self.shape[k];
            let r_k = self.core_shape[k];

            result = apply_mode_product(&result, &current_shape, factor, n_k, r_k, k);
            current_shape[k] = n_k;
        }

        DenseTensor::new(result, self.shape.clone())
    }

    /// Elements in the original tensor divided by elements stored by the
    /// decomposition (core plus all factor matrices).
    pub fn compression_ratio(&self) -> f64 {
        let original: usize = self.shape.iter().product();
        let core_size: usize = self.core_shape.iter().product();
        let factor_size: usize = self
            .factors
            .iter()
            .enumerate()
            .map(|(k, _)| self.shape[k] * self.core_shape[k])
            .sum();

        original as f64 / (core_size + factor_size) as f64
    }
}

/// Mode-k unfolding: lay the tensor out as an n_k x (total / n_k) row-major
/// matrix whose rows are indexed by mode k and whose columns enumerate the
/// remaining modes in row-major order.
fn mode_k_unfold(tensor: &DenseTensor, k: usize) -> Vec<f64> {
    let d = tensor.order();
    let n_k = tensor.shape[k];
    let cols: usize = tensor
        .shape
        .iter()
        .enumerate()
        .filter(|&(i, _)| i != k)
        .map(|(_, &s)| s)
        .product();

    let mut result = vec![0.0; n_k * cols];

    let total_size: usize = tensor.shape.iter().product();
    let mut indices = vec![0usize; d];

    for flat_idx in 0..total_size {
        let val = tensor.data[flat_idx];
        let i_k = indices[k];

        // Column index: flatten all modes except k, last mode varying fastest.
        let mut col = 0;
        let mut stride = 1;
        for m in (0..d).rev() {
            if m != k {
                col += indices[m] * stride;
                stride *= tensor.shape[m];
            }
        }

        result[i_k * cols + col] = val;

        // Advance the multi-index in row-major order (last mode fastest).
        for m in (0..d).rev() {
            indices[m] += 1;
            if indices[m] < tensor.shape[m] {
                break;
            }
            indices[m] = 0;
        }
    }

    result
}

/// Approximate the top `rank` left singular vectors of the `rows` x `cols`
/// row-major matrix `a` by deflated power iteration on A A^T. Returns a
/// row-major `rows` x `rank` matrix with orthonormal columns.
fn compute_left_singular_vectors(
    a: &[f64],
    rows: usize,
    cols: usize,
    rank: usize,
    max_iters: usize,
) -> Vec<f64> {
    let mut u = vec![0.0; rows * rank];

    for r in 0..rank {
        // Deterministic pseudo-random start vector hashed from (i, r),
        // reduced mod 2^32 and mapped into [-1, 1).
        let mut v: Vec<f64> = (0..rows)
            .map(|i| {
                let h = (i as u64)
                    .wrapping_mul(2654435769)
                    .wrapping_add((r as u64).wrapping_mul(1103515245));
                ((h % 4294967296) as f64 / 4294967296.0) * 2.0 - 1.0
            })
            .collect();
        normalize(&mut v);

        for _ in 0..max_iters {
            // atv = A^T v
            let mut atv = vec![0.0; cols];
            for i in 0..rows {
                for j in 0..cols {
                    atv[j] += a[i * cols + j] * v[i];
                }
            }

            // aatv = A (A^T v)
            let mut aatv = vec![0.0; rows];
            for i in 0..rows {
                for j in 0..cols {
                    aatv[i] += a[i * cols + j] * atv[j];
                }
            }

            // Deflate: orthogonalize against the singular vectors already found.
            for prev in 0..r {
                let mut dot = 0.0;
                for i in 0..rows {
                    dot += aatv[i] * u[i * rank + prev];
                }
                for i in 0..rows {
                    aatv[i] -= dot * u[i * rank + prev];
                }
            }

            v = aatv;
            normalize(&mut v);
        }

        // Store the converged vector as column r of U.
        for i in 0..rows {
            u[i * rank + r] = v[i];
        }
    }

    u
}

/// Scale to unit Euclidean norm; vectors with near-zero norm are left as-is.
fn normalize(v: &mut [f64]) {
    let norm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
    if norm > 1e-15 {
        for x in v.iter_mut() {
            *x /= norm;
        }
    }
}

/// Project the tensor onto the factor subspaces:
/// core = T x_1 U_1^T x_2 U_2^T ... x_d U_d^T.
fn compute_core(tensor: &DenseTensor, factors: &[Vec<f64>], core_shape: &[usize]) -> DenseTensor {
    let mut result = tensor.data.clone();
    let mut current_shape = tensor.shape.clone();

    for (k, factor) in factors.iter().enumerate() {
        let n_k = tensor.shape[k];
        let r_k = core_shape[k];

        result = apply_mode_product_transpose(&result, &current_shape, factor, n_k, r_k, k);
        current_shape[k] = r_k;
    }

    DenseTensor::new(result, core_shape.to_vec())
}

/// Mode-k product with U^T: contract mode k from size n_k down to r_k.
/// `u` is row-major n_k x r_k, so entry (i_k, r) sits at u[i_k * r_k + r].
fn apply_mode_product_transpose(
    data: &[f64],
    shape: &[usize],
    u: &[f64],
    n_k: usize,
    r_k: usize,
    k: usize,
) -> Vec<f64> {
    let d = shape.len();
    debug_assert_eq!(shape[k], n_k);

    let mut new_shape = shape.to_vec();
    new_shape[k] = r_k;

    let new_size: usize = new_shape.iter().product();
    let mut result = vec![0.0; new_size];

    let old_size: usize = shape.iter().product();
    let mut old_indices = vec![0usize; d];

    for _ in 0..old_size {
        let old_idx = compute_linear_index(&old_indices, shape);
        let val = data[old_idx];
        let i_k = old_indices[k];

        // result[..., r, ...] += U[i_k, r] * data[..., i_k, ...]
        for r in 0..r_k {
            let mut new_indices = old_indices.clone();
            new_indices[k] = r;
            let new_idx = compute_linear_index(&new_indices, &new_shape);

            result[new_idx] += val * u[i_k * r_k + r];
        }

        // Advance the multi-index in row-major order.
        for m in (0..d).rev() {
            old_indices[m] += 1;
            if old_indices[m] < shape[m] {
                break;
            }
            old_indices[m] = 0;
        }
    }

    result
}

/// Mode-k product with U: expand mode k from size r_k back up to n_k.
/// `u` is row-major n_k x r_k, so entry (i, r) sits at u[i * r_k + r].
fn apply_mode_product(
    data: &[f64],
    shape: &[usize],
    u: &[f64],
    n_k: usize,
    r_k: usize,
    k: usize,
) -> Vec<f64> {
    let d = shape.len();
    debug_assert_eq!(shape[k], r_k);

    let mut new_shape = shape.to_vec();
    new_shape[k] = n_k;

    let new_size: usize = new_shape.iter().product();
    let mut result = vec![0.0; new_size];

    let old_size: usize = shape.iter().product();
    let mut old_indices = vec![0usize; d];

    for _ in 0..old_size {
        let old_idx = compute_linear_index(&old_indices, shape);
        let val = data[old_idx];
        let r = old_indices[k];

        // result[..., i, ...] += U[i, r] * data[..., r, ...]
        for i in 0..n_k {
            let mut new_indices = old_indices.clone();
            new_indices[k] = i;
            let new_idx = compute_linear_index(&new_indices, &new_shape);

            result[new_idx] += val * u[i * r_k + r];
        }

        // Advance the multi-index in row-major order.
        for m in (0..d).rev() {
            old_indices[m] += 1;
            if old_indices[m] < shape[m] {
                break;
            }
            old_indices[m] = 0;
        }
    }

    result
}

/// Row-major linear index: the last dimension varies fastest.
fn compute_linear_index(indices: &[usize], shape: &[usize]) -> usize {
    let mut idx = 0;
    let mut stride = 1;
    for i in (0..shape.len()).rev() {
        idx += indices[i] * stride;
        stride *= shape[i];
    }
    idx
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_tucker_hosvd() {
        let tensor = DenseTensor::random(vec![4, 5, 3], 42);

        let config = TuckerConfig {
            ranks: vec![2, 3, 2],
            ..Default::default()
        };

        let tucker = TuckerDecomposition::hosvd(&tensor, &config);

        assert_eq!(tucker.core_shape, vec![2, 3, 2]);
        assert!(tucker.compression_ratio() > 1.0);
    }
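
    // Sketch of an additional test (not in the original): requested ranks
    // larger than a mode are clamped to the mode size, and reconstruction
    // restores the original shape. Uses the same assumed
    // `DenseTensor::random(shape, seed)` helper as the test above.
    #[test]
    fn test_rank_clamping_and_shape() {
        let tensor = DenseTensor::random(vec![4, 5, 3], 7);

        let config = TuckerConfig {
            ranks: vec![10, 10, 10],
            ..Default::default()
        };

        let tucker = TuckerDecomposition::hosvd(&tensor, &config);

        // Each effective rank is min(requested, mode size).
        assert_eq!(tucker.core_shape, vec![4, 5, 3]);

        // Reconstruction returns a tensor of the original shape.
        let dense = tucker.to_dense();
        assert_eq!(dense.shape, vec![4, 5, 3]);
    }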

    #[test]
    fn test_mode_unfold() {
        let tensor = DenseTensor::new(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]);

        let unfold0 = mode_k_unfold(&tensor, 0);
        assert_eq!(unfold0.len(), 6);
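        // Added check: the mode-0 unfolding of a row-major matrix is the
        // matrix itself, so the data should come back unchanged.
        assert_eq!(unfold0, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);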
    }
}