// ruvector_math/tensor_networks/mod.rs

//! Tensor Networks
//!
//! Efficient representations of high-dimensional tensors using network decompositions.
//!
//! ## Background
//!
//! High-dimensional tensors suffer from the "curse of dimensionality" - a tensor of
//! order d with mode sizes n has O(n^d) elements. Tensor networks provide compressed
//! representations with controllable approximation error.
//!
//! ## Decompositions
//!
//! - **Tensor Train (TT)**: A[i1,...,id] = G1[i1] × G2[i2] × ... × Gd[id]
//! - **Tucker**: Core tensor with factor matrices
//! - **CP (CANDECOMP/PARAFAC)**: Sum of rank-1 tensors
//!
//! ## Applications
//!
//! - Quantum-inspired algorithms
//! - High-dimensional integration
//! - Attention mechanism compression
//! - Scientific computing

mod contraction;
mod cp_decomposition;
mod tensor_train;
mod tucker;

pub use contraction::{NetworkContraction, TensorNetwork, TensorNode};
pub use cp_decomposition::{CPConfig, CPDecomposition};
pub use tensor_train::{TTCore, TensorTrain, TensorTrainConfig};
pub use tucker::{TuckerConfig, TuckerDecomposition};
33
/// Dense tensor for input/output.
///
/// Elements are stored contiguously in row-major (C) order; `shape` gives
/// the extent of each mode, so `data.len() == shape.iter().product()`.
#[derive(Debug, Clone)]
pub struct DenseTensor {
    /// Tensor data in row-major order
    pub data: Vec<f64>,
    /// Shape of the tensor
    pub shape: Vec<usize>,
}

impl DenseTensor {
    /// Create tensor from data and shape.
    ///
    /// # Panics
    /// Panics if `data.len()` does not equal the product of `shape`.
    pub fn new(data: Vec<f64>, shape: Vec<usize>) -> Self {
        let expected_size: usize = shape.iter().product();
        assert_eq!(data.len(), expected_size, "Data size must match shape");
        Self { data, shape }
    }

    /// Create a tensor of the given shape filled with zeros.
    pub fn zeros(shape: Vec<usize>) -> Self {
        let size: usize = shape.iter().product();
        Self {
            data: vec![0.0; size],
            shape,
        }
    }

    /// Create a tensor of the given shape filled with ones.
    pub fn ones(shape: Vec<usize>) -> Self {
        let size: usize = shape.iter().product();
        Self {
            data: vec![1.0; size],
            shape,
        }
    }

    /// Create a deterministic pseudo-random tensor with entries in [-1, 1).
    ///
    /// Uses a 64-bit LCG (Knuth's MMIX multiplier) seeded by `seed`, so the
    /// same `(shape, seed)` pair always produces the same data.
    pub fn random(shape: Vec<usize>, seed: u64) -> Self {
        let size: usize = shape.iter().product();
        let mut data = Vec::with_capacity(size);

        let mut s = seed;
        for _ in 0..size {
            s = s.wrapping_mul(6364136223846793005).wrapping_add(1);
            // Top 31 bits of state, scaled from [0, 1) into [-1, 1).
            let x = ((s >> 33) as f64 / (1u64 << 31) as f64) * 2.0 - 1.0;
            data.push(x);
        }

        Self { data, shape }
    }

    /// Get tensor order (number of dimensions).
    pub fn order(&self) -> usize {
        self.shape.len()
    }

    /// Get linear (row-major) index from a multi-index.
    ///
    /// The last mode varies fastest; iterating modes in reverse lets the
    /// stride accumulate without a separate precomputation pass.
    pub fn linear_index(&self, indices: &[usize]) -> usize {
        // Catch rank mismatches early in debug builds; a short `indices`
        // slice would otherwise silently compute a wrong offset.
        debug_assert_eq!(
            indices.len(),
            self.shape.len(),
            "Multi-index rank must match tensor order"
        );
        let mut idx = 0;
        let mut stride = 1;
        for (i, &s) in self.shape.iter().enumerate().rev() {
            idx += indices[i] * stride;
            stride *= s;
        }
        idx
    }

    /// Get element at multi-index.
    ///
    /// # Panics
    /// Panics if the multi-index maps outside the data buffer.
    pub fn get(&self, indices: &[usize]) -> f64 {
        self.data[self.linear_index(indices)]
    }

    /// Set element at multi-index.
    ///
    /// # Panics
    /// Panics if the multi-index maps outside the data buffer.
    pub fn set(&mut self, indices: &[usize], value: f64) {
        let idx = self.linear_index(indices);
        self.data[idx] = value;
    }

    /// Compute the Frobenius norm (square root of the sum of squared entries).
    pub fn frobenius_norm(&self) -> f64 {
        self.data.iter().map(|x| x * x).sum::<f64>().sqrt()
    }

    /// Reshape tensor, returning a new tensor with copied data.
    ///
    /// NOTE: despite reusing the same element values, this clones the data
    /// buffer — it is not a zero-copy view.
    ///
    /// # Panics
    /// Panics if `new_shape` does not describe the same number of elements.
    pub fn reshape(&self, new_shape: Vec<usize>) -> Self {
        let new_size: usize = new_shape.iter().product();
        assert_eq!(self.data.len(), new_size, "New shape must have same size");
        Self {
            data: self.data.clone(),
            shape: new_shape,
        }
    }
}
126
#[cfg(test)]
mod tests {
    use super::*;

    /// Element access and order on a 2x3 tensor.
    #[test]
    fn test_dense_tensor() {
        let t = DenseTensor::new(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]);

        assert_eq!(t.order(), 2);
        assert!((t.get(&[0, 0]) - 1.0).abs() < 1e-10);
        assert!((t.get(&[1, 2]) - 6.0).abs() < 1e-10);
    }

    /// Frobenius norm of [3, 4] is the 3-4-5 triangle hypotenuse.
    #[test]
    fn test_frobenius_norm() {
        let t = DenseTensor::new(vec![3.0, 4.0], vec![2]);
        assert!((t.frobenius_norm() - 5.0).abs() < 1e-10);
    }
}