// ruvector_math/tensor_networks/mod.rs

//! Tensor Networks
//!
//! Efficient representations of high-dimensional tensors using network decompositions.
//!
//! ## Background
//!
//! High-dimensional tensors suffer from the "curse of dimensionality" - a tensor of
//! order d with mode sizes n has O(n^d) elements. Tensor networks provide compressed
//! representations with controllable approximation error.
//!
//! ## Decompositions
//!
//! - **Tensor Train (TT)**: A[i1,...,id] = G1[i1] × G2[i2] × ... × Gd[id]
//! - **Tucker**: Core tensor with factor matrices
//! - **CP (CANDECOMP/PARAFAC)**: Sum of rank-1 tensors
//!
//! ## Applications
//!
//! - Quantum-inspired algorithms
//! - High-dimensional integration
//! - Attention mechanism compression
//! - Scientific computing
23
24mod tensor_train;
25mod tucker;
26mod cp_decomposition;
27mod contraction;
28
29pub use tensor_train::{TensorTrain, TTCore, TensorTrainConfig};
30pub use tucker::{TuckerDecomposition, TuckerConfig};
31pub use cp_decomposition::{CPDecomposition, CPConfig};
32pub use contraction::{TensorNetwork, TensorNode, NetworkContraction};
33
/// Dense (uncompressed) tensor used as input/output for the decompositions.
///
/// Elements are stored flat in row-major (C) order: the last index varies
/// fastest. The constructors maintain the invariant
/// `data.len() == shape.iter().product()`; since both fields are public,
/// direct field writes can break it.
#[derive(Debug, Clone)]
pub struct DenseTensor {
    /// Flat element storage in row-major order.
    pub data: Vec<f64>,
    /// Extent of each mode (dimension); `shape.len()` is the tensor order.
    pub shape: Vec<usize>,
}
42
43impl DenseTensor {
44    /// Create tensor from data and shape
45    pub fn new(data: Vec<f64>, shape: Vec<usize>) -> Self {
46        let expected_size: usize = shape.iter().product();
47        assert_eq!(data.len(), expected_size, "Data size must match shape");
48        Self { data, shape }
49    }
50
51    /// Create zeros tensor
52    pub fn zeros(shape: Vec<usize>) -> Self {
53        let size: usize = shape.iter().product();
54        Self { data: vec![0.0; size], shape }
55    }
56
57    /// Create ones tensor
58    pub fn ones(shape: Vec<usize>) -> Self {
59        let size: usize = shape.iter().product();
60        Self { data: vec![1.0; size], shape }
61    }
62
63    /// Create random tensor
64    pub fn random(shape: Vec<usize>, seed: u64) -> Self {
65        let size: usize = shape.iter().product();
66        let mut data = Vec::with_capacity(size);
67
68        let mut s = seed;
69        for _ in 0..size {
70            s = s.wrapping_mul(6364136223846793005).wrapping_add(1);
71            let x = ((s >> 33) as f64 / (1u64 << 31) as f64) * 2.0 - 1.0;
72            data.push(x);
73        }
74
75        Self { data, shape }
76    }
77
78    /// Get tensor order (number of dimensions)
79    pub fn order(&self) -> usize {
80        self.shape.len()
81    }
82
83    /// Get linear index from multi-index
84    pub fn linear_index(&self, indices: &[usize]) -> usize {
85        let mut idx = 0;
86        let mut stride = 1;
87        for (i, &s) in self.shape.iter().enumerate().rev() {
88            idx += indices[i] * stride;
89            stride *= s;
90        }
91        idx
92    }
93
94    /// Get element at multi-index
95    pub fn get(&self, indices: &[usize]) -> f64 {
96        self.data[self.linear_index(indices)]
97    }
98
99    /// Set element at multi-index
100    pub fn set(&mut self, indices: &[usize], value: f64) {
101        let idx = self.linear_index(indices);
102        self.data[idx] = value;
103    }
104
105    /// Compute Frobenius norm
106    pub fn frobenius_norm(&self) -> f64 {
107        self.data.iter().map(|x| x * x).sum::<f64>().sqrt()
108    }
109
110    /// Reshape tensor (view only, same data)
111    pub fn reshape(&self, new_shape: Vec<usize>) -> Self {
112        let new_size: usize = new_shape.iter().product();
113        assert_eq!(self.data.len(), new_size, "New shape must have same size");
114        Self { data: self.data.clone(), shape: new_shape }
115    }
116}
117
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_dense_tensor() {
        let t = DenseTensor::new(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]);

        assert_eq!(t.order(), 2);
        assert!((t.get(&[0, 0]) - 1.0).abs() < 1e-10);
        assert!((t.get(&[1, 2]) - 6.0).abs() < 1e-10);
    }

    #[test]
    fn test_set_and_get() {
        let mut t = DenseTensor::zeros(vec![2, 2]);
        t.set(&[1, 0], 7.5);
        assert!((t.get(&[1, 0]) - 7.5).abs() < 1e-10);
        // Remaining entries stay zero.
        assert!(t.get(&[0, 1]).abs() < 1e-10);
    }

    #[test]
    fn test_frobenius_norm() {
        let t = DenseTensor::new(vec![3.0, 4.0], vec![2]);
        assert!((t.frobenius_norm() - 5.0).abs() < 1e-10);
    }

    #[test]
    fn test_reshape_preserves_data() {
        let t = DenseTensor::ones(vec![2, 3]);
        let r = t.reshape(vec![3, 2]);
        assert_eq!(r.shape, vec![3, 2]);
        assert_eq!(r.data, t.data);
    }

    #[test]
    fn test_random_is_deterministic() {
        let a = DenseTensor::random(vec![4], 42);
        let b = DenseTensor::random(vec![4], 42);
        assert_eq!(a.data, b.data);
        assert!(a.data.iter().all(|x| (-1.0..1.0).contains(x)));
    }
}
136}