// temporal_lead/core.rs

//! Core mathematical structures for FTL information system

use ndarray::{Array1, Array2, ArrayView1, ArrayView2};
use serde::{Deserialize, Serialize};
use sprs::{CsMat, TriMat};
use std::ops::{Add, Mul, Sub};
/// Dense matrix representation.
///
/// Thin newtype over an `ndarray::Array2<f64>`; serializable via serde.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Matrix {
    // Kept private so all construction goes through the associated functions.
    data: Array2<f64>,
}
13
14impl Matrix {
15    /// Create a new matrix from 2D array
16    pub fn new(data: Array2<f64>) -> Self {
17        Self { data }
18    }
19
20    /// Create a random matrix for testing
21    pub fn random(rows: usize, cols: usize) -> Self {
22        use rand::Rng;
23        let mut rng = rand::thread_rng();
24        let data = Array2::from_shape_fn((rows, cols), |_| rng.gen_range(-1.0..1.0));
25        Self { data }
26    }
27
28    /// Create a diagonally dominant matrix (guaranteed solvable)
29    pub fn diagonally_dominant(size: usize, dominance_factor: f64) -> Self {
30        use rand::Rng;
31        let mut rng = rand::thread_rng();
32        let mut data = Array2::zeros((size, size));
33
34        for i in 0..size {
35            let mut row_sum = 0.0;
36            for j in 0..size {
37                if i != j {
38                    let val = rng.gen_range(-1.0..1.0);
39                    data[[i, j]] = val;
40                    row_sum += val.abs();
41                }
42            }
43            // Make diagonal dominant
44            data[[i, i]] = row_sum * dominance_factor;
45        }
46
47        Self { data }
48    }
49
50    /// Get matrix dimensions
51    pub fn shape(&self) -> (usize, usize) {
52        let shape = self.data.shape();
53        (shape[0], shape[1])
54    }
55
56    /// Get matrix as array view
57    pub fn view(&self) -> ArrayView2<f64> {
58        self.data.view()
59    }
60
61    /// Check if matrix is square
62    pub fn is_square(&self) -> bool {
63        let (rows, cols) = self.shape();
64        rows == cols
65    }
66
67    /// Compute spectral radius (largest eigenvalue magnitude)
68    pub fn spectral_radius(&self) -> f64 {
69        // Simplified power iteration method
70        if !self.is_square() {
71            return 0.0;
72        }
73
74        let n = self.shape().0;
75        let mut v = Vector::ones(n);
76        let max_iter = 100;
77
78        for _ in 0..max_iter {
79            let new_v = self.multiply_vector(&v);
80            let norm = new_v.norm();
81            if norm > 0.0 {
82                v = new_v.scale(1.0 / norm);
83            }
84        }
85
86        let mv = self.multiply_vector(&v);
87        mv.dot(&v) / v.dot(&v)
88    }
89
90    /// Multiply matrix by vector
91    pub fn multiply_vector(&self, v: &Vector) -> Vector {
92        let result = self.data.dot(&v.data);
93        Vector::new(result)
94    }
95
96    /// Convert to sparse format
97    pub fn to_sparse(&self) -> SparseMatrix {
98        let (rows, cols) = self.shape();
99        let mut tri = TriMat::new((rows, cols));
100
101        for i in 0..rows {
102            for j in 0..cols {
103                let val = self.data[[i, j]];
104                if val.abs() > 1e-10 {
105                    tri.add_triplet(i, j, val);
106                }
107            }
108        }
109
110        SparseMatrix {
111            data: tri.to_csr(),
112        }
113    }
114}
115
/// Dense vector representation.
///
/// Thin newtype over an `ndarray::Array1<f64>`; serializable via serde.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Vector {
    // Private 1-D storage; accessed through `view()` and the math methods.
    data: Array1<f64>,
}
121
122impl Vector {
123    /// Create a new vector
124    pub fn new(data: Array1<f64>) -> Self {
125        Self { data }
126    }
127
128    /// Create a vector of ones
129    pub fn ones(size: usize) -> Self {
130        Self {
131            data: Array1::ones(size),
132        }
133    }
134
135    /// Create a vector of zeros
136    pub fn zeros(size: usize) -> Self {
137        Self {
138            data: Array1::zeros(size),
139        }
140    }
141
142    /// Create a random vector
143    pub fn random(size: usize) -> Self {
144        use rand::Rng;
145        let mut rng = rand::thread_rng();
146        let data = Array1::from_shape_fn(size, |_| rng.gen_range(-1.0..1.0));
147        Self { data }
148    }
149
150    /// Get vector length
151    pub fn len(&self) -> usize {
152        self.data.len()
153    }
154
155    /// Check if vector is empty
156    pub fn is_empty(&self) -> bool {
157        self.data.is_empty()
158    }
159
160    /// Compute L2 norm
161    pub fn norm(&self) -> f64 {
162        self.data.dot(&self.data).sqrt()
163    }
164
165    /// Dot product with another vector
166    pub fn dot(&self, other: &Vector) -> f64 {
167        self.data.dot(&other.data)
168    }
169
170    /// Scale vector by scalar
171    pub fn scale(&self, scalar: f64) -> Self {
172        Self {
173            data: &self.data * scalar,
174        }
175    }
176
177    /// Get as array view
178    pub fn view(&self) -> ArrayView1<f64> {
179        self.data.view()
180    }
181
182    /// Add another vector
183    pub fn add(&self, other: &Vector) -> Self {
184        Self {
185            data: &self.data + &other.data,
186        }
187    }
188
189    /// Subtract another vector
190    pub fn sub(&self, other: &Vector) -> Self {
191        Self {
192            data: &self.data - &other.data,
193        }
194    }
195}
196
/// Sparse matrix representation for efficient large-scale computation.
///
/// Wraps a compressed sparse row (`sprs::CsMat<f64>`) matrix. Not serde-
/// serializable, unlike `Matrix`/`Vector`.
#[derive(Debug, Clone)]
pub struct SparseMatrix {
    // CSR storage; built via `from_csr` or `Matrix::to_sparse`.
    data: CsMat<f64>,
}
202
203impl SparseMatrix {
204    /// Create from CSR data
205    pub fn from_csr(data: CsMat<f64>) -> Self {
206        Self { data }
207    }
208
209    /// Get number of non-zero elements
210    pub fn nnz(&self) -> usize {
211        self.data.nnz()
212    }
213
214    /// Get shape
215    pub fn shape(&self) -> (usize, usize) {
216        self.data.shape()
217    }
218
219    /// Multiply by vector
220    pub fn multiply_vector(&self, v: &Vector) -> Vector {
221        let result = &self.data * v.view();
222        Vector::new(result.to_owned())
223    }
224
225    /// Get sparsity (percentage of zeros)
226    pub fn sparsity(&self) -> f64 {
227        let (rows, cols) = self.shape();
228        let total = rows * cols;
229        if total == 0 {
230            return 1.0;
231        }
232        1.0 - (self.nnz() as f64 / total as f64)
233    }
234}
235
/// Complexity class for algorithm analysis
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Complexity {
    /// O(1) - Constant time
    Constant,
    /// O(log n) - Logarithmic (our target)
    Logarithmic,
    /// O(n) - Linear
    Linear,
    /// O(n log n) - Linearithmic
    Linearithmic,
    /// O(n²) - Quadratic
    Quadratic,
    /// O(n³) - Cubic (traditional matrix operations)
    Cubic,
}

impl Complexity {
    /// Estimate time for the given input size in nanoseconds, assuming a
    /// 10 ns base operation.
    ///
    /// The estimate is computed in f64 and rounded once at the end. The old
    /// code truncated `log2(n)` to an integer before multiplying (so
    /// Logarithmic at n=1000 gave 90 ns instead of ~100 ns) and computed
    /// `n * n * n` in integer arithmetic, which panics on overflow in debug
    /// builds for large n. Estimates beyond `u64::MAX` saturate to
    /// `u64::MAX` (float-to-int `as` casts saturate).
    pub fn estimate_time_ns(&self, n: usize) -> u64 {
        const BASE_TIME: f64 = 10.0; // 10ns base operation
        let n = n as f64;
        // log2(0) is -inf; clamp so tiny inputs cost 0, not NaN/garbage.
        let log_n = n.log2().max(0.0);
        let ops = match self {
            Complexity::Constant => 1.0,
            Complexity::Logarithmic => log_n,
            Complexity::Linear => n,
            Complexity::Linearithmic => n * log_n,
            Complexity::Quadratic => n * n,
            Complexity::Cubic => n * n * n,
        };
        (BASE_TIME * ops).round() as u64
    }
}
267
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_matrix_creation() {
        // A diagonally dominant matrix must come back square at the requested size.
        let m = Matrix::diagonally_dominant(10, 2.0);
        assert!(m.is_square());
        assert_eq!(m.shape(), (10, 10));
    }

    #[test]
    fn test_vector_operations() {
        // ones + ones doubles every component.
        let sum = Vector::ones(5).add(&Vector::ones(5));
        assert_eq!(sum.data[0], 2.0);
    }

    #[test]
    fn test_complexity_estimation() {
        // Logarithmic should beat cubic by far more than a factor of a million at n=1000.
        let log_time = Complexity::Logarithmic.estimate_time_ns(1000);
        let cubic_time = Complexity::Cubic.estimate_time_ns(1000);
        assert!(log_time < cubic_time / 1_000_000);
    }
}