1use ndarray::{Array2, Array1, ArrayView2, ArrayView1};
4use sprs::{CsMat, TriMat};
5use serde::{Deserialize, Serialize};
6use std::ops::{Add, Mul, Sub};
7
/// Dense `f64` matrix backed by an `ndarray::Array2`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Matrix {
    // Row-major 2-D storage; exposed read-only via `view()`/`shape()`.
    data: Array2<f64>,
}
13
14impl Matrix {
15 pub fn new(data: Array2<f64>) -> Self {
17 Self { data }
18 }
19
20 pub fn random(rows: usize, cols: usize) -> Self {
22 use rand::Rng;
23 let mut rng = rand::thread_rng();
24 let data = Array2::from_shape_fn((rows, cols), |_| rng.gen_range(-1.0..1.0));
25 Self { data }
26 }
27
28 pub fn diagonally_dominant(size: usize, dominance_factor: f64) -> Self {
30 use rand::Rng;
31 let mut rng = rand::thread_rng();
32 let mut data = Array2::zeros((size, size));
33
34 for i in 0..size {
35 let mut row_sum = 0.0;
36 for j in 0..size {
37 if i != j {
38 let val = rng.gen_range(-1.0..1.0);
39 data[[i, j]] = val;
40 row_sum += val.abs();
41 }
42 }
43 data[[i, i]] = row_sum * dominance_factor;
45 }
46
47 Self { data }
48 }
49
50 pub fn shape(&self) -> (usize, usize) {
52 let shape = self.data.shape();
53 (shape[0], shape[1])
54 }
55
56 pub fn view(&self) -> ArrayView2<f64> {
58 self.data.view()
59 }
60
61 pub fn is_square(&self) -> bool {
63 let (rows, cols) = self.shape();
64 rows == cols
65 }
66
67 pub fn spectral_radius(&self) -> f64 {
69 if !self.is_square() {
71 return 0.0;
72 }
73
74 let n = self.shape().0;
75 let mut v = Vector::ones(n);
76 let max_iter = 100;
77
78 for _ in 0..max_iter {
79 let new_v = self.multiply_vector(&v);
80 let norm = new_v.norm();
81 if norm > 0.0 {
82 v = new_v.scale(1.0 / norm);
83 }
84 }
85
86 let mv = self.multiply_vector(&v);
87 mv.dot(&v) / v.dot(&v)
88 }
89
90 pub fn multiply_vector(&self, v: &Vector) -> Vector {
92 let result = self.data.dot(&v.data);
93 Vector::new(result)
94 }
95
96 pub fn to_sparse(&self) -> SparseMatrix {
98 let (rows, cols) = self.shape();
99 let mut tri = TriMat::new((rows, cols));
100
101 for i in 0..rows {
102 for j in 0..cols {
103 let val = self.data[[i, j]];
104 if val.abs() > 1e-10 {
105 tri.add_triplet(i, j, val);
106 }
107 }
108 }
109
110 SparseMatrix {
111 data: tri.to_csr(),
112 }
113 }
114}
115
/// Dense `f64` vector backed by an `ndarray::Array1`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Vector {
    // 1-D storage; exposed read-only via `view()`.
    data: Array1<f64>,
}
121
122impl Vector {
123 pub fn new(data: Array1<f64>) -> Self {
125 Self { data }
126 }
127
128 pub fn ones(size: usize) -> Self {
130 Self {
131 data: Array1::ones(size),
132 }
133 }
134
135 pub fn zeros(size: usize) -> Self {
137 Self {
138 data: Array1::zeros(size),
139 }
140 }
141
142 pub fn random(size: usize) -> Self {
144 use rand::Rng;
145 let mut rng = rand::thread_rng();
146 let data = Array1::from_shape_fn(size, |_| rng.gen_range(-1.0..1.0));
147 Self { data }
148 }
149
150 pub fn len(&self) -> usize {
152 self.data.len()
153 }
154
155 pub fn is_empty(&self) -> bool {
157 self.data.is_empty()
158 }
159
160 pub fn norm(&self) -> f64 {
162 self.data.dot(&self.data).sqrt()
163 }
164
165 pub fn dot(&self, other: &Vector) -> f64 {
167 self.data.dot(&other.data)
168 }
169
170 pub fn scale(&self, scalar: f64) -> Self {
172 Self {
173 data: &self.data * scalar,
174 }
175 }
176
177 pub fn view(&self) -> ArrayView1<f64> {
179 self.data.view()
180 }
181
182 pub fn add(&self, other: &Vector) -> Self {
184 Self {
185 data: &self.data + &other.data,
186 }
187 }
188
189 pub fn sub(&self, other: &Vector) -> Self {
191 Self {
192 data: &self.data - &other.data,
193 }
194 }
195}
196
/// Sparse `f64` matrix in CSR form, backed by `sprs::CsMat`.
// NOTE(review): unlike `Matrix`/`Vector`, this type does not derive
// Serialize/Deserialize — presumably because `CsMat`'s serde support is
// feature-gated in `sprs`; confirm before adding the derives.
#[derive(Debug, Clone)]
pub struct SparseMatrix {
    // CSR storage, produced by `Matrix::to_sparse` or `from_csr`.
    data: CsMat<f64>,
}
202
203impl SparseMatrix {
204 pub fn from_csr(data: CsMat<f64>) -> Self {
206 Self { data }
207 }
208
209 pub fn nnz(&self) -> usize {
211 self.data.nnz()
212 }
213
214 pub fn shape(&self) -> (usize, usize) {
216 self.data.shape()
217 }
218
219 pub fn multiply_vector(&self, v: &Vector) -> Vector {
221 let result = &self.data * v.view();
222 Vector::new(result.to_owned())
223 }
224
225 pub fn sparsity(&self) -> f64 {
227 let (rows, cols) = self.shape();
228 let total = rows * cols;
229 if total == 0 {
230 return 1.0;
231 }
232 1.0 - (self.nnz() as f64 / total as f64)
233 }
234}
235
/// Asymptotic complexity classes used for rough runtime estimation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Complexity {
    /// O(1)
    Constant,
    /// O(log n)
    Logarithmic,
    /// O(n)
    Linear,
    /// O(n log n)
    Linearithmic,
    /// O(n^2)
    Quadratic,
    /// O(n^3)
    Cubic,
}

impl Complexity {
    /// Rough runtime estimate in nanoseconds for an input of size `n`,
    /// assuming a fixed cost of 10 ns per unit of work.
    ///
    /// Fix: the log factor is clamped to at least 1 before truncation, so
    /// `Logarithmic`/`Linearithmic` no longer collapse to 0 ns for `n <= 1`
    /// (previously `log2(1) == 0.0` and `log2(0) == -inf` both cast to 0).
    /// Results for `n >= 2` are unchanged (truncation is preserved).
    pub fn estimate_time_ns(&self, n: usize) -> u64 {
        // Cost of one unit of work, in nanoseconds.
        const BASE_TIME: u64 = 10;
        // Truncated log2(n), never below 1.
        let log_factor = (n as f64).log2().max(1.0) as u64;
        match self {
            Complexity::Constant => BASE_TIME,
            Complexity::Logarithmic => BASE_TIME * log_factor,
            Complexity::Linear => BASE_TIME * n as u64,
            Complexity::Linearithmic => BASE_TIME * n as u64 * log_factor,
            Complexity::Quadratic => BASE_TIME * (n * n) as u64,
            Complexity::Cubic => BASE_TIME * (n * n * n) as u64,
        }
    }
}
267
#[cfg(test)]
mod tests {
    use super::*;

    /// A diagonally dominant matrix must come out square with the requested size.
    #[test]
    fn test_matrix_creation() {
        let matrix = Matrix::diagonally_dominant(10, 2.0);
        assert!(matrix.is_square());
        assert_eq!(matrix.shape(), (10, 10));
    }

    /// Adding two all-ones vectors yields twos element-wise.
    #[test]
    fn test_vector_operations() {
        let lhs = Vector::ones(5);
        let rhs = Vector::ones(5);
        let sum = lhs.add(&rhs);
        assert_eq!(sum.data[0], 2.0);
    }

    /// A logarithmic estimate should be far below a cubic one for the same n.
    #[test]
    fn test_complexity_estimation() {
        let n = 1000;
        let log_time = Complexity::Logarithmic.estimate_time_ns(n);
        let cubic_time = Complexity::Cubic.estimate_time_ns(n);
        assert!(log_time < cubic_time / 1000000);
    }
}