use crate::error::{LoopError, Result};
use rand::{Rng, SeedableRng};
use rand::rngs::StdRng;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

pub type Precision = f64;

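/// Dense random projection in the spirit of the Johnson-Lindenstrauss lemma:
/// projecting into roughly `O(ln n / eps^2)` dimensions preserves pairwise
/// Euclidean distances up to a factor of about `(1 +/- eps)` with high
/// probability. This implementation draws uniform entries on `[-1, 1]`
/// (scaled so that squared norms are preserved in expectation) rather than
/// the Gaussian or +/-1 entries used in textbook constructions.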
#[derive(Debug, Clone)]
pub struct JLEmbedding {
    projection_matrix: Vec<Vec<Precision>>,
    original_dim: usize,
    target_dim: usize,
    eps: Precision,
}

impl JLEmbedding {
    pub fn new(original_dim: usize, eps: Precision, seed: Option<u64>) -> Result<Self> {
        if eps <= 0.0 || eps >= 1.0 {
            return Err(LoopError::math_error(
                "JL distortion parameter must be in (0, 1)"
            ));
        }

        let target_dim = Self::compute_target_dimension(original_dim, eps);
        let mut rng = match seed {
            Some(s) => StdRng::seed_from_u64(s),
            None => StdRng::from_entropy(),
        };

        let mut projection_matrix = vec![vec![0.0; original_dim]; target_dim];
        // Entries are uniform on [-1, 1]. Var(U[-1, 1]) = 1/3, so scaling by
        // sqrt(3 / k) makes E[||P x||^2] = ||x||^2, i.e. squared norms are
        // preserved in expectation.
        let scale_factor = (3.0 / target_dim as Precision).sqrt();

        for i in 0..target_dim {
            for j in 0..original_dim {
                projection_matrix[i][j] = (rng.gen::<f64>() * 2.0 - 1.0) * scale_factor;
            }
        }

        Ok(Self {
            projection_matrix,
            original_dim,
            target_dim,
            eps,
        })
    }

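    /// Classical JL target dimension, `k = ceil(8 * ln(n) / eps^2)`, clamped
    /// to at least 10 and at most the original dimension `n`. The constant 8
    /// is a common conservative choice; tighter constants only change `k` by
    /// a constant factor.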
    fn compute_target_dimension(n: usize, eps: Precision) -> usize {
        let ln_n = (n as Precision).ln();
        let k = (8.0 * ln_n / (eps * eps)).ceil() as usize;
        // Never exceed the original dimension: for small n the JL bound is
        // larger than n itself and no reduction is possible.
        k.max(10).min(n.max(1))
    }

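    /// Computes the sketch `y = P x`, mapping a vector from `original_dim`
    /// down to `target_dim` coordinates.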
    pub fn project_vector(&self, x: &[Precision]) -> Result<Vec<Precision>> {
        if x.len() != self.original_dim {
            return Err(LoopError::math_error(format!(
                "Vector dimension mismatch: expected {}, got {}",
                self.original_dim, x.len()
            )));
        }

        let mut result = vec![0.0; self.target_dim];
        for i in 0..self.target_dim {
            for j in 0..self.original_dim {
                result[i] += self.projection_matrix[i][j] * x[j];
            }
        }

        Ok(result)
    }

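    /// Lifts a sketched vector back to the original space by applying the
    /// transpose, `x ≈ P^T y`. This is only an approximation: `P^T` is not an
    /// inverse of `P`, and `P^T P ≈ I` holds in expectation rather than
    /// exactly.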
    pub fn reconstruct_vector(&self, y: &[Precision]) -> Result<Vec<Precision>> {
        if y.len() != self.target_dim {
            return Err(LoopError::math_error(format!(
                "Vector dimension mismatch: expected {}, got {}",
                self.target_dim, y.len()
            )));
        }

        let mut result = vec![0.0; self.original_dim];
        for j in 0..self.original_dim {
            for i in 0..self.target_dim {
                result[j] += self.projection_matrix[i][j] * y[i];
            }
        }

        Ok(result)
    }

    pub fn compression_ratio(&self) -> Precision {
        self.target_dim as Precision / self.original_dim as Precision
    }

    pub fn target_dimension(&self) -> usize {
        self.target_dim
    }
}

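/// Tuning parameters for the sketched Neumann-series solver: `jl_distortion`
/// is passed to [`JLEmbedding::new`], `series_truncation` bounds the number
/// of Neumann terms, and `tolerance` stops the series once a new term's l1
/// norm falls below it.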
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SublinearConfig {
    pub max_iterations: usize,
    pub tolerance: Precision,
    pub jl_distortion: Precision,
    pub sketch_ratio: Precision,
    pub use_importance_sampling: bool,
    pub adaptive_threshold: Precision,
    pub series_truncation: usize,
}

impl Default for SublinearConfig {
    fn default() -> Self {
        Self {
            max_iterations: 100,
            tolerance: 1e-6,
            jl_distortion: 0.3,
            sketch_ratio: 0.1,
            use_importance_sampling: true,
            adaptive_threshold: 0.1,
            series_truncation: 20,
        }
    }
}

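/// Coarse classification of the expected solve cost, derived from how
/// strongly diagonally dominant the matrix is (see
/// [`SublinearNeumannSolver::verify_sublinear_conditions`]).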
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ComplexityBound {
    Logarithmic,
    Sublinear,
    Linear,
    Superlinear,
}

#[derive(Debug, Clone)]
pub struct SublinearNeumannResult {
    pub solution: Vec<Precision>,
    pub iterations_used: usize,
    pub final_residual: Precision,
    pub complexity_bound: ComplexityBound,
    pub compression_ratio: Precision,
    pub convergence_rate: Precision,
    pub solve_time_ns: u64,
}

pub struct SublinearNeumannSolver {
    config: SublinearConfig,
}

impl SublinearNeumannSolver {
    pub fn new(config: SublinearConfig) -> Self {
        Self { config }
    }

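    /// Classifies the matrix by row-wise diagonal dominance. With
    /// `r_i = sum_{j != i} |a_ij| / |a_ii|`, the bound is `Logarithmic` when
    /// every `r_i <= 0.5`, `Sublinear` when every `r_i < 1`, `Linear`
    /// otherwise, and `Superlinear` when some diagonal entry is (near-)zero.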
    pub fn verify_sublinear_conditions(&self, matrix: &[Vec<Precision>]) -> Result<ComplexityBound> {
        let n = matrix.len();
        if n == 0 || matrix[0].len() != n {
            return Err(LoopError::math_error(
                "Matrix must be square and non-empty"
            ));
        }

        let mut is_diagonally_dominant = true;
        let mut max_off_diagonal_ratio: f64 = 0.0;

        for i in 0..n {
            let diagonal = matrix[i][i].abs();
            if diagonal < 1e-14 {
                return Ok(ComplexityBound::Superlinear);
            }

            let off_diagonal_sum: Precision = matrix[i].iter()
                .enumerate()
                .filter(|(j, _)| *j != i)
                .map(|(_, val)| val.abs())
                .sum();

            let ratio = off_diagonal_sum / diagonal;
            max_off_diagonal_ratio = max_off_diagonal_ratio.max(ratio);

            if ratio >= 1.0 {
                is_diagonally_dominant = false;
            }
        }

        // A worst-row ratio of exactly 0.5 still counts as the strongly
        // dominant (Logarithmic) regime; the boundary is inclusive.
        if is_diagonally_dominant && max_off_diagonal_ratio <= 0.5 {
            Ok(ComplexityBound::Logarithmic)
        } else if is_diagonally_dominant {
            Ok(ComplexityBound::Sublinear)
        } else {
            Ok(ComplexityBound::Linear)
        }
    }

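    /// Solves `A x = b` approximately by (1) sketching the system with a JL
    /// projection `P` to obtain the reduced problem `(P A P^T) y = P b`,
    /// (2) running a truncated Neumann series on the reduced system, and
    /// (3) lifting the result back with `x = P^T y`. Accuracy therefore
    /// depends on the JL distortion as well as the series truncation.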
    pub fn solve_sublinear_guaranteed(
        &self,
        matrix: &[Vec<Precision>],
        b: &[Precision]
    ) -> Result<SublinearNeumannResult> {
        let start_time = std::time::Instant::now();
        let n = matrix.len();

        if n != b.len() || n == 0 {
            return Err(LoopError::math_error(
                "Matrix and vector dimensions must match and be non-zero"
            ));
        }

        let complexity_bound = self.verify_sublinear_conditions(matrix)?;

        let jl_embedding = JLEmbedding::new(n, self.config.jl_distortion, Some(42))?;

        let (reduced_matrix, reduced_b) = self.create_reduced_problem(matrix, b, &jl_embedding)?;

        let reduced_solution = self.solve_neumann_truncated(&reduced_matrix, &reduced_b)?;

        let solution = jl_embedding.reconstruct_vector(&reduced_solution)?;

        let final_residual = self.compute_residual(matrix, b, &solution);
        let solve_time_ns = start_time.elapsed().as_nanos() as u64;

        let spectral_radius = self.estimate_spectral_radius(&reduced_matrix);
        let convergence_rate = if spectral_radius < 1.0 {
            -spectral_radius.ln()
        } else {
            0.0
        };

        Ok(SublinearNeumannResult {
            solution,
            iterations_used: self.config.series_truncation,
            final_residual,
            complexity_bound,
            compression_ratio: jl_embedding.compression_ratio(),
            convergence_rate,
            solve_time_ns,
        })
    }

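    /// Forms the sketched system `reduced_matrix = P A P^T` and
    /// `reduced_b = P b`, where `P` is the `k x n` JL projection.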
    fn create_reduced_problem(
        &self,
        matrix: &[Vec<Precision>],
        b: &[Precision],
        jl_embedding: &JLEmbedding,
    ) -> Result<(Vec<Vec<Precision>>, Vec<Precision>)> {
        let n = matrix.len();
        let k = jl_embedding.target_dimension();

        let mut reduced_matrix = vec![vec![0.0; k]; k];
        let reduced_b = jl_embedding.project_vector(b)?;

        for i in 0..k {
            let mut row_i = vec![0.0; n];
            for l in 0..n {
                for j in 0..n {
                    row_i[j] += jl_embedding.projection_matrix[i][l] * matrix[l][j];
                }
            }

            let projected_row = jl_embedding.project_vector(&row_i)?;
            for j in 0..k {
                reduced_matrix[i][j] = projected_row[j];
            }
        }

        Ok((reduced_matrix, reduced_b))
    }

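    /// Truncated Neumann series for the Jacobi splitting `A = D + R`:
    ///
    ///   x = sum_{m=0}^{M-1} (-D^{-1} R)^m D^{-1} b,
    ///
    /// where `D` is the diagonal and `R` the off-diagonal part. The series
    /// converges when the spectral radius of `D^{-1} R` is below 1 (which
    /// strict diagonal dominance guarantees). Terms are accumulated until
    /// `series_truncation` is reached or a term's l1 norm drops below
    /// `tolerance`.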
    fn solve_neumann_truncated(
        &self,
        matrix: &[Vec<Precision>],
        b: &[Precision],
    ) -> Result<Vec<Precision>> {
        let n = matrix.len();

        let mut diagonal_inv = vec![0.0; n];
        for i in 0..n {
            let diag_val = matrix[i][i];
            if diag_val.abs() < 1e-14 {
                return Err(LoopError::math_error(
                    "Matrix has zero or near-zero diagonal element"
                ));
            }
            diagonal_inv[i] = 1.0 / diag_val;
        }

        let mut c = vec![0.0; n];
        for i in 0..n {
            c[i] = diagonal_inv[i] * b[i];
        }

        let mut solution = c.clone();
        let mut current_term = c.clone();

        for _iteration in 1..self.config.series_truncation {
            let mut next_term = vec![0.0; n];

            for i in 0..n {
                // The next Neumann term is -(D^{-1} R) applied to the current
                // term, where R is the off-diagonal part of the matrix.
                let mut sum = 0.0;
                for j in 0..n {
                    if i != j {
                        sum -= diagonal_inv[i] * matrix[i][j] * current_term[j];
                    }
                }
                next_term[i] = sum;
            }

            for i in 0..n {
                solution[i] += next_term[i];
            }

            let term_norm: Precision = next_term.iter().map(|x| x.abs()).sum();
            if term_norm < self.config.tolerance {
                break;
            }

            current_term = next_term;
        }

        Ok(solution)
    }

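    /// Euclidean norm of the residual, `||A x - b||_2`.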
    fn compute_residual(
        &self,
        matrix: &[Vec<Precision>],
        b: &[Precision],
        x: &[Precision],
    ) -> Precision {
        let n = matrix.len();
        let mut residual = 0.0;

        for i in 0..n {
            let mut ax_i = 0.0;
            for j in 0..n {
                ax_i += matrix[i][j] * x[j];
            }
            let diff = ax_i - b[i];
            residual += diff * diff;
        }

        residual.sqrt()
    }

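    /// Rough estimate of the spectral radius via ten power-iteration steps;
    /// returns the growth factor `||A v||` of the last normalized step.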
    fn estimate_spectral_radius(&self, matrix: &[Vec<Precision>]) -> Precision {
        let n = matrix.len();
        if n == 0 {
            return 0.0;
        }

        let mut v = vec![1.0; n];
        let mut lambda = 0.0;

        for _ in 0..10 {
            let mut av = vec![0.0; n];
            for i in 0..n {
                for j in 0..n {
                    av[i] += matrix[i][j] * v[j];
                }
            }

            let norm: Precision = av.iter().map(|x| x * x).sum::<Precision>().sqrt();
            if norm < 1e-14 {
                break;
            }

            lambda = norm;
            for i in 0..n {
                v[i] = av[i] / norm;
            }
        }

        lambda
    }

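    /// PageRank via its linear-system formulation: with column-stochastic
    /// transition matrix `T` and damping `d`, solve `(I - d T) x = (1 - d) v`,
    /// where `v` is the personalization vector (uniform `1/n` by default).
    /// The exact solution is a probability vector (entries sum to 1); the
    /// sketched solve returns an approximation to it.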
    pub fn page_rank_sublinear(
        &self,
        adjacency: &[Vec<Precision>],
        damping: Precision,
        personalized: Option<&[Precision]>,
    ) -> Result<Vec<Precision>> {
        let n = adjacency.len();
        if n == 0 {
            return Ok(Vec::new());
        }

        // Column-stochastic transition matrix: transition[j][i] is the
        // probability of moving from node i to node j. Dangling nodes (zero
        // out-degree) are spread uniformly over all nodes.
        let mut transition = vec![vec![0.0; n]; n];
        for i in 0..n {
            let row_sum: Precision = adjacency[i].iter().sum();
            if row_sum > 1e-14 {
                for j in 0..n {
                    transition[j][i] = adjacency[i][j] / row_sum;
                }
            } else {
                for j in 0..n {
                    transition[j][i] = 1.0 / n as Precision;
                }
            }
        }

        // Dense Google matrix G = damping * T + uniform teleportation. Note
        // that G itself is not used below; the solve works with the
        // equivalent linear system instead.
        let teleport_prob = (1.0 - damping) / n as Precision;
        let mut google_matrix = vec![vec![teleport_prob; n]; n];

        for i in 0..n {
            for j in 0..n {
                google_matrix[i][j] += damping * transition[i][j];
            }
        }

        let b = match personalized {
            Some(p) => {
                let mut rhs = vec![0.0; n];
                for i in 0..n {
                    rhs[i] = (1.0 - damping) * p[i];
                }
                rhs
            }
            None => vec![(1.0 - damping) / n as Precision; n],
        };

        // PageRank linear system: (I - damping * T) x = (1 - damping) * v,
        // with T the column-stochastic transition matrix built above.
        let mut system_matrix = vec![vec![0.0; n]; n];
        for i in 0..n {
            for j in 0..n {
                system_matrix[i][j] = if i == j { 1.0 } else { 0.0 };
                system_matrix[i][j] -= damping * transition[i][j];
            }
        }

        self.solve_sublinear_guaranteed(&system_matrix, &b)
            .map(|result| result.solution)
    }

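    /// Returns summary statistics about the matrix: the fraction of
    /// diagonally dominant rows, min/max dominance ratios, a crude
    /// condition-number estimate, the sparsity ratio, and a numeric encoding
    /// (1.0-4.0) of the [`ComplexityBound`] classification.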
    pub fn analyze_complexity(&self, matrix: &[Vec<Precision>]) -> Result<HashMap<String, Precision>> {
        let n = matrix.len();
        if n == 0 {
            return Ok(HashMap::new());
        }

        let mut analysis = HashMap::new();

        let mut min_dominance_ratio: f64 = f64::INFINITY;
        let mut max_dominance_ratio: f64 = 0.0;
        let mut dominant_count = 0;

        for i in 0..n {
            let diagonal = matrix[i][i].abs();
            if diagonal > 1e-14 {
                let off_diagonal_sum: Precision = matrix[i].iter()
                    .enumerate()
                    .filter(|(j, _)| *j != i)
                    .map(|(_, val)| val.abs())
                    .sum();

                let ratio = off_diagonal_sum / diagonal;
                min_dominance_ratio = min_dominance_ratio.min(ratio);
                max_dominance_ratio = max_dominance_ratio.max(ratio);

                if ratio < 1.0 {
                    dominant_count += 1;
                }
            }
        }

        analysis.insert("diagonal_dominance_ratio".to_string(),
            dominant_count as f64 / n as f64);
        analysis.insert("min_dominance_ratio".to_string(), min_dominance_ratio);
        analysis.insert("max_dominance_ratio".to_string(), max_dominance_ratio);

        let condition_estimate = max_dominance_ratio / min_dominance_ratio.max(1e-14);
        analysis.insert("condition_estimate".to_string(), condition_estimate);

        let mut nonzero_count = 0;
        for i in 0..n {
            for j in 0..n {
                if matrix[i][j].abs() > 1e-14 {
                    nonzero_count += 1;
                }
            }
        }

        let sparsity_ratio = 1.0 - (nonzero_count as f64 / (n * n) as f64);
        analysis.insert("sparsity_ratio".to_string(), sparsity_ratio);

        let complexity_bound = self.verify_sublinear_conditions(matrix)?;
        let complexity_score = match complexity_bound {
            ComplexityBound::Logarithmic => 1.0,
            ComplexityBound::Sublinear => 2.0,
            ComplexityBound::Linear => 3.0,
            ComplexityBound::Superlinear => 4.0,
        };
        analysis.insert("complexity_classification".to_string(), complexity_score);

        Ok(analysis)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn create_test_matrix() -> Vec<Vec<Precision>> {
        vec![
            vec![4.0, 1.0, 1.0],
            vec![1.0, 4.0, 1.0],
            vec![1.0, 1.0, 4.0],
        ]
    }

    #[test]
    fn test_jl_embedding() {
        // The JL bound k = ceil(8 ln n / eps^2) only compresses once n is
        // large enough; use a dimension where genuine reduction occurs.
        let embedding = JLEmbedding::new(2000, 0.3, Some(42)).unwrap();
        let x = vec![1.0; 2000];
        let projected = embedding.project_vector(&x).unwrap();
        assert!(projected.len() < 2000);
        assert!(embedding.compression_ratio() < 1.0);
    }

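    // Supplementary sketch (not part of the original test suite): with the
    // sqrt(3 / k) scaling, the projection should roughly preserve Euclidean
    // norms. The tolerance is deliberately loose, since a single random draw
    // only concentrates around, rather than guarantees, the (1 +/- eps) bound.
    #[test]
    fn test_jl_norm_preservation_sketch() {
        let embedding = JLEmbedding::new(2000, 0.3, Some(7)).unwrap();
        let x: Vec<Precision> = (0..2000).map(|i| (i % 17) as Precision).collect();
        let projected = embedding.project_vector(&x).unwrap();

        let norm = |v: &[Precision]| v.iter().map(|a| a * a).sum::<Precision>().sqrt();
        let ratio = norm(&projected) / norm(&x);
        assert!(ratio > 0.5 && ratio < 1.5, "norm ratio was {}", ratio);
    }
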
    #[test]
    fn test_sublinear_solver() {
        let matrix = create_test_matrix();
        let b = vec![6.0, 6.0, 6.0];
        let config = SublinearConfig::default();
        let solver = SublinearNeumannSolver::new(config);

        let result = solver.solve_sublinear_guaranteed(&matrix, &b).unwrap();
        assert!(result.final_residual < 1e-3);
        assert!(matches!(result.complexity_bound, ComplexityBound::Logarithmic));
    }

    #[test]
    fn test_page_rank_sublinear() {
        let adjacency = vec![
            vec![0.0, 1.0, 1.0],
            vec![1.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0],
        ];

        let config = SublinearConfig::default();
        let solver = SublinearNeumannSolver::new(config);

        let pagerank = solver.page_rank_sublinear(&adjacency, 0.85, None).unwrap();
        assert_eq!(pagerank.len(), 3);

        // The exact PageRank vector is a probability distribution, so the
        // entries should sum to approximately 1.
        let sum: Precision = pagerank.iter().sum();
        assert!((sum - 1.0).abs() < 0.1);
        for &val in &pagerank {
            assert!(val > 0.0);
        }
    }

    #[test]
    fn test_complexity_analysis() {
        let matrix = create_test_matrix();
        let config = SublinearConfig::default();
        let solver = SublinearNeumannSolver::new(config);

        let analysis = solver.analyze_complexity(&matrix).unwrap();
        assert!(analysis.contains_key("diagonal_dominance_ratio"));
        assert!(analysis.contains_key("complexity_classification"));

        let complexity = analysis["complexity_classification"];
        assert!((complexity - 1.0).abs() < 1e-10);
    }

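    // Supplementary sketch (not part of the original test suite): exercises
    // the truncated Neumann series directly on a strictly diagonally dominant
    // system whose exact solution is x = [1, 1, 1].
    #[test]
    fn test_neumann_truncated_direct() {
        let matrix = create_test_matrix();
        let b = vec![6.0, 6.0, 6.0];
        let solver = SublinearNeumannSolver::new(SublinearConfig::default());

        let x = solver.solve_neumann_truncated(&matrix, &b).unwrap();
        for &xi in &x {
            assert!((xi - 1.0).abs() < 1e-3);
        }
    }
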
    #[test]
    fn test_complexity_bounds() {
        let config = SublinearConfig::default();
        let solver = SublinearNeumannSolver::new(config);

        let good_matrix = vec![
            vec![10.0, 1.0, 1.0],
            vec![1.0, 10.0, 1.0],
            vec![1.0, 1.0, 10.0],
        ];
        let bound = solver.verify_sublinear_conditions(&good_matrix).unwrap();
        assert!(matches!(bound, ComplexityBound::Logarithmic));

        let bad_matrix = vec![
            vec![1.0, 2.0, 3.0],
            vec![4.0, 1.0, 6.0],
            vec![7.0, 8.0, 1.0],
        ];
        let bound = solver.verify_sublinear_conditions(&bad_matrix).unwrap();
        assert!(!matches!(bound, ComplexityBound::Logarithmic));
    }
}