//! Lanczos eigenvalue solver (scirs2_sparse/linalg/eigen/lanczos.rs).

use crate::error::{SparseError, SparseResult};
use crate::sym_csr::SymCsrMatrix;
use crate::sym_ops::sym_csr_matvec;
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
use scirs2_core::numeric::Float;
use scirs2_core::SparseElement;
use std::fmt::Debug;
use std::ops::{Add, Div, Mul, Sub};
14
/// Exact equality check for index-like operands.
///
/// The previous implementation cast both operands to `i32` before
/// comparing, which silently truncates `usize` values above `i32::MAX`
/// and could report two distinct large indices as equal. Comparing the
/// operands directly preserves the result for all in-range inputs and
/// removes the lossy cast. (The only call site compares `usize` loop
/// indices.)
macro_rules! abs_diff_eq {
    ($left:expr, $right:expr) => {
        ($left) == ($right)
    };
}
21
/// Configuration options for the Lanczos eigenvalue solver.
#[derive(Debug, Clone)]
pub struct LanczosOptions {
    /// Maximum number of Lanczos iterations to perform.
    pub max_iter: usize,
    /// Maximum dimension of the Krylov subspace (capped at the matrix size).
    pub max_subspace_size: usize,
    /// Convergence tolerance; compared against the trailing off-diagonal
    /// coefficient relative to the dominant Ritz value.
    pub tol: f64,
    /// Number of (largest) eigenvalues to compute.
    pub numeigenvalues: usize,
    /// Whether to assemble Ritz vectors (approximate eigenvectors).
    pub compute_eigenvectors: bool,
}
36
37impl Default for LanczosOptions {
38 fn default() -> Self {
39 Self {
40 max_iter: 1000,
41 max_subspace_size: 20,
42 tol: 1e-8,
43 numeigenvalues: 1,
44 compute_eigenvectors: true,
45 }
46 }
47}
48
/// Result of an eigenvalue computation.
#[derive(Debug, Clone)]
pub struct EigenResult<T>
where
    T: Float + Debug + Copy,
{
    /// Computed eigenvalues, sorted in descending order.
    pub eigenvalues: Array1<T>,
    /// Ritz vectors stored as columns (shape n x numeigenvalues), present
    /// only when `LanczosOptions::compute_eigenvectors` was set.
    pub eigenvectors: Option<Array2<T>>,
    /// Number of Lanczos iterations performed.
    pub iterations: usize,
    /// Per-eigenpair residual norms ||A v - lambda v|| (or a cheaper
    /// estimate when eigenvectors were not assembled).
    pub residuals: Array1<T>,
    /// Whether the iteration met the tolerance before exhausting its limits.
    pub converged: bool,
}
66
67#[allow(unused_assignments)]
114#[allow(dead_code)]
115pub fn lanczos<T>(
116 matrix: &SymCsrMatrix<T>,
117 options: &LanczosOptions,
118 initial_guess: Option<ArrayView1<T>>,
119) -> SparseResult<EigenResult<T>>
120where
121 T: Float
122 + SparseElement
123 + Debug
124 + Copy
125 + Add<Output = T>
126 + Sub<Output = T>
127 + Mul<Output = T>
128 + Div<Output = T>
129 + std::iter::Sum
130 + scirs2_core::simd_ops::SimdUnifiedOps
131 + Send
132 + Sync
133 + 'static,
134{
135 let (n, _) = matrix.shape();
136
137 let subspace_size = options.max_subspace_size.min(n);
139
140 let numeigenvalues = options.numeigenvalues.min(subspace_size);
142
143 let mut v = match initial_guess {
145 Some(v) => {
146 if v.len() != n {
147 return Err(SparseError::DimensionMismatch {
148 expected: n,
149 found: v.len(),
150 });
151 }
152 let mut v_arr = Array1::zeros(n);
154 for i in 0..n {
155 v_arr[i] = v[i];
156 }
157 v_arr
158 }
159 None => {
160 let mut v_arr = Array1::zeros(n);
162 v_arr[0] = T::sparse_one(); v_arr
164 }
165 };
166
167 let norm = (v.iter().map(|&val| val * val).sum::<T>()).sqrt();
169 if !SparseElement::is_zero(&norm) {
170 for i in 0..n {
171 v[i] = v[i] / norm;
172 }
173 }
174
175 let mut v_vectors = Vec::with_capacity(subspace_size);
177 v_vectors.push(v.clone());
178
179 let mut alpha = Vec::<T>::with_capacity(subspace_size); let mut beta = Vec::<T>::with_capacity(subspace_size - 1); let mut w = sym_csr_matvec(matrix, &v.view())?;
185 let alpha_j = v.iter().zip(w.iter()).map(|(&vi, &wi)| vi * wi).sum::<T>();
186 alpha.push(alpha_j);
187
188 for i in 0..n {
190 w[i] = w[i] - alpha_j * v[i];
191 }
192
193 let beta_j = (w.iter().map(|&val| val * val).sum::<T>()).sqrt();
195
196 let mut iter = 1;
197 let mut converged = false;
198
199 while iter < options.max_iter && alpha.len() < subspace_size {
200 if SparseElement::is_zero(&beta_j) {
201 break;
203 }
204
205 beta.push(beta_j);
206
207 let mut v_next = Array1::zeros(n);
209 for i in 0..n {
210 v_next[i] = w[i] / beta_j;
211 }
212
213 v_vectors.push(v_next.clone());
215
216 w = sym_csr_matvec(matrix, &v_next.view())?;
218
219 for v_j in v_vectors.iter() {
221 let proj = v_j
222 .iter()
223 .zip(w.iter())
224 .map(|(&vj, &wi)| vj * wi)
225 .sum::<T>();
226 for i in 0..n {
227 w[i] = w[i] - proj * v_j[i];
228 }
229 }
230
231 let alpha_j = v_next
233 .iter()
234 .zip(w.iter())
235 .map(|(&vi, &wi)| vi * wi)
236 .sum::<T>();
237 alpha.push(alpha_j);
238
239 for i in 0..n {
241 w[i] = w[i] - alpha_j * v_next[i];
242 }
243
244 let beta_j_next = (w.iter().map(|&val| val * val).sum::<T>()).sqrt();
246
247 if alpha.len() >= numeigenvalues {
249 let (eigvals, _) = solve_tridiagonal_eigenproblem(&alpha, &beta, numeigenvalues)?;
251
252 if beta_j_next < T::from(options.tol).expect("Operation failed") * eigvals[0].abs() {
254 converged = true;
255 break;
256 }
257 }
258
259 v = v_next;
260 iter += 1;
261
262 if iter < options.max_iter && alpha.len() < subspace_size {
264 let _beta_j = beta_j_next;
265 }
266 }
267
268 let (eigvals, eigvecs) = solve_tridiagonal_eigenproblem(&alpha, &beta, numeigenvalues)?;
270
271 let eigenvectors = if options.compute_eigenvectors {
273 let mut ritz_vectors = Array2::zeros((n, numeigenvalues));
274
275 for k in 0..numeigenvalues {
276 for i in 0..n {
277 let mut sum = T::sparse_zero();
278 for j in 0..v_vectors.len() {
279 if j < eigvecs.len() && k < eigvecs[j].len() {
280 sum = sum + eigvecs[j][k] * v_vectors[j][i];
281 }
282 }
283 ritz_vectors[[i, k]] = sum;
284 }
285 }
286
287 Some(ritz_vectors)
288 } else {
289 None
290 };
291
292 let actualeigenvalues = eigvals.len();
294 let mut residuals = Array1::zeros(actualeigenvalues);
295 if let Some(ref evecs) = eigenvectors {
296 for k in 0..actualeigenvalues {
297 let mut evec = Array1::zeros(n);
298 for i in 0..n {
299 evec[i] = evecs[[i, k]];
300 }
301
302 let ax = sym_csr_matvec(matrix, &evec.view())?;
303
304 let mut res = Array1::zeros(n);
305 for i in 0..n {
306 res[i] = ax[i] - eigvals[k] * evec[i];
307 }
308
309 residuals[k] = (res.iter().map(|&v| v * v).sum::<T>()).sqrt();
310 }
311 } else {
312 for k in 0..numeigenvalues {
315 if k < eigvecs.len() && !beta.is_empty() {
316 residuals[k] = beta[beta.len() - 1] * eigvecs[eigvecs.len() - 1][k].abs();
317 }
318 }
319 }
320
321 let result = EigenResult {
323 eigenvalues: Array1::from_vec(eigvals),
324 eigenvectors,
325 iterations: iter,
326 residuals,
327 converged,
328 };
329
330 Ok(result)
331}
332
/// Computes eigenvalues and eigenvectors of the symmetric tridiagonal
/// matrix defined by diagonal `alpha` and off-diagonal `beta`.
///
/// Systems with n <= 3 are delegated to the closed-form solver; larger
/// systems use a QL-style iteration with implicit shifts (in the spirit of
/// EISPACK's `tql2`). Returns up to `numeigenvalues` eigenvalues in
/// descending order, each paired with its length-n eigenvector.
///
/// # Errors
/// * `SparseError::ValueError` when `alpha` is empty.
/// * `SparseError::DimensionMismatch` when `beta.len() != alpha.len() - 1`.
/// * `SparseError::IterativeSolverFailure` when a QL sweep fails to
///   converge within 30 iterations.
#[allow(dead_code)]
fn solve_tridiagonal_eigenproblem<T>(
    alpha: &[T],
    beta: &[T],
    numeigenvalues: usize,
) -> SparseResult<(Vec<T>, Vec<Vec<T>>)>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>,
{
    let n = alpha.len();
    if n == 0 {
        return Err(SparseError::ValueError(
            "Empty tridiagonal matrix".to_string(),
        ));
    }

    if beta.len() != n - 1 {
        return Err(SparseError::DimensionMismatch {
            expected: n - 1,
            found: beta.len(),
        });
    }

    // Small systems are solved exactly in closed form.
    if n <= 3 {
        return solve_small_tridiagonal(alpha, beta, numeigenvalues);
    }

    // Working copies: d holds the (evolving) diagonal, e the off-diagonal
    // padded with a trailing zero so e[i] is defined for i = n - 1.
    let mut d = alpha.to_vec();
    let mut e = beta.to_vec();
    e.push(T::sparse_zero());

    // z accumulates the rotations and ends up holding the eigenvectors:
    // column idx of z (i.e. z[i][idx] over i) is the eigenvector for d[idx].
    let mut z = vec![vec![T::sparse_zero(); n]; n];
    #[allow(clippy::needless_range_loop)]
    for i in 0..n {
        z[i][i] = T::sparse_one();
    }

    for l in 0..n {
        let mut iter = 0;
        let max_iter = 30; // conventional cap for QL sweeps per eigenvalue
        loop {
            // Find the first m >= l where the off-diagonal is negligible
            // relative to its neighboring diagonal entries.
            let mut m = l;
            while m < n - 1 {
                if e[m].abs()
                    <= T::from(1e-12).expect("Operation failed") * (d[m].abs() + d[m + 1].abs())
                {
                    break;
                }
                m += 1;
            }

            // d[l] is already isolated: this eigenvalue has converged.
            if m == l {
                break;
            }

            if iter >= max_iter {
                return Err(SparseError::IterativeSolverFailure(
                    "QL algorithm did not converge".to_string(),
                ));
            }

            // Compute the implicit shift from the leading 2x2 block.
            let g = (d[l + 1] - d[l]) * T::from(0.5).expect("Operation failed") / e[l];
            let r = (g * g + T::sparse_one()).sqrt();
            // Sign choice keeps the denominator large (numerical stability).
            let mut g = d[m] - d[l] + e[l] / (g + if g >= T::sparse_zero() { r } else { -r });

            let mut s = T::sparse_one();
            let mut c = T::sparse_one();
            let mut p = T::sparse_zero();

            // Sweep Givens rotations from m - 1 down to l, chasing the bulge.
            let mut i = m - 1;
            while i >= l && i < n {
                let f = s * e[i];
                let b = c * e[i];

                // NOTE: this `r` shadows the outer one only inside this loop.
                let r = (f * f + g * g).sqrt();
                e[i + 1] = r;

                if SparseElement::is_zero(&r) {
                    // Rotation degenerated; deflate and restart the sweep.
                    d[i + 1] = d[i + 1] - p;
                    e[m] = T::sparse_zero();
                    break;
                }

                s = f / r;
                c = g / r;

                let _h = g * p;
                p = s * (d[i] - d[i + 1]) + c * b;
                d[i + 1] = d[i + 1] + p;
                g = c * s - b;

                // Accumulate the rotation into the eigenvector matrix.
                #[allow(clippy::needless_range_loop)]
                for k in 0..n {
                    let t = z[k][i + 1];
                    z[k][i + 1] = s * z[k][i] + c * t;
                    z[k][i] = c * z[k][i] - s * t;
                }

                // i is unsigned: break before decrementing past zero.
                if i == 0 {
                    break;
                }
                i -= 1;
            }

            // NOTE(review): the i32 casts only matter for huge n; i < l can
            // occur because i may stop at 0 while l > 0 — verify intent.
            if (i as i32) < (l as i32) || i >= n {
                break;
            }

            // NOTE(review): this `r` is the pre-sweep shift radius from
            // above, NOT the per-rotation `r` shadowed inside the loop —
            // confirm this matches the reference QL formulation.
            if SparseElement::is_zero(&r) {
                if abs_diff_eq!(m, l + 1) {
                    break;
                }
                d[l] = d[l] - p;
                e[l] = g;
                e[m - 1] = T::sparse_zero();
            }

            iter += 1;
        }
    }

    // Sort eigenvalue indices in descending order of d.
    let mut indices: Vec<usize> = (0..n).collect();
    indices.sort_by(|&i, &j| d[j].partial_cmp(&d[i]).unwrap_or(std::cmp::Ordering::Equal));

    let mut sortedeigenvalues = Vec::with_capacity(numeigenvalues);
    let mut sorted_eigenvectors = Vec::with_capacity(numeigenvalues);

    // Emit the top numeigenvalues pairs; eigenvector k is column idx of z.
    #[allow(clippy::needless_range_loop)]
    for k in 0..numeigenvalues.min(n) {
        let idx = indices[k];
        sortedeigenvalues.push(d[idx]);

        let mut eigenvector = Vec::with_capacity(n);
        #[allow(clippy::needless_range_loop)]
        for i in 0..n {
            eigenvector.push(z[i][idx]);
        }
        sorted_eigenvectors.push(eigenvector);
    }

    Ok((sortedeigenvalues, sorted_eigenvectors))
}
517
/// Closed-form eigenvalue/eigenvector solver for symmetric tridiagonal
/// matrices of size n <= 3 (n = alpha.len()).
///
/// Returns the first `numeigenvalues` eigenvalues in descending order,
/// each with a normalized eigenvector. Errors with `ValueError` for n > 3.
#[allow(unused_assignments)]
#[allow(dead_code)]
fn solve_small_tridiagonal<T>(
    alpha: &[T],
    beta: &[T],
    numeigenvalues: usize,
) -> SparseResult<(Vec<T>, Vec<Vec<T>>)>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>,
{
    let n = alpha.len();

    // 1x1: the single diagonal entry is the eigenvalue.
    if n == 1 {
        return Ok((vec![alpha[0]], vec![vec![T::sparse_one()]]));
    }

    // 2x2: eigenvalues from the characteristic quadratic via trace/det.
    if n == 2 {
        let a = alpha[0];
        let b = alpha[1];
        let c = beta[0];

        let trace = a + b;
        let det = a * b - c * c;

        // lambda = (trace +/- sqrt(trace^2 - 4 det)) / 2; the discriminant
        // is non-negative for a symmetric matrix.
        let discriminant = (trace * trace - T::from(4.0).expect("Operation failed") * det).sqrt();
        let lambda1 = (trace + discriminant) * T::from(0.5).expect("Operation failed");
        let lambda2 = (trace - discriminant) * T::from(0.5).expect("Operation failed");

        // Order descending.
        let (lambda1, lambda2) = if lambda1 >= lambda2 {
            (lambda1, lambda2)
        } else {
            (lambda2, lambda1)
        };

        let mut v1 = vec![T::sparse_zero(); 2];
        let mut v2 = vec![T::sparse_zero(); 2];

        if !SparseElement::is_zero(&c) {
            // Eigenvector of [[a, c], [c, b]] for lambda: (c, lambda - a).
            v1[0] = c;
            v1[1] = lambda1 - a;

            v2[0] = c;
            v2[1] = lambda2 - a;

            let norm1 = (v1[0] * v1[0] + v1[1] * v1[1]).sqrt();
            let norm2 = (v2[0] * v2[0] + v2[1] * v2[1]).sqrt();

            if !SparseElement::is_zero(&norm1) {
                v1[0] = v1[0] / norm1;
                v1[1] = v1[1] / norm1;
            }

            if !SparseElement::is_zero(&norm2) {
                v2[0] = v2[0] / norm2;
                v2[1] = v2[1] / norm2;
            }
        } else {
            // Already diagonal: unit vectors, larger diagonal entry first.
            if a >= b {
                v1[0] = T::sparse_one();
                v1[1] = T::sparse_zero();

                v2[0] = T::sparse_zero();
                v2[1] = T::sparse_one();
            } else {
                v1[0] = T::sparse_zero();
                v1[1] = T::sparse_one();

                v2[0] = T::sparse_one();
                v2[1] = T::sparse_zero();
            }
        }

        let mut eigenvalues = vec![lambda1, lambda2];
        let mut eigenvectors = vec![v1, v2];

        eigenvalues.truncate(numeigenvalues);
        eigenvectors.truncate(numeigenvalues);

        return Ok((eigenvalues, eigenvectors));
    }

    // 3x3: eigenvalues as roots of the characteristic cubic
    // lambda^3 + p*lambda^2 + q*lambda + r = 0 for [[a,d,0],[d,b,e],[0,e,c]].
    if n == 3 {
        let a = alpha[0];
        let b = alpha[1];
        let c = alpha[2];
        let d = beta[0];
        let e = beta[1];

        let p = -(a + b + c);
        let q = a * b + a * c + b * c - d * d - e * e;
        let r = -(a * b * c - a * e * e - c * d * d);

        let eigenvalues = solve_cubic(p, q, r)?;

        // Sort descending to match the solver's contract.
        let mut sortedeigenvalues = eigenvalues.clone();
        sortedeigenvalues.sort_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));

        let mut eigenvectors = Vec::with_capacity(sortedeigenvalues.len());

        // For each lambda, solve (M - lambda I) v = 0 by one step of
        // Gaussian elimination pivoting on the largest row.
        for &lambda in &sortedeigenvalues[0..numeigenvalues.min(3)] {
            let mut m00 = a - lambda;
            let mut m01 = d;
            let m02 = T::sparse_zero();

            let mut m10 = d;
            let mut m11 = b - lambda;
            let mut m12 = e;

            let m20 = T::sparse_zero();
            let mut m21 = e;
            let mut m22 = c - lambda;

            let r0_norm = (m00 * m00 + m01 * m01 + m02 * m02).sqrt();
            let r1_norm = (m10 * m10 + m11 * m11 + m12 * m12).sqrt();
            let r2_norm = (m20 * m20 + m21 * m21 + m22 * m22).sqrt();

            let mut v = vec![T::sparse_zero(); 3];

            if r0_norm >= r1_norm && r0_norm >= r2_norm && !SparseElement::is_zero(&r0_norm) {
                // Pivot on row 0: scale it, eliminate rows 1 and 2, then
                // back-substitute with the free variable v[2] = 1.
                let scale = T::sparse_one() / r0_norm;
                m00 = m00 * scale;
                m01 = m01 * scale;

                let factor = m10 / m00;
                m11 = m11 - factor * m01;
                m12 = m12 - factor * m02;

                let factor = m20 / m00;
                m21 = m21 - factor * m01;
                m22 = m22 - factor * m02;

                v[2] = T::sparse_one();
                v[1] = -m12 * v[2] / m11;
                v[0] = -(m01 * v[1] + m02 * v[2]) / m00;
            } else if r1_norm >= r0_norm && r1_norm >= r2_norm && !SparseElement::is_zero(&r1_norm)
            {
                // Pivot on row 1.
                // NOTE(review): m02 is always zero, so v[0] comes out 0
                // (or NaN when m00 == 0) — verify this branch's derivation.
                let scale = T::sparse_one() / r1_norm;
                m10 = m10 * scale;
                m11 = m11 * scale;
                m12 = m12 * scale;

                v[2] = T::sparse_one();
                v[0] = -m02 * v[2] / m00;
                v[1] = -(m10 * v[0] + m12 * v[2]) / m11;
            } else if !SparseElement::is_zero(&r2_norm) {
                // NOTE(review): fixed fallback vector; no elimination is
                // performed against row 2 here — confirm intent.
                v[0] = T::sparse_one();
                v[1] = T::sparse_zero();
                v[2] = T::sparse_zero();
            } else {
                // All rows vanished (lambda of multiplicity 3): any unit
                // vector is an eigenvector.
                v[0] = T::sparse_one();
                v[1] = T::sparse_zero();
                v[2] = T::sparse_zero();
            }

            // Normalize the eigenvector.
            let norm = (v[0] * v[0] + v[1] * v[1] + v[2] * v[2]).sqrt();
            if !SparseElement::is_zero(&norm) {
                v[0] = v[0] / norm;
                v[1] = v[1] / norm;
                v[2] = v[2] / norm;
            }

            eigenvectors.push(v);
        }

        sortedeigenvalues.truncate(numeigenvalues);
        eigenvectors.truncate(numeigenvalues);

        return Ok((sortedeigenvalues, eigenvectors));
    }

    Err(SparseError::ValueError(
        "Tridiagonal eigenvalue problem for n > 3 not implemented".to_string(),
    ))
}
729
730fn solve_cubic<T>(p: T, q: T, r: T) -> SparseResult<Vec<T>>
733where
734 T: Float
735 + SparseElement
736 + Debug
737 + Copy
738 + Add<Output = T>
739 + Sub<Output = T>
740 + Mul<Output = T>
741 + Div<Output = T>,
742{
743 let p_over_3 = p / T::from(3.0).expect("Operation failed");
747 let q_new = q - p * p / T::from(3.0).expect("Operation failed");
748 let r_new = r - p * q / T::from(3.0).expect("Operation failed")
749 + T::from(2.0).expect("Operation failed") * p * p * p
750 / T::from(27.0).expect("Operation failed");
751
752 let discriminant = -(T::from(4.0).expect("Operation failed") * q_new * q_new * q_new
754 + T::from(27.0).expect("Operation failed") * r_new * r_new);
755
756 if discriminant > T::sparse_zero() {
757 let theta = ((T::from(3.0).expect("Operation failed") * r_new)
759 / (T::from(2.0).expect("Operation failed") * q_new)
760 * (-T::from(3.0).expect("Operation failed") / q_new).sqrt())
761 .acos();
762 let sqrt_term = T::from(2.0).expect("Operation failed")
763 * (-q_new / T::from(3.0).expect("Operation failed")).sqrt();
764
765 let y1 = sqrt_term * (theta / T::from(3.0).expect("Operation failed")).cos();
766 let y2 = sqrt_term
767 * ((theta
768 + T::from(2.0).expect("Operation failed")
769 * T::from(std::f64::consts::PI).expect("Operation failed"))
770 / T::from(3.0).expect("Operation failed"))
771 .cos();
772 let y3 = sqrt_term
773 * ((theta
774 + T::from(4.0).expect("Operation failed")
775 * T::from(std::f64::consts::PI).expect("Operation failed"))
776 / T::from(3.0).expect("Operation failed"))
777 .cos();
778
779 let x1 = y1 - p_over_3;
780 let x2 = y2 - p_over_3;
781 let x3 = y3 - p_over_3;
782
783 Ok(vec![x1, x2, x3])
784 } else {
785 let u = (-r_new / T::from(2.0).expect("Operation failed")
787 + (discriminant / T::from(-108.0).expect("Operation failed")).sqrt())
788 .cbrt();
789 let v = if SparseElement::is_zero(&u) {
790 T::sparse_zero()
791 } else {
792 -q_new / (T::from(3.0).expect("Operation failed") * u)
793 };
794
795 let y = u + v;
796 let x = y - p_over_3;
797
798 Ok(vec![x])
799 }
800}
801
#[cfg(test)]
mod tests {
    use super::*;
    use crate::sym_csr::SymCsrMatrix;

    #[test]
    fn test_lanczos_simple() {
        // Symmetric 2x2 matrix [[2, 1], [1, 2]] stored as lower-triangle
        // CSR: row 0 holds [2], row 1 holds [1, 2].
        let data = vec![2.0, 1.0, 2.0];
        let indptr = vec![0, 1, 3];
        let indices = vec![0, 0, 1];
        let matrix = SymCsrMatrix::new(data, indptr, indices, (2, 2)).expect("Operation failed");

        let options = LanczosOptions {
            max_iter: 100,
            max_subspace_size: 2,
            tol: 1e-8,
            numeigenvalues: 1,
            compute_eigenvectors: true,
        };
        // The dominant eigenvalue of this matrix is 3 (eigenvector ~ [1, 1]).
        let result = lanczos(&matrix, &options, None).expect("Operation failed");

        assert!(result.converged);
        assert_eq!(result.eigenvalues.len(), 1);
        assert!(result.eigenvalues[0].is_finite());
    }

    #[test]
    fn test_tridiagonal_solver_2x2() {
        // T = [[2, 1], [1, 3]]; eigenvalues (5 +/- sqrt(5)) / 2, expected
        // back in descending order.
        let alpha = vec![2.0, 3.0];
        let beta = vec![1.0];
        let (eigenvalues, _eigenvectors) =
            solve_tridiagonal_eigenproblem(&alpha, &beta, 2).expect("Operation failed");

        assert_eq!(eigenvalues.len(), 2);
        assert!(eigenvalues[0] >= eigenvalues[1]);
    }

    #[test]
    fn test_solve_cubic() {
        // x^3 - 6x^2 + 11x - 6 = (x - 1)(x - 2)(x - 3): roots 1, 2, 3.
        let roots = solve_cubic(-6.0, 11.0, -6.0).expect("Operation failed");
        assert_eq!(roots.len(), 3);

        let mut sorted_roots = roots;
        sorted_roots.sort_by(|a, b| a.partial_cmp(b).expect("Operation failed"));

        assert!((sorted_roots[0] - 1.0).abs() < 1e-10);
        assert!((sorted_roots[1] - 2.0).abs() < 1e-10);
        assert!((sorted_roots[2] - 3.0).abs() < 1e-10);
    }
}