// scirs2_sparse/linalg/eigen/lanczos.rs
use crate::error::{SparseError, SparseResult};
use crate::sym_csr::SymCsrMatrix;
use crate::sym_ops::sym_csr_matvec;
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
use scirs2_core::numeric::Float;
use scirs2_core::SparseElement;
use std::fmt::Debug;
use std::ops::{Add, Div, Mul, Sub};

/// Configuration options for the Lanczos eigenvalue solver.
#[derive(Debug, Clone)]
pub struct LanczosOptions {
    /// Maximum number of Lanczos iterations before giving up.
    pub max_iter: usize,
    /// Upper bound on the dimension of the Krylov subspace.
    pub max_subspace_size: usize,
    /// Convergence tolerance used in the residual test.
    pub tol: f64,
    /// Number of eigenvalues requested.
    pub numeigenvalues: usize,
    /// Whether Ritz (approximate eigen-) vectors should be assembled.
    pub compute_eigenvectors: bool,
}
29
impl Default for LanczosOptions {
    /// Defaults: up to 1000 iterations, a 20-dimensional subspace,
    /// 1e-8 tolerance, one eigenvalue, eigenvector computation enabled.
    fn default() -> Self {
        Self {
            max_iter: 1000,
            max_subspace_size: 20,
            tol: 1e-8,
            numeigenvalues: 1,
            compute_eigenvectors: true,
        }
    }
}
41
/// Outcome of an eigenvalue computation.
#[derive(Debug, Clone)]
pub struct EigenResult<T>
where
    T: Float + Debug + Copy,
{
    /// Computed eigenvalues.
    pub eigenvalues: Array1<T>,
    /// Corresponding eigenvectors stored as columns, when requested.
    pub eigenvectors: Option<Array2<T>>,
    /// Number of iterations performed.
    pub iterations: usize,
    /// Residual norm for each computed eigenpair.
    pub residuals: Array1<T>,
    /// Whether the iteration met the convergence tolerance.
    pub converged: bool,
}
59
60#[allow(unused_assignments)]
107#[allow(dead_code)]
108pub fn lanczos<T>(
109 matrix: &SymCsrMatrix<T>,
110 options: &LanczosOptions,
111 initial_guess: Option<ArrayView1<T>>,
112) -> SparseResult<EigenResult<T>>
113where
114 T: Float
115 + SparseElement
116 + Debug
117 + Copy
118 + Add<Output = T>
119 + Sub<Output = T>
120 + Mul<Output = T>
121 + Div<Output = T>
122 + std::iter::Sum
123 + scirs2_core::simd_ops::SimdUnifiedOps
124 + Send
125 + Sync
126 + 'static,
127{
128 let (n, _) = matrix.shape();
129
130 let subspace_size = options.max_subspace_size.min(n);
132
133 let numeigenvalues = options.numeigenvalues.min(subspace_size);
135
136 let mut v = match initial_guess {
138 Some(v) => {
139 if v.len() != n {
140 return Err(SparseError::DimensionMismatch {
141 expected: n,
142 found: v.len(),
143 });
144 }
145 let mut v_arr = Array1::zeros(n);
147 for i in 0..n {
148 v_arr[i] = v[i];
149 }
150 v_arr
151 }
152 None => {
153 let mut v_arr = Array1::zeros(n);
155 v_arr[0] = T::sparse_one(); v_arr
157 }
158 };
159
160 let norm = (v.iter().map(|&val| val * val).sum::<T>()).sqrt();
162 if !SparseElement::is_zero(&norm) {
163 for i in 0..n {
164 v[i] = v[i] / norm;
165 }
166 }
167
168 let mut v_vectors = Vec::with_capacity(subspace_size);
170 v_vectors.push(v.clone());
171
172 let mut alpha = Vec::<T>::with_capacity(subspace_size); let mut beta = Vec::<T>::with_capacity(subspace_size - 1); let mut w = sym_csr_matvec(matrix, &v.view())?;
178 let alpha_j = v.iter().zip(w.iter()).map(|(&vi, &wi)| vi * wi).sum::<T>();
179 alpha.push(alpha_j);
180
181 for i in 0..n {
183 w[i] = w[i] - alpha_j * v[i];
184 }
185
186 let mut beta_j = (w.iter().map(|&val| val * val).sum::<T>()).sqrt();
188
189 let mut iter = 1;
190 let mut converged = false;
191
192 while iter < options.max_iter && alpha.len() < subspace_size {
193 if SparseElement::is_zero(&beta_j) {
194 break;
196 }
197
198 beta.push(beta_j);
199
200 let mut v_next = Array1::zeros(n);
202 for i in 0..n {
203 v_next[i] = w[i] / beta_j;
204 }
205
206 v_vectors.push(v_next.clone());
208
209 w = sym_csr_matvec(matrix, &v_next.view())?;
211
212 let alpha_j = v_next
214 .iter()
215 .zip(w.iter())
216 .map(|(&vi, &wi)| vi * wi)
217 .sum::<T>();
218 alpha.push(alpha_j);
219
220 for i in 0..n {
224 w[i] = w[i] - alpha_j * v_next[i];
225 }
226
227 for v_j in v_vectors.iter() {
230 let proj = v_j
231 .iter()
232 .zip(w.iter())
233 .map(|(&vj, &wi)| vj * wi)
234 .sum::<T>();
235 for i in 0..n {
236 w[i] = w[i] - proj * v_j[i];
237 }
238 }
239
240 let beta_j_next = (w.iter().map(|&val| val * val).sum::<T>()).sqrt();
242
243 if alpha.len() >= numeigenvalues {
245 if let Ok((eigvals, _)) = solve_tridiagonal_eigenproblem(&alpha, &beta, numeigenvalues)
247 {
248 if !eigvals.is_empty()
250 && eigvals[0].abs() > T::sparse_zero()
251 && beta_j_next < T::from(options.tol).unwrap_or(T::epsilon()) * eigvals[0].abs()
252 {
253 converged = true;
254 break;
255 }
256 }
257 }
258
259 v = v_next;
260 iter += 1;
261
262 beta_j = beta_j_next;
264 }
265
266 let n_tridiag = alpha.len();
269 let (eigvals, eigvecs) = solve_tridiagonal_eigenproblem(&alpha, &beta, n_tridiag)?;
270
271 let actual_neig = eigvals.len();
273 let eigenvectors = if options.compute_eigenvectors {
274 let mut ritz_vectors = Array2::zeros((n, actual_neig));
275
276 for k in 0..actual_neig {
277 for i in 0..n {
278 let mut sum = T::sparse_zero();
279 for j in 0..v_vectors.len() {
280 if j < eigvecs.len() && k < eigvecs[j].len() {
281 sum = sum + eigvecs[j][k] * v_vectors[j][i];
282 }
283 }
284 ritz_vectors[[i, k]] = sum;
285 }
286 }
287
288 Some(ritz_vectors)
289 } else {
290 None
291 };
292
293 let actualeigenvalues = eigvals.len();
295 let mut residuals = Array1::zeros(actualeigenvalues);
296 if let Some(ref evecs) = eigenvectors {
297 for k in 0..actualeigenvalues {
298 let mut evec = Array1::zeros(n);
299 for i in 0..n {
300 evec[i] = evecs[[i, k]];
301 }
302
303 let ax = sym_csr_matvec(matrix, &evec.view())?;
304
305 let mut res = Array1::zeros(n);
306 for i in 0..n {
307 res[i] = ax[i] - eigvals[k] * evec[i];
308 }
309
310 residuals[k] = (res.iter().map(|&v| v * v).sum::<T>()).sqrt();
311 }
312 } else {
313 for k in 0..actualeigenvalues {
316 if k < eigvecs.len() && !beta.is_empty() {
317 residuals[k] = beta[beta.len() - 1] * eigvecs[eigvecs.len() - 1][k].abs();
318 }
319 }
320 }
321
322 let result = EigenResult {
324 eigenvalues: Array1::from_vec(eigvals),
325 eigenvectors,
326 iterations: iter,
327 residuals,
328 converged,
329 };
330
331 Ok(result)
332}
333
/// Solves the symmetric tridiagonal eigenproblem for the matrix with
/// diagonal `alpha` and off-diagonal `beta`, returning up to
/// `numeigenvalues` eigenvalues sorted in descending order together with
/// their eigenvectors.
///
/// The returned eigenvectors are indexed as `eigenvectors[k][i]`:
/// component `i` of the eigenvector belonging to the `k`-th returned
/// eigenvalue.
///
/// Systems with `n <= 3` are delegated to the closed-form solver; larger
/// systems use an implicitly shifted QL iteration with accumulated
/// rotations.
///
/// # Errors
/// * `SparseError::ValueError` when `alpha` is empty.
/// * `SparseError::DimensionMismatch` when `beta.len() != alpha.len() - 1`.
/// * `SparseError::IterativeSolverFailure` when a QL sweep exceeds the
///   iteration budget.
#[allow(dead_code)]
fn solve_tridiagonal_eigenproblem<T>(
    alpha: &[T],
    beta: &[T],
    numeigenvalues: usize,
) -> SparseResult<(Vec<T>, Vec<Vec<T>>)>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>,
{
    let n = alpha.len();
    if n == 0 {
        return Err(SparseError::ValueError(
            "Empty tridiagonal matrix".to_string(),
        ));
    }

    if beta.len() != n - 1 {
        return Err(SparseError::DimensionMismatch {
            expected: n - 1,
            found: beta.len(),
        });
    }

    // Tiny systems have cheap closed-form solutions.
    if n <= 3 {
        return solve_small_tridiagonal(alpha, beta, numeigenvalues);
    }

    // Working copies: d is the evolving diagonal; e the off-diagonal,
    // padded with a trailing zero so e[m] is always addressable.
    let mut d = alpha.to_vec();
    let mut e = beta.to_vec();
    e.push(T::sparse_zero()); let mut z = vec![vec![T::sparse_zero(); n]; n];
    // z accumulates the rotations starting from the identity; its columns
    // end up holding the eigenvectors.
    #[allow(clippy::needless_range_loop)]
    for i in 0..n {
        z[i][i] = T::sparse_one(); }

    // Safety cap on QL sweeps per eigenvalue.
    let max_ql_iter: usize = 200; for l in 0..n {
        let mut iter_count: usize = 0;

        loop {
            // Find the first index m >= l whose off-diagonal entry is
            // negligible relative to its diagonal neighbors.
            let mut m = l;
            while m < n - 1 {
                let dd = d[m].abs() + d[m + 1].abs();
                if e[m].abs() <= T::from(2.0e-15).unwrap_or(T::epsilon()) * dd {
                    break;
                }
                m += 1;
            }

            // m == l: d[l] is already isolated (converged).
            if m == l {
                break; }

            if iter_count >= max_ql_iter {
                return Err(SparseError::IterativeSolverFailure(
                    "QL algorithm did not converge".to_string(),
                ));
            }
            iter_count += 1;

            // Implicit shift computed from the leading 2x2 block.
            let half = T::from(0.5).unwrap_or(T::sparse_one());
            let mut g = (d[l + 1] - d[l]) / (e[l] + e[l]); let mut r = (g * g + T::sparse_one()).sqrt();
            g = d[m] - d[l] + e[l] / (g + if g >= T::sparse_zero() { r } else { -r });

            let mut s = T::sparse_one();
            let mut c = T::sparse_one();
            let mut p = T::sparse_zero();

            // Sweep Givens rotations from row m-1 down to row l.
            let mut broke_early = false;
            let mut ii = m; while ii > l {
                let i = ii - 1;

                let f = s * e[i];
                let b = c * e[i];

                r = (f * f + g * g).sqrt();
                e[i + 1] = r;

                // Underflow guard: the rotation is degenerate; deflate and
                // restart this sweep.
                if r < T::from(1e-30).unwrap_or(T::epsilon()) {
                    d[i + 1] = d[i + 1] - p;
                    e[m] = T::sparse_zero();
                    broke_early = true;
                    break;
                }

                s = f / r;
                c = g / r;
                g = d[i + 1] - p;
                r = (d[i] - g) * s + c * b + c * b;
                // `half` is unused in this formulation; explicitly discarded.
                let _ = half; p = s * r;
                d[i + 1] = g + p;
                g = c * r - b;

                // Accumulate the rotation into the eigenvector matrix.
                #[allow(clippy::needless_range_loop)]
                for k in 0..n {
                    let t = z[k][i + 1];
                    z[k][i + 1] = s * z[k][i] + c * t;
                    z[k][i] = c * z[k][i] - s * t;
                }

                ii -= 1;
            }

            if broke_early {
                continue; }

            d[l] = d[l] - p;
            e[l] = g;
            e[m] = T::sparse_zero();
        }
    }

    // Sort eigenvalue indices in descending order of value.
    let mut indices: Vec<usize> = (0..n).collect();
    indices.sort_by(|&i, &j| d[j].partial_cmp(&d[i]).unwrap_or(std::cmp::Ordering::Equal));

    let mut sortedeigenvalues = Vec::with_capacity(numeigenvalues);
    let mut sorted_eigenvectors = Vec::with_capacity(numeigenvalues);

    // Gather the requested number of leading eigenpairs (columns of z).
    #[allow(clippy::needless_range_loop)]
    for k in 0..numeigenvalues.min(n) {
        let idx = indices[k];
        sortedeigenvalues.push(d[idx]);

        let mut eigenvector = Vec::with_capacity(n);
        #[allow(clippy::needless_range_loop)]
        for i in 0..n {
            eigenvector.push(z[i][idx]);
        }
        sorted_eigenvectors.push(eigenvector);
    }

    Ok((sortedeigenvalues, sorted_eigenvectors))
}
518
/// Closed-form / small-scale solver for the symmetric tridiagonal
/// eigenproblem with diagonal `alpha` and off-diagonal `beta`.
///
/// Handles `n == 1`, `n == 2` (quadratic formula) and `n == 3`
/// (characteristic cubic) analytically; for larger `n` it falls back to a
/// QL iteration with accumulated rotations. Eigenvalues are returned in
/// descending order, truncated to `numeigenvalues`; `eigenvectors[k][i]`
/// is component `i` of the `k`-th eigenvector.
#[allow(unused_assignments)]
#[allow(dead_code)]
fn solve_small_tridiagonal<T>(
    alpha: &[T],
    beta: &[T],
    numeigenvalues: usize,
) -> SparseResult<(Vec<T>, Vec<Vec<T>>)>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>,
{
    let n = alpha.len();

    // 1x1: the single diagonal entry is the eigenvalue.
    if n == 1 {
        return Ok((vec![alpha[0]], vec![vec![T::sparse_one()]]));
    }

    // 2x2: solve the characteristic quadratic of [[a, c], [c, b]].
    if n == 2 {
        let a = alpha[0];
        let b = alpha[1];
        let c = beta[0];

        let trace = a + b;
        let det = a * b - c * c;

        // lambda = (trace +- sqrt(trace^2 - 4*det)) / 2
        let discriminant = (trace * trace - T::from(4.0).expect("Operation failed") * det).sqrt();
        let lambda1 = (trace + discriminant) * T::from(0.5).expect("Operation failed");
        let lambda2 = (trace - discriminant) * T::from(0.5).expect("Operation failed");

        // Order so that lambda1 >= lambda2.
        let (lambda1, lambda2) = if lambda1 >= lambda2 {
            (lambda1, lambda2)
        } else {
            (lambda2, lambda1)
        };

        let mut v1 = vec![T::sparse_zero(); 2];
        let mut v2 = vec![T::sparse_zero(); 2];

        if !SparseElement::is_zero(&c) {
            // Eigenvector of [[a, c], [c, b]] for lambda: (c, lambda - a).
            v1[0] = c;
            v1[1] = lambda1 - a;

            v2[0] = c;
            v2[1] = lambda2 - a;

            let norm1 = (v1[0] * v1[0] + v1[1] * v1[1]).sqrt();
            let norm2 = (v2[0] * v2[0] + v2[1] * v2[1]).sqrt();

            if !SparseElement::is_zero(&norm1) {
                v1[0] = v1[0] / norm1;
                v1[1] = v1[1] / norm1;
            }

            if !SparseElement::is_zero(&norm2) {
                v2[0] = v2[0] / norm2;
                v2[1] = v2[1] / norm2;
            }
        } else {
            // Matrix is already diagonal: eigenvectors are the unit basis
            // vectors, ordered to match the sorted eigenvalues.
            if a >= b {
                v1[0] = T::sparse_one();
                v1[1] = T::sparse_zero();

                v2[0] = T::sparse_zero();
                v2[1] = T::sparse_one();
            } else {
                v1[0] = T::sparse_zero();
                v1[1] = T::sparse_one();

                v2[0] = T::sparse_one();
                v2[1] = T::sparse_zero();
            }
        }

        let mut eigenvalues = vec![lambda1, lambda2];
        let mut eigenvectors = vec![v1, v2];

        eigenvalues.truncate(numeigenvalues);
        eigenvectors.truncate(numeigenvalues);

        return Ok((eigenvalues, eigenvectors));
    }

    // 3x3: roots of the characteristic cubic of [[a, d, 0], [d, b, e], [0, e, c]].
    if n == 3 {
        let a = alpha[0];
        let b = alpha[1];
        let c = alpha[2];
        let d = beta[0];
        let e = beta[1];

        // Coefficients of lambda^3 + p*lambda^2 + q*lambda + r = 0.
        let p = -(a + b + c);
        let q = a * b + a * c + b * c - d * d - e * e;
        let r = -(a * b * c - a * e * e - c * d * d);

        let eigenvalues = solve_cubic(p, q, r)?;

        // Sort the real roots in descending order.
        let mut sortedeigenvalues = eigenvalues.clone();
        sortedeigenvalues.sort_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));

        let mut eigenvectors = Vec::with_capacity(sortedeigenvalues.len());

        // For each eigenvalue, solve (M - lambda*I) v = 0 by elimination,
        // pivoting on the row with the largest Euclidean norm.
        for &lambda in &sortedeigenvalues[0..numeigenvalues.min(3)] {
            let mut m00 = a - lambda;
            let mut m01 = d;
            let m02 = T::sparse_zero();

            let mut m10 = d;
            let mut m11 = b - lambda;
            let mut m12 = e;

            let m20 = T::sparse_zero();
            let mut m21 = e;
            let mut m22 = c - lambda;

            let r0_norm = (m00 * m00 + m01 * m01 + m02 * m02).sqrt();
            let r1_norm = (m10 * m10 + m11 * m11 + m12 * m12).sqrt();
            let r2_norm = (m20 * m20 + m21 * m21 + m22 * m22).sqrt();

            let mut v = vec![T::sparse_zero(); 3];

            if r0_norm >= r1_norm && r0_norm >= r2_norm && !SparseElement::is_zero(&r0_norm) {
                // Pivot on row 0: eliminate it from rows 1 and 2, then
                // back-substitute with v[2] fixed to 1.
                let scale = T::sparse_one() / r0_norm;
                m00 = m00 * scale;
                m01 = m01 * scale;

                let factor = m10 / m00;
                m11 = m11 - factor * m01;
                m12 = m12 - factor * m02;

                let factor = m20 / m00;
                m21 = m21 - factor * m01;
                m22 = m22 - factor * m02;

                v[2] = T::sparse_one(); v[1] = -m12 * v[2] / m11;
                v[0] = -(m01 * v[1] + m02 * v[2]) / m00;
            } else if r1_norm >= r0_norm && r1_norm >= r2_norm && !SparseElement::is_zero(&r1_norm)
            {
                // Pivot on row 1.
                // NOTE(review): divides by m00 below; if m00 == 0 this yields
                // a non-finite component — verify against expected inputs.
                let scale = T::sparse_one() / r1_norm;
                m10 = m10 * scale;
                m11 = m11 * scale;
                m12 = m12 * scale;

                v[2] = T::sparse_one(); v[0] = -m02 * v[2] / m00;
                v[1] = -(m10 * v[0] + m12 * v[2]) / m11;
            } else if !SparseElement::is_zero(&r2_norm) {
                // NOTE(review): this branch returns a fixed basis vector and
                // never uses row 2 (m20/m21/m22) — confirm this is intended.
                v[0] = T::sparse_one(); v[1] = T::sparse_zero();
                v[2] = T::sparse_zero();
            } else {
                // All rows vanished (matrix equals lambda*I): any unit
                // vector is an eigenvector.
                v[0] = T::sparse_one();
                v[1] = T::sparse_zero();
                v[2] = T::sparse_zero();
            }

            // Normalize the computed eigenvector.
            let norm = (v[0] * v[0] + v[1] * v[1] + v[2] * v[2]).sqrt();
            if !SparseElement::is_zero(&norm) {
                v[0] = v[0] / norm;
                v[1] = v[1] / norm;
                v[2] = v[2] / norm;
            }

            eigenvectors.push(v);
        }

        sortedeigenvalues.truncate(numeigenvalues);
        eigenvectors.truncate(numeigenvalues);

        return Ok((sortedeigenvalues, eigenvectors));
    }

    // Fallback for n > 3: QL iteration with accumulated rotations.
    // (solve_tridiagonal_eigenproblem only delegates here for n <= 3, so
    // this path presumably serves direct callers — TODO confirm.)
    let mut d = alpha.to_vec();
    let mut e = beta.to_vec();
    // Pad the off-diagonal with a trailing zero so e[m] is addressable.
    e.push(T::sparse_zero());

    // z accumulates rotations from the identity; columns become eigenvectors.
    let mut z = vec![vec![T::sparse_zero(); n]; n];
    for i in 0..n {
        z[i][i] = T::sparse_one();
    }

    let max_ql_iter: usize = 30 * n;

    for l in 0..n {
        let mut iter_count = 0usize;
        loop {
            // Locate the first off-diagonal entry negligible at machine
            // precision (dd + |e[m]| == dd is an exact-rounding test).
            let mut m = l;
            while m < n - 1 {
                let dd = d[m].abs() + d[m + 1].abs();
                if dd + e[m].abs() == dd {
                    break;
                }
                m += 1;
            }
            if m == l {
                break;
            }
            if iter_count >= max_ql_iter {
                return Err(SparseError::IterativeSolverFailure(
                    "QL algorithm for tridiagonal eigenproblem did not converge".to_string(),
                ));
            }
            iter_count += 1;

            // Implicit shift from the leading 2x2 block.
            let g0 = (d[l + 1] - d[l]) / (T::from(2.0).expect("conv") * e[l]);
            let r0 = (g0 * g0 + T::sparse_one()).sqrt();
            let signed_r = if g0 >= T::sparse_zero() { r0 } else { -r0 };
            let mut g = d[m] - d[l] + e[l] / (g0 + signed_r);

            let mut s = T::sparse_one();
            let mut c = T::sparse_one();
            let mut p = T::sparse_zero();

            if m == 0 {
                break;
            }
            // Sweep Givens rotations from row m-1 down to row l.
            let mut i = m - 1;
            loop {
                let f = s * e[i];
                let b = c * e[i];
                let r = (f * f + g * g).sqrt();
                e[i + 1] = r;

                // Degenerate rotation: deflate and restart the sweep.
                if SparseElement::is_zero(&r) {
                    d[i + 1] = d[i + 1] - p;
                    e[m] = T::sparse_zero();
                    break;
                }

                s = f / r;
                c = g / r;
                g = d[i + 1] - p;
                let rr = (d[i] - g) * s + T::from(2.0).expect("conv") * c * b;
                p = s * rr;
                d[i + 1] = g + p;
                g = c * rr - b;

                // Accumulate the rotation into the eigenvector matrix.
                for k in 0..n {
                    let t = z[k][i + 1];
                    z[k][i + 1] = s * z[k][i] + c * t;
                    z[k][i] = c * z[k][i] - s * t;
                }

                if i == l {
                    break;
                }
                i -= 1;
            }
            d[l] = d[l] - p;
            e[l] = g;
            e[m] = T::sparse_zero();
        }
    }

    // Sort indices by descending eigenvalue and collect the leading pairs.
    let mut indices: Vec<usize> = (0..n).collect();
    indices.sort_by(|&a, &b| d[b].partial_cmp(&d[a]).unwrap_or(std::cmp::Ordering::Equal));

    let take = numeigenvalues.min(n);
    let mut eigenvalues = Vec::with_capacity(take);
    let mut eigenvectors = Vec::with_capacity(take);

    for &idx in indices.iter().take(take) {
        eigenvalues.push(d[idx]);
        let mut v = Vec::with_capacity(n);
        for row in 0..n {
            v.push(z[row][idx]);
        }
        // Re-normalize the column to guard against rounding drift.
        let nrm: T = v
            .iter()
            .map(|&x| x * x)
            .fold(T::sparse_zero(), |a, b| a + b)
            .sqrt();
        if !SparseElement::is_zero(&nrm) {
            for x in &mut v {
                *x = *x / nrm;
            }
        }
        eigenvectors.push(v);
    }

    Ok((eigenvalues, eigenvectors))
}
842
843fn solve_cubic<T>(p: T, q: T, r: T) -> SparseResult<Vec<T>>
846where
847 T: Float
848 + SparseElement
849 + Debug
850 + Copy
851 + Add<Output = T>
852 + Sub<Output = T>
853 + Mul<Output = T>
854 + Div<Output = T>,
855{
856 let p_over_3 = p / T::from(3.0).expect("Operation failed");
860 let q_new = q - p * p / T::from(3.0).expect("Operation failed");
861 let r_new = r - p * q / T::from(3.0).expect("Operation failed")
862 + T::from(2.0).expect("Operation failed") * p * p * p
863 / T::from(27.0).expect("Operation failed");
864
865 let discriminant = -(T::from(4.0).expect("Operation failed") * q_new * q_new * q_new
867 + T::from(27.0).expect("Operation failed") * r_new * r_new);
868
869 if discriminant > T::sparse_zero() {
870 let theta = ((T::from(3.0).expect("Operation failed") * r_new)
872 / (T::from(2.0).expect("Operation failed") * q_new)
873 * (-T::from(3.0).expect("Operation failed") / q_new).sqrt())
874 .acos();
875 let sqrt_term = T::from(2.0).expect("Operation failed")
876 * (-q_new / T::from(3.0).expect("Operation failed")).sqrt();
877
878 let y1 = sqrt_term * (theta / T::from(3.0).expect("Operation failed")).cos();
879 let y2 = sqrt_term
880 * ((theta
881 + T::from(2.0).expect("Operation failed")
882 * T::from(std::f64::consts::PI).expect("Operation failed"))
883 / T::from(3.0).expect("Operation failed"))
884 .cos();
885 let y3 = sqrt_term
886 * ((theta
887 + T::from(4.0).expect("Operation failed")
888 * T::from(std::f64::consts::PI).expect("Operation failed"))
889 / T::from(3.0).expect("Operation failed"))
890 .cos();
891
892 let x1 = y1 - p_over_3;
893 let x2 = y2 - p_over_3;
894 let x3 = y3 - p_over_3;
895
896 Ok(vec![x1, x2, x3])
897 } else {
898 let u = (-r_new / T::from(2.0).expect("Operation failed")
900 + (discriminant / T::from(-108.0).expect("Operation failed")).sqrt())
901 .cbrt();
902 let v = if SparseElement::is_zero(&u) {
903 T::sparse_zero()
904 } else {
905 -q_new / (T::from(3.0).expect("Operation failed") * u)
906 };
907
908 let y = u + v;
909 let x = y - p_over_3;
910
911 Ok(vec![x])
912 }
913}
914
#[cfg(test)]
mod tests {
    use super::*;
    use crate::sym_csr::SymCsrMatrix;

    #[test]
    fn test_lanczos_simple() {
        // Lower triangle of the 2x2 symmetric matrix [[2, 1], [1, 2]]
        // stored in symmetric CSR form.
        let data = vec![2.0, 1.0, 2.0]; let indptr = vec![0, 1, 3]; let indices = vec![0, 0, 1]; let matrix = SymCsrMatrix::new(data, indptr, indices, (2, 2)).expect("Operation failed");

        let options = LanczosOptions {
            max_iter: 100,
            max_subspace_size: 2,
            tol: 1e-8,
            numeigenvalues: 1,
            compute_eigenvectors: true,
        };
        let result = lanczos(&matrix, &options, None).expect("Operation failed");

        // The solver should converge and report at least one finite eigenvalue.
        assert!(result.converged);
        assert!(
            !result.eigenvalues.is_empty(),
            "expected at least 1 eigenvalue, got {}",
            result.eigenvalues.len()
        );
        for &ev in result.eigenvalues.iter() {
            assert!(ev.is_finite());
        }
    }

    #[test]
    fn test_tridiagonal_solver_2x2() {
        // T = [[2, 1], [1, 3]]; eigenvalues must come back descending.
        let alpha = vec![2.0, 3.0];
        let beta = vec![1.0];
        let (eigenvalues, _eigenvectors) =
            solve_tridiagonal_eigenproblem(&alpha, &beta, 2).expect("Operation failed");

        assert_eq!(eigenvalues.len(), 2);
        assert!(eigenvalues[0] >= eigenvalues[1]);
    }

    #[test]
    fn test_solve_cubic() {
        // x^3 - 6x^2 + 11x - 6 = (x - 1)(x - 2)(x - 3).
        let roots = solve_cubic(-6.0, 11.0, -6.0).expect("Operation failed");
        assert_eq!(roots.len(), 3);

        let mut sorted_roots = roots;
        sorted_roots.sort_by(|a, b| a.partial_cmp(b).expect("Operation failed"));

        assert!((sorted_roots[0] - 1.0).abs() < 1e-10);
        assert!((sorted_roots[1] - 2.0).abs() < 1e-10);
        assert!((sorted_roots[2] - 3.0).abs() < 1e-10);
    }
}