//! Eigenvalue decomposition routines: general and symmetric solvers, power iteration,
//! and higher-precision variants for small or ill-conditioned matrices.

use scirs2_core::ndarray::{Array1, Array2, ArrayView2, ScalarOperand};
use scirs2_core::numeric::{Complex, Float, NumAssign};
use scirs2_core::random::prelude::*;
use std::iter::Sum;

use crate::decomposition::qr;
use crate::error::{LinalgError, LinalgResult};
use crate::norm::vector_norm;
use crate::validation::validate_decomposition;

/// Result type for general eigenvalue computations: complex eigenvalues paired with a
/// matrix whose columns are the corresponding (complex) eigenvectors.
pub type EigenResult<F> = LinalgResult<(Array1<Complex<F>>, Array2<Complex<F>>)>;

#[allow(dead_code)]
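/// Computes the eigenvalues and right eigenvectors of a general square matrix.
///
/// Results are returned as complex arrays because a real non-symmetric matrix can have
/// complex-conjugate eigenpairs. `workers` optionally configures the parallel backend;
/// `None` keeps the current configuration.
///
/// Illustrative sketch of intended usage (the `scirs2_linalg::eig` import path is an
/// assumption for this example and may differ from the crate's actual re-exports):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
/// use scirs2_linalg::eig; // assumed path
///
/// let a = array![[2.0_f64, 1.0], [0.0, 3.0]];
/// let (values, _vectors) = eig(&a.view(), None).expect("eigendecomposition failed");
/// // The eigenvalues of this upper-triangular matrix are its diagonal entries, 2 and 3.
/// assert!(values.iter().any(|v| (v.re - 2.0).abs() < 1e-8));
/// assert!(values.iter().any(|v| (v.re - 3.0).abs() < 1e-8));
/// ```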
pub fn eig<F>(a: &ArrayView2<F>, workers: Option<usize>) -> EigenResult<F>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    use crate::parallel;

    parallel::configure_workers(workers);

    validate_decomposition(a, "Eigenvalue computation", true)?;

    let n = a.nrows();

    if n == 1 {
        let eigenvalue = Complex::new(a[[0, 0]], F::zero());
        let eigenvector = Array2::eye(1).mapv(|x| Complex::new(x, F::zero()));

        return Ok((Array1::from_elem(1, eigenvalue), eigenvector));
    } else if n == 2 {
        return solve_2x2_eigenvalue_problem(a);
    }

    solve_qr_algorithm(a)
}

#[allow(dead_code)]
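/// Computes only the eigenvalues of a general square matrix.
///
/// This is a thin convenience wrapper around [`eig`] that discards the eigenvectors, so
/// it currently has the same cost as the full decomposition.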
pub fn eigvals<F>(a: &ArrayView2<F>, workers: Option<usize>) -> LinalgResult<Array1<Complex<F>>>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let (eigenvalues, _) = eig(a, workers)?;
    Ok(eigenvalues)
}

#[allow(dead_code)]
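/// Estimates the dominant eigenvalue and eigenvector of a square matrix by power iteration.
///
/// Starting from a random unit vector, the vector is repeatedly multiplied by `a` and
/// renormalized; the Rayleigh quotient of successive iterates converges to the eigenvalue
/// of largest magnitude when that eigenvalue is well separated. Iteration stops after
/// `max_iter` steps or once the eigenvalue estimate changes by less than `tol`.
///
/// Illustrative sketch (import path assumed, not taken from this crate's public API):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
/// use scirs2_linalg::power_iteration; // assumed path
///
/// let a = array![[3.0_f64, 1.0], [1.0, 3.0]];
/// let (lambda, v) = power_iteration(&a.view(), 200, 1e-10).expect("iteration failed");
/// assert!((lambda - 4.0).abs() < 1e-6); // dominant eigenvalue of [[3, 1], [1, 3]]
/// assert!((v[0].abs() - v[1].abs()).abs() < 1e-4); // eigenvector ~ (1, 1) / sqrt(2)
/// ```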
pub fn power_iteration<F>(
    a: &ArrayView2<F>,
    max_iter: usize,
    tol: F,
) -> LinalgResult<(F, Array1<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    if a.nrows() != a.ncols() {
        return Err(LinalgError::ShapeError(format!(
            "Expected square matrix, got shape {:?}",
            a.shape()
        )));
    }

    let n = a.nrows();

    // Start from a random vector so we are unlikely to be orthogonal to the dominant eigenvector.
    let mut rng = scirs2_core::random::rng();
    let mut b = Array1::zeros(n);
    for i in 0..n {
        b[i] = F::from(rng.random_range(-1.0..=1.0)).unwrap_or(F::zero());
    }

    let norm_b = vector_norm(&b.view(), 2)?;
    b.mapv_inplace(|x| x / norm_b);

    let mut eigenvalue = F::zero();
    let mut prev_eigenvalue = F::zero();

    for _ in 0..max_iter {
        // b_new = A * b
        let mut b_new = Array1::zeros(n);
        for i in 0..n {
            let mut sum = F::zero();
            for j in 0..n {
                sum += a[[i, j]] * b[j];
            }
            b_new[i] = sum;
        }

        // Rayleigh quotient estimate (b has unit length): lambda ~ b^T * A * b.
        eigenvalue = F::zero();
        for i in 0..n {
            eigenvalue += b[i] * b_new[i];
        }

        let norm_b_new = vector_norm(&b_new.view(), 2)?;
        for i in 0..n {
            b[i] = b_new[i] / norm_b_new;
        }

        if (eigenvalue - prev_eigenvalue).abs() < tol {
            return Ok((eigenvalue, b));
        }

        prev_eigenvalue = eigenvalue;
    }

    Ok((eigenvalue, b))
}

#[allow(dead_code)]
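/// Computes the eigenvalues and eigenvectors of a real symmetric matrix.
///
/// The input is checked for symmetry and rejected otherwise. Small matrices (n <= 4)
/// use closed-form or specialized solvers; larger matrices fall back to an iterative
/// QR-based routine, optionally using the work-stealing parallel path when `workers`
/// requests more than one thread and the matrix is large.
///
/// Illustrative sketch (import path assumed for the example only):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
/// use scirs2_linalg::eigh; // assumed path
///
/// let a = array![[2.0_f64, 1.0], [1.0, 2.0]];
/// let (values, vectors) = eigh(&a.view(), None).expect("symmetric eigendecomposition failed");
/// // Eigenvalues are 1 and 3; the eigenvector columns are orthonormal.
/// assert!((values[0] - 1.0).abs() < 1e-8 && (values[1] - 3.0).abs() < 1e-8);
/// let dot = vectors[[0, 0]] * vectors[[0, 1]] + vectors[[1, 0]] * vectors[[1, 1]];
/// assert!(dot.abs() < 1e-8);
/// ```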
pub fn eigh<F>(a: &ArrayView2<F>, workers: Option<usize>) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    use crate::parallel;

    parallel::configure_workers(workers);

    if a.nrows() != a.ncols() {
        return Err(LinalgError::ShapeError(format!(
            "Expected square matrix, got shape {:?}",
            a.shape()
        )));
    }

    // Reject non-symmetric input: this path only handles real symmetric matrices.
    for i in 0..a.nrows() {
        for j in (i + 1)..a.ncols() {
            if (a[[i, j]] - a[[j, i]]).abs() > F::epsilon() {
                return Err(LinalgError::ShapeError(
                    "Matrix must be symmetric for Hermitian eigenvalue computation".to_string(),
                ));
            }
        }
    }

    let n = a.nrows();

    if n == 1 {
        let eigenvalue = a[[0, 0]];
        let eigenvector = Array2::eye(1);

        return Ok((Array1::from_elem(1, eigenvalue), eigenvector));
    } else if n == 2 {
        return solve_2x2_symmetric_eigenvalue_problem(a);
    } else if n == 3 {
        return solve_3x3_symmetric_eigenvalue_problem(a);
    } else if n == 4 {
        return solve_4x4_symmetric_eigenvalue_problem(a);
    }

    // Use the work-stealing parallel path only when it is likely to pay off.
    let use_work_stealing = if let Some(num_workers) = workers {
        num_workers > 1 && n > 100
    } else {
        false
    };

    if use_work_stealing {
        if let Some(num_workers) = workers {
            return crate::parallel::parallel_eigvalsh_work_stealing(a, num_workers);
        }
    }

    solve_symmetric_with_power_iteration(a)
}

#[allow(dead_code)]
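/// Eigendecomposition tuned for accuracy on potentially ill-conditioned matrices.
///
/// A rough condition-number estimate selects a precision target; matrices with n <= 4 are
/// handled by specialized high-precision solvers, while larger ones use a QR iteration
/// driven to that target. Note that, unlike [`eig`], this routine returns real-valued
/// arrays, so it is only appropriate for matrices whose eigenvalues are real.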
pub fn advanced_precision_eig<F>(
    a: &ArrayView2<F>,
    workers: Option<usize>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    use crate::parallel;

    parallel::configure_workers(workers);

    if a.nrows() != a.ncols() {
        return Err(LinalgError::ShapeError(format!(
            "Expected square matrix, got shape {:?}",
            a.shape()
        )));
    }

    let n = a.nrows();

    // Pick a precision target based on how ill-conditioned the matrix appears to be.
    let condition_number = estimate_condition_number(a)?;

    let precision_target = if condition_number
        > F::from(1e12).expect("Failed to convert constant to float")
    {
        F::from(1e-8).expect("Failed to convert constant to float")
    } else if condition_number > F::from(1e8).expect("Failed to convert constant to float") {
        F::from(1e-9).expect("Failed to convert constant to float")
    } else {
        F::from(1e-10).expect("Failed to convert constant to float")
    };

    if n <= 4 {
        return advanced_precise_smallmatrix_eig(a, precision_target);
    }

    advanced_precise_iterative_eig(a, precision_target, workers)
}

#[allow(dead_code)]
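/// Produces a cheap, order-of-magnitude estimate of the condition number.
///
/// The largest eigenvalue magnitude comes from a short power iteration; the smallest is
/// inferred from a crude determinant estimate. The result is only used to pick a
/// precision target and is never reported to callers.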
fn estimate_condition_number<F>(a: &ArrayView2<F>) -> LinalgResult<F>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let n = a.nrows();

    if n == 1 {
        return Ok(F::one());
    }

    // Largest eigenvalue magnitude from a short power iteration.
    let (lambda_max_, _) = power_iteration(
        a,
        100,
        F::from(1e-8).expect("Failed to convert constant to float"),
    )?;

    let _trace = (0..n).map(|i| a[[i, i]]).fold(F::zero(), |acc, x| acc + x);
    let det_estimate = if n == 2 {
        a[[0, 0]] * a[[1, 1]] - a[[0, 1]] * a[[1, 0]]
    } else {
        // Geometric mean of the diagonal as a cheap stand-in for the determinant scale.
        let product = (0..n)
            .map(|i| a[[i, i]].abs())
            .fold(F::one(), |acc, x| acc * x);
        if product > F::zero() {
            product.powf(F::one() / F::from(n).expect("Failed to convert to float"))
        } else {
            F::from(1e-15).expect("Failed to convert constant to float")
        }
    };

    let lambda_min =
        if det_estimate.abs() > F::from(1e-15).expect("Failed to convert constant to float") {
            det_estimate / lambda_max_.powf(F::from(n - 1).expect("Failed to convert to float"))
        } else {
            F::from(1e-15).expect("Failed to convert constant to float")
        };

    let condition = lambda_max_.abs()
        / lambda_min
            .abs()
            .max(F::from(1e-15).expect("Failed to convert constant to float"));
    Ok(condition)
}

#[allow(dead_code)]
fn advanced_precise_smallmatrix_eig<F>(
    a: &ArrayView2<F>,
    precision_target: F,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let n = a.nrows();

    match n {
        1 => {
            let eigenvalue = a[[0, 0]];
            let eigenvector = Array2::eye(1);
            Ok((Array1::from_elem(1, eigenvalue), eigenvector))
        }
        2 => advanced_precise_2x2_eig(a, precision_target),
        3 => advanced_precise_3x3_eig(a, precision_target),
        4 => advanced_precise_4x4_eig(a, precision_target),
        _ => Err(LinalgError::InvalidInput(
            "Matrix size not supported for advanced-precise small matrix solver".to_string(),
        )),
    }
}

#[allow(dead_code)]
fn advanced_precise_2x2_eig<F>(
    a: &ArrayView2<F>,
    _precision_target: F,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let a11 = a[[0, 0]];
    let a12 = a[[0, 1]];
    let a21 = a[[1, 0]];
    let a22 = a[[1, 1]];

    // Compensated sums keep the trace and determinant accurate when terms nearly cancel.
    let trace = kahan_sum(&[a11, a22]);
    let det = kahan_sum(&[a11 * a22, -(a12 * a21)]);

    let trace_squared = trace * trace;
    let four_det = F::from(4.0).expect("Failed to convert constant to float") * det;
    let discriminant = kahan_sum(&[trace_squared, -four_det]);

    let half = F::from(0.5).expect("Failed to convert constant to float");
    let sqrt_discriminant = discriminant.abs().sqrt();

    let lambda1 = if discriminant >= F::zero() {
        (trace + sqrt_discriminant) * half
    } else {
        trace * half
    };

    let lambda2 = if discriminant >= F::zero() {
        (trace - sqrt_discriminant) * half
    } else {
        trace * half
    };

    let mut eigenvalues = Array1::zeros(2);
    eigenvalues[0] = lambda1;
    eigenvalues[1] = lambda2;

    let eigenvectors = compute_advanced_precise_eigenvectors_2x2(a, &eigenvalues)?;

    Ok((eigenvalues, eigenvectors))
}

#[allow(dead_code)]
fn advanced_precise_3x3_eig<F>(
    a: &ArrayView2<F>,
    precision_target: F,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    // Eigenvalues are the roots of the characteristic cubic; eigenvectors follow from the
    // null space of (A - lambda * I).
    let characteristic_poly = compute_characteristic_polynomial_3x3(a)?;
    let eigenvalues =
        solve_cubic_equation_advanced_precise(&characteristic_poly, precision_target)?;

    let eigenvectors =
        compute_advanced_precise_eigenvectors_3x3(a, &eigenvalues, precision_target)?;

    Ok((eigenvalues, eigenvectors))
}

#[allow(dead_code)]
fn advanced_precise_4x4_eig<F>(
    a: &ArrayView2<F>,
    precision_target: F,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    advanced_precise_qr_iteration(a, precision_target, 1000)
}

#[allow(dead_code)]
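/// Compensated (Kahan) summation over a slice.
///
/// A running correction term feeds the rounding error of each addition back into the next
/// one, which keeps sums of nearly cancelling terms (e.g. `trace^2 - 4*det`) accurate to
/// close to machine precision.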
fn kahan_sum<F>(values: &[F]) -> F
where
    F: Float + NumAssign,
{
    let mut sum = F::zero();
    // Running compensation for lost low-order bits.
    let mut c = F::zero();
    for &value in values {
        let y = value - c;
        let t = sum + y;
        c = (t - sum) - y;
        sum = t;
    }

    sum
}

#[allow(dead_code)]
fn compute_characteristic_polynomial_3x3<F>(a: &ArrayView2<F>) -> LinalgResult<[F; 4]>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let a00 = a[[0, 0]];
    let a01 = a[[0, 1]];
    let a02 = a[[0, 2]];
    let a10 = a[[1, 0]];
    let a11 = a[[1, 1]];
    let a12 = a[[1, 2]];
    let a20 = a[[2, 0]];
    let a21 = a[[2, 1]];
    let a22 = a[[2, 2]];

    // Coefficients [c0, c1, c2, c3] of the monic characteristic polynomial
    // det(lambda*I - A) = lambda^3 - tr(A)*lambda^2 + (sum of principal 2x2 minors)*lambda - det(A).
    // The leading coefficient must be +1 so that the cubic solver recovers the eigenvalues.
    let c3 = F::one();
    let c2 = kahan_sum(&[-a00, -a11, -a22]);

    let minor_00_11 = a00 * a11 - a01 * a10;
    let minor_00_22 = a00 * a22 - a02 * a20;
    let minor_11_22 = a11 * a22 - a12 * a21;
    let c1 = kahan_sum(&[minor_00_11, minor_00_22, minor_11_22]);

    let det_part1 = a00 * (a11 * a22 - a12 * a21);
    let det_part2 = -a01 * (a10 * a22 - a12 * a20);
    let det_part3 = a02 * (a10 * a21 - a11 * a20);
    let c0 = -kahan_sum(&[det_part1, det_part2, det_part3]);

    Ok([c0, c1, c2, c3])
}

#[allow(dead_code)]
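/// Solves the cubic `c3*x^3 + c2*x^2 + c1*x + c0 = 0` for its real roots.
///
/// The cubic is reduced to depressed form; Cardano's formula handles the single-real-root
/// case and the trigonometric (Viete) form handles three real roots. Every root is then
/// polished with a few Newton steps. The returned array always has three entries, sorted
/// ascending; when only one real root exists it is repeated in all three slots.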
fn solve_cubic_equation_advanced_precise<F>(
    coeffs: &[F; 4],
    precision_target: F,
) -> LinalgResult<Array1<F>>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let [c0, c1, c2, c3] = *coeffs;

    // Normalize to a monic cubic x^3 + a*x^2 + b*x + c = 0.
    let a = c2 / c3;
    let b = c1 / c3;
    let c = c0 / c3;

    // Depressed cubic t^3 + p*t + q = 0 with x = t - a/3.
    let third = F::one() / F::from(3.0).expect("Failed to convert constant to float");
    let p = b - a * a * third;
    let q_part1 = F::from(2.0).expect("Failed to convert constant to float") * a * a * a
        / F::from(27.0).expect("Failed to convert constant to float");
    let q_part2 = -a * b * third;
    let q = kahan_sum(&[q_part1, q_part2, c]);

    let discriminant_part1 =
        (q / F::from(2.0).expect("Failed to convert constant to float")).powi(2);
    let discriminant_part2 =
        (p / F::from(3.0).expect("Failed to convert constant to float")).powi(3);
    let discriminant = discriminant_part1 + discriminant_part2;

    let mut roots = Array1::zeros(3);

    if discriminant > precision_target {
        // One real root (Cardano): t = cbrt(-q/2 + sqrt(D)) + cbrt(-q/2 - sqrt(D)).
        let sqrt_disc = discriminant.sqrt();
        let half_q = q / F::from(2.0).expect("Failed to convert constant to float");

        let u = if -half_q + sqrt_disc >= F::zero() {
            (-half_q + sqrt_disc).powf(third)
        } else {
            -(half_q - sqrt_disc).powf(third)
        };

        // From u*v = -p/3.
        let v = if u.abs() > precision_target {
            -p / (F::from(3.0).expect("Failed to convert constant to float") * u)
        } else {
            F::zero()
        };

        let root = u + v - a * third;

        let refined_root = newton_method_cubic_refinement(coeffs, root, precision_target, 20)?;

        // Only one real root exists; report it in all three slots.
        roots[0] = refined_root;
        roots[1] = refined_root;
        roots[2] = refined_root;
    } else {
        // Three real roots: trigonometric (Viete) form.
        let m = F::from(2.0).expect("Failed to convert constant to float")
            * (-p / F::from(3.0).expect("Failed to convert constant to float")).sqrt();
        let theta = (F::from(3.0).expect("Failed to convert constant to float") * q / (p * m))
            .acos()
            / F::from(3.0).expect("Failed to convert constant to float");

        let two_pi_third = F::from(2.0).expect("Failed to convert constant to float")
            * F::from(std::f64::consts::PI).expect("Failed to convert to float")
            / F::from(3.0).expect("Failed to convert constant to float");
        let a_third = a * third;

        roots[0] = m * theta.cos() - a_third;
        roots[1] = m * (theta - two_pi_third).cos() - a_third;
        roots[2] = m
            * (theta - F::from(2.0).expect("Failed to convert constant to float") * two_pi_third)
                .cos()
            - a_third;

        // Polish each root with a few Newton steps on the original cubic.
        for i in 0..3 {
            roots[i] = newton_method_cubic_refinement(coeffs, roots[i], precision_target, 20)?;
        }
    }

    // Return the roots in ascending order.
    let mut root_vec: Vec<F> = roots.to_vec();
    root_vec.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

    for (i, &root) in root_vec.iter().enumerate() {
        roots[i] = root;
    }

    Ok(roots)
}

#[allow(dead_code)]
fn newton_method_cubic_refinement<F>(
    coeffs: &[F; 4],
    initial_guess: F,
    tolerance: F,
    max_iter: usize,
) -> LinalgResult<F>
where
    F: Float + NumAssign,
{
    let [c0, c1, c2, c3] = *coeffs;
    let mut x = initial_guess;

    for _ in 0..max_iter {
        // Evaluate p(x) and p'(x) with Horner's scheme.
        let f_x = ((c3 * x + c2) * x + c1) * x + c0;
        let df_x = (F::from(3.0).expect("Failed to convert constant to float") * c3 * x
            + F::from(2.0).expect("Failed to convert constant to float") * c2)
            * x
            + c1;

        if df_x.abs() < tolerance {
            // Derivative too small for a reliable Newton step.
            break;
        }

        let x_new = x - f_x / df_x;

        if (x_new - x).abs() < tolerance {
            return Ok(x_new);
        }

        x = x_new;
    }

    Ok(x)
}

#[allow(dead_code)]
fn compute_advanced_precise_eigenvectors_2x2<F>(
    a: &ArrayView2<F>,
    eigenvalues: &Array1<F>,
) -> LinalgResult<Array2<F>>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let mut eigenvectors = Array2::zeros((2, 2));

    for (i, &lambda) in eigenvalues.iter().enumerate() {
        let mut v = Array1::zeros(2);

        let a11_lambda = a[[0, 0]] - lambda;
        let a22_lambda = a[[1, 1]] - lambda;

        // Solve (A - lambda*I) v = 0, picking the better-conditioned row.
        if a[[0, 1]].abs() > a[[1, 0]].abs() {
            if a[[0, 1]].abs() > F::epsilon() {
                v[0] = F::one();
                v[1] = -a11_lambda / a[[0, 1]];
            } else {
                v[0] = F::one();
                v[1] = F::zero();
            }
        } else if a[[1, 0]].abs() > F::epsilon() {
            v[0] = -a22_lambda / a[[1, 0]];
            v[1] = F::one();
        } else if a11_lambda.abs() < a22_lambda.abs() {
            // (Near-)diagonal matrix: pick the standard basis vector with the smaller residual.
            v[0] = F::one();
            v[1] = F::zero();
        } else {
            v[0] = F::zero();
            v[1] = F::one();
        }

        let norm = vector_norm(&v.view(), 2)?;
        if norm > F::epsilon() {
            v.mapv_inplace(|x| x / norm);
        }

        eigenvectors.column_mut(i).assign(&v);
    }

    gram_schmidt_orthogonalization(&mut eigenvectors)?;

    Ok(eigenvectors)
}

#[allow(dead_code)]
fn compute_advanced_precise_eigenvectors_3x3<F>(
    a: &ArrayView2<F>,
    eigenvalues: &Array1<F>,
    precision_target: F,
) -> LinalgResult<Array2<F>>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let mut eigenvectors = Array2::zeros((3, 3));

    for (i, &lambda) in eigenvalues.iter().enumerate() {
        // The eigenvector for lambda lives in the null space of (A - lambda*I).
        let mut matrix = a.to_owned();
        for j in 0..3 {
            matrix[[j, j]] -= lambda;
        }

        let eigenvector = enhanced_null_space_computation(&matrix.view(), precision_target)?;
        eigenvectors.column_mut(i).assign(&eigenvector);
    }

    // Repeated Gram-Schmidt passes to limit loss of orthogonality.
    for _ in 0..3 {
        gram_schmidt_orthogonalization(&mut eigenvectors)?;
    }

    Ok(eigenvectors)
}

#[allow(dead_code)]
fn enhanced_null_space_computation<F>(
    matrix: &ArrayView2<F>,
    precision_target: F,
) -> LinalgResult<Array1<F>>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let n = matrix.nrows();
    let mut v = Array1::zeros(n);

    // Keep the candidate with the smallest residual ||M * v|| across several random restarts.
    let mut best_v = v.clone();
    let mut best_residual = F::infinity();

    for _trial in 0..5 {
        let mut rng = scirs2_core::random::rng();
        for i in 0..n {
            v[i] = F::from(rng.random_range(-1.0..=1.0)).unwrap_or(F::zero());
        }

        let norm = vector_norm(&v.view(), 2)?;
        if norm > F::epsilon() {
            v.mapv_inplace(|x| x / norm);
        }

        for _iter in 0..50 {
            // Residual of the current candidate: av = M * v.
            let mut av = Array1::zeros(n);
            for i in 0..n {
                for j in 0..n {
                    av[i] += matrix[[i, j]] * v[j];
                }
            }

            let residual = vector_norm(&av.view(), 2)?;
            if residual < best_residual {
                best_residual = residual;
                best_v.assign(&v);
            }

            if residual < precision_target {
                break;
            }

            let norm_v = vector_norm(&v.view(), 2)?;
            if norm_v > F::epsilon() {
                v.mapv_inplace(|x| x / norm_v);
            }
        }
    }

    Ok(best_v)
}

#[allow(dead_code)]
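/// In-place Gram-Schmidt orthonormalization of the matrix columns.
///
/// Each column is normalized and its component is removed from all later columns, using a
/// compensated dot product to limit rounding error. Columns with (near-)zero norm are left
/// unnormalized.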
fn gram_schmidt_orthogonalization<F>(matrix: &mut Array2<F>) -> LinalgResult<()>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let n = matrix.nrows();
    let m = matrix.ncols();

    for i in 0..m {
        // Normalize column i.
        let mut col_i = matrix.column(i).to_owned();
        let norm_i = vector_norm(&col_i.view(), 2)?;

        if norm_i > F::epsilon() {
            col_i.mapv_inplace(|x| x / norm_i);
            matrix.column_mut(i).assign(&col_i);
        }

        // Remove the component along column i from every later column.
        for j in (i + 1)..m {
            let mut col_j = matrix.column(j).to_owned();

            // Compensated (Kahan) dot product of columns i and j.
            let mut dot_product = F::zero();
            let mut c = F::zero();
            for k in 0..n {
                let y = col_i[k] * col_j[k] - c;
                let t = dot_product + y;
                c = (t - dot_product) - y;
                dot_product = t;
            }

            for k in 0..n {
                col_j[k] -= dot_product * col_i[k];
            }

            matrix.column_mut(j).assign(&col_j);
        }
    }

    Ok(())
}

#[allow(dead_code)]
fn advanced_precise_iterative_eig<F>(
    a: &ArrayView2<F>,
    precision_target: F,
    _workers: Option<usize>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    advanced_precise_qr_iteration(a, precision_target, 2000)
}

#[allow(dead_code)]
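/// Unshifted QR iteration driven until every entry below the diagonal falls under
/// `precision_target` or `max_iter` sweeps have been performed. Returns eigenvalue
/// estimates (the final diagonal) in ascending order, together with the accumulated
/// orthogonal transforms as eigenvector estimates.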
fn advanced_precise_qr_iteration<F>(
    a: &ArrayView2<F>,
    precision_target: F,
    max_iter: usize,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let n = a.nrows();
    let mut h = a.to_owned();
    let mut q_total = Array2::eye(n);

    let mut iter_count = 0;

    while iter_count < max_iter {
        // Converged once every below-diagonal entry is under the precision target.
        let mut converged = true;
        for i in 1..n {
            for j in 0..i {
                if h[[i, j]].abs() > precision_target {
                    converged = false;
                    break;
                }
            }
            if !converged {
                break;
            }
        }

        if converged {
            break;
        }

        let (q, r) = enhanced_qr_decomposition(&h.view())?;

        // QR step: H_{k+1} = R * Q is similar to H_k, so the eigenvalues are preserved.
        h = r.dot(&q);

        q_total = q_total.dot(&q);

        iter_count += 1;
    }

    // Eigenvalue estimates are the diagonal of the (quasi-)triangular iterate.
    let mut eigenvalues = Array1::zeros(n);
    for i in 0..n {
        eigenvalues[i] = h[[i, i]];
    }

    // Sort eigenvalues ascending and reorder the accumulated vectors to match.
    let mut indices: Vec<usize> = (0..n).collect();
    indices.sort_by(|&a, &b| {
        eigenvalues[a]
            .partial_cmp(&eigenvalues[b])
            .unwrap_or(std::cmp::Ordering::Equal)
    });

    let mut sorted_eigenvalues = Array1::zeros(n);
    let mut sorted_eigenvectors = Array2::zeros((n, n));

    for (new_idx, &old_idx) in indices.iter().enumerate() {
        sorted_eigenvalues[new_idx] = eigenvalues[old_idx];
        sorted_eigenvectors
            .column_mut(new_idx)
            .assign(&q_total.column(old_idx));
    }

    Ok((sorted_eigenvalues, sorted_eigenvectors))
}

#[allow(dead_code)]
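/// QR decomposition via Householder reflections.
///
/// Reflectors are applied column by column to form `R` and accumulated (transposed) into
/// `Q`, so that `a ≈ Q * R` with `Q` orthogonal. Used by the high-precision QR iteration
/// above.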
fn enhanced_qr_decomposition<F>(a: &ArrayView2<F>) -> LinalgResult<(Array2<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let (m, n) = a.dim();
    let mut q = Array2::eye(m);
    let mut r = a.to_owned();

    for k in 0..n.min(m - 1) {
        // Build the Householder vector for column k below the diagonal.
        let mut x = Array1::zeros(m - k);
        for i in k..m {
            x[i - k] = r[[i, k]];
        }

        let norm_x = vector_norm(&x.view(), 2)?;
        if norm_x < F::epsilon() {
            continue;
        }

        let mut v = x.clone();
        v[0] += if x[0] >= F::zero() { norm_x } else { -norm_x };

        let norm_v = vector_norm(&v.view(), 2)?;
        if norm_v > F::epsilon() {
            v.mapv_inplace(|val| val / norm_v);
        }

        // Apply the reflector to the trailing columns of R.
        for j in k..n {
            let mut col = Array1::zeros(m - k);
            for i in k..m {
                col[i - k] = r[[i, j]];
            }

            let dot_product = v.dot(&col);
            for i in k..m {
                r[[i, j]] -= F::from(2.0).expect("Failed to convert constant to float")
                    * dot_product
                    * v[i - k];
            }
        }

        // Accumulate the same reflector into Q (stored transposed until the end).
        for j in 0..m {
            let mut col = Array1::zeros(m - k);
            for i in k..m {
                col[i - k] = q[[i, j]];
            }

            let dot_product = v.dot(&col);
            for i in k..m {
                q[[i, j]] -= F::from(2.0).expect("Failed to convert constant to float")
                    * dot_product
                    * v[i - k];
            }
        }
    }

    Ok((q.t().to_owned(), r))
}

#[allow(dead_code)]
fn solve_2x2_eigenvalue_problem<F>(a: &ArrayView2<F>) -> EigenResult<F>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let a11 = a[[0, 0]];
    let a12 = a[[0, 1]];
    let a21 = a[[1, 0]];
    let a22 = a[[1, 1]];

    let trace = a11 + a22;
    let det = a11 * a22 - a12 * a21;

    let discriminant =
        trace * trace - F::from(4.0).expect("Failed to convert constant to float") * det;

    let mut eigenvalues = Array1::zeros(2);
    let mut eigenvectors = Array2::zeros((2, 2));

    if discriminant >= F::zero() {
        // Two real eigenvalues.
        let sqrt_discriminant = discriminant.sqrt();
        let lambda1 = (trace + sqrt_discriminant)
            / F::from(2.0).expect("Failed to convert constant to float");
        let lambda2 = (trace - sqrt_discriminant)
            / F::from(2.0).expect("Failed to convert constant to float");

        eigenvalues[0] = Complex::new(lambda1, F::zero());
        eigenvalues[1] = Complex::new(lambda2, F::zero());

        for (i, &lambda) in [lambda1, lambda2].iter().enumerate() {
            let mut eigenvector = Array1::zeros(2);

            if a12 != F::zero() {
                eigenvector[0] = a12;
                eigenvector[1] = lambda - a11;
            } else if a21 != F::zero() {
                eigenvector[0] = lambda - a22;
                eigenvector[1] = a21;
            } else {
                // Diagonal matrix: pick the matching standard basis vector.
                eigenvector[0] = if (a11 - lambda).abs() < F::epsilon() {
                    F::one()
                } else {
                    F::zero()
                };
                eigenvector[1] = if (a22 - lambda).abs() < F::epsilon() {
                    F::one()
                } else {
                    F::zero()
                };
            }

            let norm = vector_norm(&eigenvector.view(), 2)?;
            if norm > F::epsilon() {
                eigenvector.mapv_inplace(|x| x / norm);
            }

            eigenvectors.column_mut(i).assign(&eigenvector);
        }

        let complex_eigenvectors = eigenvectors.mapv(|x| Complex::new(x, F::zero()));

        Ok((eigenvalues, complex_eigenvectors))
    } else {
        // Complex-conjugate eigenvalue pair.
        let real_part = trace / F::from(2.0).expect("Failed to convert constant to float");
        let imag_part =
            (-discriminant).sqrt() / F::from(2.0).expect("Failed to convert constant to float");

        eigenvalues[0] = Complex::new(real_part, imag_part);
        eigenvalues[1] = Complex::new(real_part, -imag_part);

        let mut complex_eigenvectors = Array2::zeros((2, 2));

        if a12 != F::zero() {
            complex_eigenvectors[[0, 0]] = Complex::new(a12, F::zero());
            complex_eigenvectors[[1, 0]] = Complex::new(eigenvalues[0].re - a11, eigenvalues[0].im);

            complex_eigenvectors[[0, 1]] = Complex::new(a12, F::zero());
            complex_eigenvectors[[1, 1]] = Complex::new(eigenvalues[1].re - a11, eigenvalues[1].im);
        } else if a21 != F::zero() {
            complex_eigenvectors[[0, 0]] = Complex::new(eigenvalues[0].re - a22, eigenvalues[0].im);
            complex_eigenvectors[[1, 0]] = Complex::new(a21, F::zero());

            complex_eigenvectors[[0, 1]] = Complex::new(eigenvalues[1].re - a22, eigenvalues[1].im);
            complex_eigenvectors[[1, 1]] = Complex::new(a21, F::zero());
        }

        // Normalize each complex eigenvector to unit Euclidean norm.
        for i in 0..2 {
            let mut norm_sq = Complex::new(F::zero(), F::zero());
            for j in 0..2 {
                norm_sq += complex_eigenvectors[[j, i]] * complex_eigenvectors[[j, i]].conj();
            }
            let norm = norm_sq.re.sqrt();

            if norm > F::epsilon() {
                for j in 0..2 {
                    complex_eigenvectors[[j, i]] /= Complex::new(norm, F::zero());
                }
            }
        }

        Ok((eigenvalues, complex_eigenvectors))
    }
}

#[allow(dead_code)]
fn solve_2x2_symmetric_eigenvalue_problem<F>(
    a: &ArrayView2<F>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let a11 = a[[0, 0]];
    let a12 = a[[0, 1]];
    let a22 = a[[1, 1]];

    let trace = a11 + a22;
    // Symmetric 2x2: a21 == a12.
    let det = a11 * a22 - a12 * a12;
    let discriminant =
        trace * trace - F::from(4.0).expect("Failed to convert constant to float") * det;
    let sqrt_discriminant = discriminant.sqrt();

    let lambda1 =
        (trace + sqrt_discriminant) / F::from(2.0).expect("Failed to convert constant to float");
    let lambda2 =
        (trace - sqrt_discriminant) / F::from(2.0).expect("Failed to convert constant to float");

    // Return eigenvalues in ascending order.
    let (lambda_small, lambda_large) = if lambda1 <= lambda2 {
        (lambda1, lambda2)
    } else {
        (lambda2, lambda1)
    };

    let mut eigenvalues = Array1::zeros(2);
    eigenvalues[0] = lambda_small;
    eigenvalues[1] = lambda_large;

    let mut eigenvectors = Array2::zeros((2, 2));

    // Eigenvector for the smaller eigenvalue.
    if a12 != F::zero() {
        eigenvectors[[0, 0]] = a12;
        eigenvectors[[1, 0]] = lambda_small - a11;
    } else {
        eigenvectors[[0, 0]] = if (a11 - lambda_small).abs() < F::epsilon() {
            F::one()
        } else {
            F::zero()
        };
        eigenvectors[[1, 0]] = if (a22 - lambda_small).abs() < F::epsilon() {
            F::one()
        } else {
            F::zero()
        };
    }

    let norm1 = (eigenvectors[[0, 0]] * eigenvectors[[0, 0]]
        + eigenvectors[[1, 0]] * eigenvectors[[1, 0]])
        .sqrt();
    if norm1 > F::epsilon() {
        eigenvectors[[0, 0]] /= norm1;
        eigenvectors[[1, 0]] /= norm1;
    }

    // Eigenvector for the larger eigenvalue.
    if a12 != F::zero() {
        eigenvectors[[0, 1]] = a12;
        eigenvectors[[1, 1]] = lambda_large - a11;
    } else {
        eigenvectors[[0, 1]] = if (a11 - lambda_large).abs() < F::epsilon() {
            F::one()
        } else {
            F::zero()
        };
        eigenvectors[[1, 1]] = if (a22 - lambda_large).abs() < F::epsilon() {
            F::one()
        } else {
            F::zero()
        };
    }

    let norm2 = (eigenvectors[[0, 1]] * eigenvectors[[0, 1]]
        + eigenvectors[[1, 1]] * eigenvectors[[1, 1]])
        .sqrt();
    if norm2 > F::epsilon() {
        eigenvectors[[0, 1]] /= norm2;
        eigenvectors[[1, 1]] /= norm2;
    }

    Ok((eigenvalues, eigenvectors))
}

#[allow(dead_code)]
fn solve_3x3_symmetric_eigenvalue_problem<F>(
    a: &ArrayView2<F>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    // Unshifted QR iteration; for a symmetric 3x3 this converges quickly.
    let mut workmatrix = a.to_owned();
    let mut q_total = Array2::eye(3);
    let max_iter = 50;
    let tol = F::from(1e-12).expect("Failed to convert constant to float");

    for _ in 0..max_iter {
        let off_diag =
            workmatrix[[0, 1]].abs() + workmatrix[[0, 2]].abs() + workmatrix[[1, 2]].abs();
        if off_diag < tol {
            break;
        }

        let (q, r) = qr(&workmatrix.view(), None)?;

        workmatrix = r.dot(&q);

        q_total = q_total.dot(&q);
    }

    let mut eigenvalues = Array1::zeros(3);
    for i in 0..3 {
        eigenvalues[i] = workmatrix[[i, i]];
    }

    // Sort ascending and permute the eigenvector columns accordingly.
    let mut indices = [0, 1, 2];
    indices.sort_by(|&i, &j| {
        eigenvalues[i]
            .partial_cmp(&eigenvalues[j])
            .expect("Operation failed")
    });

    let mut sorted_eigenvalues = Array1::zeros(3);
    let mut sorted_eigenvectors = Array2::zeros((3, 3));

    for (new_idx, &old_idx) in indices.iter().enumerate() {
        sorted_eigenvalues[new_idx] = eigenvalues[old_idx];
        for i in 0..3 {
            sorted_eigenvectors[[i, new_idx]] = q_total[[i, old_idx]];
        }
    }

    Ok((sorted_eigenvalues, sorted_eigenvectors))
}

#[allow(dead_code)]
fn solve_4x4_symmetric_eigenvalue_problem<F>(
    a: &ArrayView2<F>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let mut workmatrix = a.to_owned();
    let mut q_total = Array2::eye(4);
    let max_iter = 100;
    let tol = F::from(1e-12).expect("Failed to convert constant to float");

    for _ in 0..max_iter {
        let mut off_diag = F::zero();
        for i in 0..4 {
            for j in (i + 1)..4 {
                off_diag += workmatrix[[i, j]].abs();
            }
        }
        if off_diag < tol {
            break;
        }

        let (q, r) = qr(&workmatrix.view(), None)?;

        workmatrix = r.dot(&q);

        q_total = q_total.dot(&q);
    }

    let mut eigenvalues = Array1::zeros(4);
    for i in 0..4 {
        eigenvalues[i] = workmatrix[[i, i]];
    }

    let mut indices = [0, 1, 2, 3];
    indices.sort_by(|&i, &j| {
        eigenvalues[i]
            .partial_cmp(&eigenvalues[j])
            .expect("Operation failed")
    });

    let mut sorted_eigenvalues = Array1::zeros(4);
    let mut sorted_eigenvectors = Array2::zeros((4, 4));

    for (new_idx, &old_idx) in indices.iter().enumerate() {
        sorted_eigenvalues[new_idx] = eigenvalues[old_idx];
        for i in 0..4 {
            sorted_eigenvectors[[i, new_idx]] = q_total[[i, old_idx]];
        }
    }

    Ok((sorted_eigenvalues, sorted_eigenvectors))
}

#[allow(dead_code)]
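/// Basic (unshifted) QR algorithm for a general square matrix.
///
/// Repeatedly factors `A_k = Q R` and forms `A_{k+1} = R Q`, accumulating `Q` as the
/// eigenvector estimate. Convergence is declared when the first subdiagonal vanishes, so
/// matrices with complex eigenvalues may exhaust `max_iter` and return the diagonal of the
/// last iterate as a real-valued approximation.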
fn solve_qr_algorithm<F>(a: &ArrayView2<F>) -> EigenResult<F>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    let mut a_k = a.to_owned();
    let n = a.nrows();
    let max_iter = 100;
    let tol = F::epsilon() * F::from(100.0).expect("Failed to convert constant to float");

    let mut eigenvalues = Array1::zeros(n);
    let mut eigenvectors = Array2::eye(n);

    for _iter in 0..max_iter {
        let (q, r) = qr(&a_k.view(), None)?;

        // Similarity transform: A_{k+1} = R * Q has the same eigenvalues as A_k.
        let a_next = r.dot(&q);

        eigenvectors = eigenvectors.dot(&q);

        // Converged when the first subdiagonal has (numerically) vanished.
        let mut converged = true;
        for i in 1..n {
            if a_next[[i, i - 1]].abs() > tol {
                converged = false;
                break;
            }
        }

        if converged {
            for i in 0..n {
                eigenvalues[i] = Complex::new(a_next[[i, i]], F::zero());
            }

            let complex_eigenvectors = eigenvectors.mapv(|x| Complex::new(x, F::zero()));
            return Ok((eigenvalues, complex_eigenvectors));
        }

        a_k = a_next;
    }

    // Did not fully converge: report the current diagonal as the eigenvalue estimates.
    let mut eigenvals = Array1::zeros(n);
    for i in 0..n {
        eigenvals[i] = Complex::new(a_k[[i, i]], F::zero());
    }

    let complex_eigenvectors = eigenvectors.mapv(|x| Complex::new(x, F::zero()));
    Ok((eigenvals, complex_eigenvectors))
}

#[allow(dead_code)]
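/// Iterative solver for symmetric matrices larger than 4x4.
///
/// Despite the name, this is a shifted QR iteration: each sweep picks a Wilkinson-style
/// shift from the trailing 2x2 block, factors the shifted matrix, and recombines, which
/// converges much faster than plain power iteration. Eigenvalues are returned in
/// descending order with sign-normalized eigenvectors.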
fn solve_symmetric_with_power_iteration<F>(
    a: &ArrayView2<F>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    use crate::decomposition::qr;
    use crate::norm::vector_norm;

    let n = a.nrows();
    let max_iter = 1000;
    let tolerance =
        F::from(1e-10).unwrap_or_else(|| F::epsilon() * F::from(100.0).unwrap_or(F::one()));

    let mut workmatrix = a.to_owned();
    let mut q_accumulated = Array2::eye(n);

    for iter in 0..max_iter {
        // Convergence check: largest off-diagonal magnitude.
        let mut max_off_diag = F::zero();
        for i in 0..n {
            for j in 0..n {
                if i != j {
                    let abs_val = workmatrix[[i, j]].abs();
                    if abs_val > max_off_diag {
                        max_off_diag = abs_val;
                    }
                }
            }
        }

        if max_off_diag < tolerance {
            break;
        }

        // Wilkinson-style shift: the eigenvalue of the trailing 2x2 block closest to the
        // last diagonal entry.
        let shift = if n >= 2 {
            let a_nn = workmatrix[[n - 1, n - 1]];
            let a_n1n1 = workmatrix[[n - 2, n - 2]];
            let a_nn1 = workmatrix[[n - 1, n - 2]];

            let b = a_nn + a_n1n1;
            let c = a_nn * a_n1n1 - a_nn1 * a_nn1;
            let discriminant = b * b - F::from(4.0).unwrap_or(F::one()) * c;

            if discriminant >= F::zero() {
                let sqrt_disc = discriminant.sqrt();
                let lambda1 = (b + sqrt_disc) / F::from(2.0).unwrap_or(F::one());
                let lambda2 = (b - sqrt_disc) / F::from(2.0).unwrap_or(F::one());

                if (lambda1 - a_nn).abs() < (lambda2 - a_nn).abs() {
                    lambda1
                } else {
                    lambda2
                }
            } else {
                a_nn
            }
        } else {
            workmatrix[[n - 1, n - 1]]
        };

        // Shifted QR step: factor (A - shift*I), then form R*Q + shift*I.
        for i in 0..n {
            workmatrix[[i, i]] -= shift;
        }

        let (q, r) = qr(&workmatrix.view(), None).map_err(|_| {
            LinalgError::ConvergenceError(format!(
                "QR decomposition failed in symmetric eigenvalue computation at iteration {iter}"
            ))
        })?;

        workmatrix = r.dot(&q);
        for i in 0..n {
            workmatrix[[i, i]] += shift;
        }

        q_accumulated = q_accumulated.dot(&q);

        if iter > 10 && max_off_diag < tolerance * F::from(10.0).unwrap_or(F::one()) {
            break;
        }
    }

    let mut eigenvalues = Array1::zeros(n);
    for i in 0..n {
        eigenvalues[i] = workmatrix[[i, i]];
    }

    // Sort eigenvalues in descending order and permute the eigenvectors to match.
    let mut indices: Vec<usize> = (0..n).collect();
    indices.sort_by(|&i, &j| {
        eigenvalues[j]
            .partial_cmp(&eigenvalues[i])
            .unwrap_or(std::cmp::Ordering::Equal)
    });

    let sorted_eigenvalues = Array1::from_iter(indices.iter().map(|&i| eigenvalues[i]));
    let mut sorted_eigenvectors = Array2::zeros((n, n));

    for (new_idx, &old_idx) in indices.iter().enumerate() {
        for i in 0..n {
            sorted_eigenvectors[[i, new_idx]] = q_accumulated[[i, old_idx]];
        }
    }

    // Normalize each eigenvector and fix its sign so the largest component is positive.
    for j in 0..n {
        let mut col = sorted_eigenvectors.column_mut(j);
        let norm = vector_norm(&col.to_owned().view(), 2)?;

        if norm > F::epsilon() {
            col.mapv_inplace(|x| x / norm);
        }

        let mut max_idx = 0;
        let mut max_abs = F::zero();
        for i in 0..n {
            let abs_val = col[i].abs();
            if abs_val > max_abs {
                max_abs = abs_val;
                max_idx = i;
            }
        }

        if col[max_idx] < F::zero() {
            col.mapv_inplace(|x| x * (-F::one()));
        }
    }

    Ok((sorted_eigenvalues, sorted_eigenvectors))
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_1x1matrix() {
        let a = array![[3.0_f64]];
        let (eigenvalues, eigenvectors) = eig(&a.view(), None).expect("Operation failed");

        assert_relative_eq!(eigenvalues[0].re, 3.0, epsilon = 1e-10);
        assert_relative_eq!(eigenvalues[0].im, 0.0, epsilon = 1e-10);
        assert_relative_eq!(eigenvectors[[0, 0]].re, 1.0, epsilon = 1e-10);
        assert_relative_eq!(eigenvectors[[0, 0]].im, 0.0, epsilon = 1e-10);
    }

    #[test]
    fn test_2x2_diagonalmatrix() {
        let a = array![[3.0_f64, 0.0], [0.0, 4.0]];

        let (eigenvalues, _eigenvectors) = eig(&a.view(), None).expect("Operation failed");

        // A real diagonal matrix has purely real eigenvalues.
        assert_relative_eq!(eigenvalues[0].im, 0.0, epsilon = 1e-10);
        assert_relative_eq!(eigenvalues[1].im, 0.0, epsilon = 1e-10);

        let (eigenvalues, _eigenvectors) = eigh(&a.view(), None).expect("Operation failed");
        assert!(
            (eigenvalues[0] - 3.0).abs() < 1e-10 && (eigenvalues[1] - 4.0).abs() < 1e-10
                || (eigenvalues[1] - 3.0).abs() < 1e-10 && (eigenvalues[0] - 4.0).abs() < 1e-10
        );
    }

    #[test]
    fn test_2x2_symmetricmatrix() {
        let a = array![[1.0, 2.0], [2.0, 4.0]];

        let (eigenvalues, eigenvectors) = eigh(&a.view(), None).expect("Operation failed");

        // Eigenvalues of [[1, 2], [2, 4]] are 5 and 0, in either order.
        assert!(
            (eigenvalues[0] - 5.0).abs() < 1e-10 && eigenvalues[1].abs() < 1e-10
                || (eigenvalues[1] - 5.0).abs() < 1e-10 && eigenvalues[0].abs() < 1e-10
        );

        let dot_product = eigenvectors[[0, 0]] * eigenvectors[[0, 1]]
            + eigenvectors[[1, 0]] * eigenvectors[[1, 1]];
        assert!(
            (dot_product).abs() < 1e-10,
            "Eigenvectors should be orthogonal"
        );

        let norm1 = (eigenvectors[[0, 0]] * eigenvectors[[0, 0]]
            + eigenvectors[[1, 0]] * eigenvectors[[1, 0]])
            .sqrt();
        let norm2 = (eigenvectors[[0, 1]] * eigenvectors[[0, 1]]
            + eigenvectors[[1, 1]] * eigenvectors[[1, 1]])
            .sqrt();
        assert!(
            (norm1 - 1.0).abs() < 1e-10,
            "First eigenvector should be normalized"
        );
        assert!(
            (norm2 - 1.0).abs() < 1e-10,
            "Second eigenvector should be normalized"
        );
    }

    #[test]
    fn test_power_iteration() {
        let a = array![[3.0, 1.0], [1.0, 3.0]];

        let (eigenvalue, eigenvector) =
            power_iteration(&a.view(), 100, 1e-10).expect("Operation failed");

        // Dominant eigenvalue of [[3, 1], [1, 3]] is 4.
        assert_relative_eq!(eigenvalue, 4.0, epsilon = 1e-8);

        let norm = (eigenvector[0] * eigenvector[0] + eigenvector[1] * eigenvector[1]).sqrt();
        assert_relative_eq!(norm, 1.0, epsilon = 1e-10);

        // Check the defining relation A*v ~ lambda*v.
        let av = a.dot(&eigenvector);
        let lambda_v = &eigenvector * eigenvalue;

        let mut max_diff = 0.0;
        for i in 0..eigenvector.len() {
            let diff = (av[i] - lambda_v[i]).abs();
            if diff > max_diff {
                max_diff = diff;
            }
        }

        assert!(
            max_diff < 1e-5,
            "A*v should approximately equal lambda*v, max diff: {}",
            max_diff
        );
    }
}