scirs2_sparse/linalg/eigen/power_iteration.rs

use crate::error::{SparseError, SparseResult};
use crate::sym_csr::SymCsrMatrix;
use crate::sym_ops::sym_csr_matvec;
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
use scirs2_core::numeric::Float;
use scirs2_core::SparseElement;
use std::fmt::Debug;
use std::ops::{Add, Div, Mul, Sub};

/// Options controlling the power iteration solver.
#[derive(Debug, Clone)]
pub struct PowerIterationOptions {
    /// Maximum number of iterations to perform.
    pub max_iter: usize,
    /// Convergence tolerance on the change in the eigenvalue estimate.
    pub tol: f64,
    /// Whether to renormalize the iterate to unit length on every step.
    pub normalize: bool,
}

impl Default for PowerIterationOptions {
    fn default() -> Self {
        Self {
            max_iter: 100,
            tol: 1e-8,
            normalize: true,
        }
    }
}

/// Result of an eigenvalue computation.
#[derive(Debug, Clone)]
pub struct EigenResult<T>
where
    T: Float + Debug + Copy,
{
    /// Computed eigenvalue estimates.
    pub eigenvalues: Array1<T>,
    /// Corresponding eigenvectors, stored column-wise, if available.
    pub eigenvectors: Option<Array2<T>>,
    /// Number of iterations performed.
    pub iterations: usize,
    /// Residual norms `||A x - lambda x||` for each eigenpair.
    pub residuals: Array1<T>,
    /// Whether the iteration converged within the tolerance.
    pub converged: bool,
}

#[allow(dead_code)]
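/// Computes the dominant eigenvalue and a corresponding eigenvector of a
/// symmetric CSR matrix using the power iteration method.
///
/// Starting from `initial_guess` (or the first unit basis vector when `None`),
/// the iterate is repeatedly multiplied by the matrix and the eigenvalue is
/// estimated from the Rayleigh quotient, until the change between successive
/// estimates drops below `options.tol` or `options.max_iter` is reached.
///
/// # Arguments
///
/// * `matrix` - Symmetric sparse matrix in CSR format.
/// * `options` - Iteration controls (maximum iterations, tolerance, normalization).
/// * `initial_guess` - Optional starting vector of length `n`.
///
/// # Returns
///
/// An [`EigenResult`] holding the eigenvalue estimate, the eigenvector, the
/// number of iterations performed, the final residual norm
/// `||A x - lambda x||`, and a convergence flag.
///
/// # Example
///
/// A minimal sketch mirroring the unit tests below; the surrounding import
/// paths are assumed, so the snippet is not compiled as a doc-test.
///
/// ```ignore
/// // Lower triangle of the symmetric matrix [[2, 1], [1, 2]].
/// let data = vec![2.0, 1.0, 2.0];
/// let indptr = vec![0, 1, 3];
/// let indices = vec![0, 0, 1];
/// let matrix = SymCsrMatrix::new(data, indptr, indices, (2, 2)).unwrap();
///
/// let result = power_iteration(&matrix, &PowerIterationOptions::default(), None).unwrap();
/// assert!((result.eigenvalues[0] - 3.0_f64).abs() < 1e-6);
/// ```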
pub fn power_iteration<T>(
    matrix: &SymCsrMatrix<T>,
    options: &PowerIterationOptions,
    initial_guess: Option<ArrayView1<T>>,
) -> SparseResult<EigenResult<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + std::iter::Sum
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync
        + 'static,
{
    let (n, _) = matrix.shape();

    // Set up the starting vector: use the caller's guess if provided,
    // otherwise fall back to the first unit basis vector e_1.
    let mut x = match initial_guess {
        Some(v) => {
            if v.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: v.len(),
                });
            }
            let mut x_arr = Array1::zeros(n);
            for i in 0..n {
                x_arr[i] = v[i];
            }
            x_arr
        }
        None => {
            let mut x_arr = Array1::zeros(n);
            x_arr[0] = T::sparse_one();
            x_arr
        }
    };

    // Normalize the starting vector to unit length if requested.
    if options.normalize {
        let norm = (x.iter().map(|&v| v * v).sum::<T>()).sqrt();
        if !SparseElement::is_zero(&norm) {
            for i in 0..n {
                x[i] = x[i] / norm;
            }
        }
    }

    let mut lambda = T::sparse_zero();
    let mut prev_lambda = T::sparse_zero();
    let mut converged = false;
    let mut iter = 0;

    while iter < options.max_iter {
        // One power step: y = A * x, using the symmetric CSR matvec.
        let y = sym_csr_matvec(matrix, &x.view())?;

        // Numerator of the Rayleigh quotient, x . (A x).
        let rayleigh_numerator = x.iter().zip(y.iter()).map(|(&xi, &yi)| xi * yi).sum::<T>();

        if options.normalize {
            // With a unit-length iterate the Rayleigh quotient is just the numerator.
            lambda = rayleigh_numerator;

            // Stop once successive eigenvalue estimates agree to within `tol`.
            let diff = (lambda - prev_lambda).abs();
            if diff < T::from(options.tol).unwrap() {
                converged = true;
                break;
            }

            // Renormalize the iterate for the next step.
            let norm = (y.iter().map(|&v| v * v).sum::<T>()).sqrt();
            if !SparseElement::is_zero(&norm) {
                for i in 0..n {
                    x[i] = y[i] / norm;
                }
            }
        } else {
            // Without normalization, the Rayleigh quotient must divide by
            // ||x||^2 of the same iterate used in the numerator.
            let norm_sq_x = x.iter().map(|&v| v * v).sum::<T>();
            if !SparseElement::is_zero(&norm_sq_x) {
                lambda = rayleigh_numerator / norm_sq_x;
            }
            x = y;

            // Stop once successive eigenvalue estimates agree to within `tol`.
            let diff = (lambda - prev_lambda).abs();
            if diff < T::from(options.tol).unwrap() {
                converged = true;
                break;
            }
        }

        prev_lambda = lambda;
        iter += 1;
    }

    // Final residual ||A x - lambda x|| for the returned eigenpair.
    let ax = sym_csr_matvec(matrix, &x.view())?;
    let mut residual = Array1::zeros(n);
    for i in 0..n {
        residual[i] = ax[i] - lambda * x[i];
    }
    let residual_norm = (residual.iter().map(|&v| v * v).sum::<T>()).sqrt();

    // Store the eigenvector as the single column of an n x 1 matrix.
    let eigenvectors = {
        let mut vecs = Array2::zeros((n, 1));
        for i in 0..n {
            vecs[[i, 0]] = x[i];
        }
        Some(vecs)
    };

    let result = EigenResult {
        eigenvalues: Array1::from_vec(vec![lambda]),
        eigenvectors,
        iterations: iter,
        residuals: Array1::from_vec(vec![residual_norm]),
        converged,
    };

    Ok(result)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::sym_csr::SymCsrMatrix;
    use scirs2_core::ndarray::Array1;

    #[test]
    fn test_power_iteration_simple() {
        // Lower triangle of the symmetric matrix [[2, 1], [1, 2]],
        // whose dominant eigenvalue is 3.
        let data = vec![2.0, 1.0, 2.0];
        let indices = vec![0, 0, 1];
        let indptr = vec![0, 1, 3];
        let matrix = SymCsrMatrix::new(data, indptr, indices, (2, 2)).unwrap();

        let options = PowerIterationOptions::default();
        let result = power_iteration(&matrix, &options, None).unwrap();

        assert!(result.converged);
        assert_eq!(result.eigenvalues.len(), 1);
        assert!((result.eigenvalues[0] - 3.0).abs() < 1e-6);
    }

    #[test]
    fn test_power_iteration_with_initial_guess() {
        // Lower triangle of the symmetric matrix [[4, 1], [1, 3]].
        let data = vec![4.0, 1.0, 3.0];
        let indptr = vec![0, 1, 3];
        let indices = vec![0, 0, 1];
        let matrix = SymCsrMatrix::new(data, indptr, indices, (2, 2)).unwrap();

        let initial_guess = Array1::from_vec(vec![1.0, 1.0]);
        let options = PowerIterationOptions::default();
        let result = power_iteration(&matrix, &options, Some(initial_guess.view())).unwrap();

        assert!(result.converged);
        assert_eq!(result.eigenvalues.len(), 1);
    }

    #[test]
    fn test_power_iteration_convergence() {
        // Lower triangle of the symmetric matrix [[5, 2], [2, 4]].
        let data = vec![5.0, 2.0, 4.0];
        let indptr = vec![0, 1, 3];
        let indices = vec![0, 0, 1];
        let matrix = SymCsrMatrix::new(data, indptr, indices, (2, 2)).unwrap();

        let options = PowerIterationOptions {
            max_iter: 50,
            tol: 1e-10,
            normalize: true,
        };
        let result = power_iteration(&matrix, &options, None).unwrap();

        assert!(result.converged);
        assert!(result.iterations <= 50);
        assert!(result.residuals[0] < 1e-4);
    }
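
    // A minimal additional sketch exercising the `normalize == false` path;
    // it assumes the Rayleigh quotient in that branch divides by ||x||^2 of
    // the iterate used in the numerator, and reuses the [[2, 1], [1, 2]]
    // matrix from `test_power_iteration_simple`.
    #[test]
    fn test_power_iteration_without_normalization() {
        // Lower triangle of the symmetric matrix [[2, 1], [1, 2]].
        let data = vec![2.0, 1.0, 2.0];
        let indptr = vec![0, 1, 3];
        let indices = vec![0, 0, 1];
        let matrix = SymCsrMatrix::new(data, indptr, indices, (2, 2)).unwrap();

        let options = PowerIterationOptions {
            max_iter: 100,
            tol: 1e-8,
            normalize: false,
        };
        let result = power_iteration(&matrix, &options, None).unwrap();

        assert!(result.converged);
        assert!((result.eigenvalues[0] - 3.0).abs() < 1e-6);
    }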
}