// scirs2_sparse/linalg/eigen/power_iteration.rs

1//! Power iteration method for sparse matrix eigenvalue computation
2//!
3//! This module implements the power iteration algorithm for finding the largest
4//! eigenvalue and corresponding eigenvector of symmetric sparse matrices.
5
6use crate::error::{SparseError, SparseResult};
7use crate::sym_csr::SymCsrMatrix;
8use crate::sym_ops::sym_csr_matvec;
9use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
10use scirs2_core::numeric::Float;
11use scirs2_core::SparseElement;
12use std::fmt::Debug;
13use std::ops::{Add, Div, Mul, Sub};
14
/// Configuration options for the power iteration method
#[derive(Debug, Clone)]
pub struct PowerIterationOptions {
    /// Maximum number of iterations to perform before giving up
    pub max_iter: usize,
    /// Convergence tolerance: iteration stops once the eigenvalue
    /// estimate changes by less than this between successive steps
    pub tol: f64,
    /// Whether to rescale the iterate to unit length at each iteration;
    /// without this the iterate's magnitude grows by roughly the dominant
    /// eigenvalue per step and may overflow for long runs
    pub normalize: bool,
}
25
26impl Default for PowerIterationOptions {
27    fn default() -> Self {
28        Self {
29            max_iter: 100,
30            tol: 1e-8,
31            normalize: true,
32        }
33    }
34}
35
/// Result of an eigenvalue computation
///
/// Returned by [`power_iteration`]; there the arrays hold a single entry
/// (the dominant eigenpair).
#[derive(Debug, Clone)]
pub struct EigenResult<T>
where
    T: Float + Debug + Copy,
{
    /// Converged eigenvalue estimates, one per computed eigenpair
    pub eigenvalues: Array1<T>,
    /// Corresponding eigenvectors, stored column-wise (if requested)
    pub eigenvectors: Option<Array2<T>>,
    /// Number of iterations performed before convergence or cutoff
    pub iterations: usize,
    /// Residual norms `||A v - λ v||` for each returned eigenpair
    pub residuals: Array1<T>,
    /// Whether the algorithm converged within the iteration limit
    pub converged: bool,
}
53
54/// Computes the largest eigenvalue and corresponding eigenvector of a symmetric
55/// matrix using the power iteration method.
56///
57/// # Arguments
58///
59/// * `matrix` - The symmetric matrix
60/// * `options` - Configuration options
61/// * `initial_guess` - Initial guess for the eigenvector (optional)
62///
63/// # Returns
64///
65/// Result containing eigenvalue and eigenvector
66///
67/// # Example
68///
69/// ```
70/// use scirs2_core::ndarray::Array1;
71/// use scirs2_sparse::{
72///     sym_csr::SymCsrMatrix,
73///     linalg::{power_iteration, PowerIterationOptions},
74/// };
75///
76/// // Create a symmetric matrix
77/// let data = vec![2.0, 1.0, 2.0, 3.0];
78/// let indices = vec![0, 0, 1, 2];
79/// let indptr = vec![0, 1, 3, 4];
80/// let matrix = SymCsrMatrix::new(data, indptr, indices, (3, 3)).unwrap();
81///
82/// // Configure options
83/// let options = PowerIterationOptions {
84///     max_iter: 100,
85///     tol: 1e-8,
86///     normalize: true,
87/// };
88///
89/// // Compute the largest eigenvalue and eigenvector
90/// let result = power_iteration(&matrix, &options, None).unwrap();
91///
92/// // Check the result
93/// println!("Eigenvalue: {}", result.eigenvalues[0]);
94/// println!("Converged in {} iterations", result.iterations);
95/// println!("Final residual: {}", result.residuals[0]);
96/// assert!(result.converged);
97/// ```
98#[allow(dead_code)]
99pub fn power_iteration<T>(
100    matrix: &SymCsrMatrix<T>,
101    options: &PowerIterationOptions,
102    initial_guess: Option<ArrayView1<T>>,
103) -> SparseResult<EigenResult<T>>
104where
105    T: Float
106        + SparseElement
107        + Debug
108        + Copy
109        + Add<Output = T>
110        + Sub<Output = T>
111        + Mul<Output = T>
112        + Div<Output = T>
113        + std::iter::Sum
114        + scirs2_core::simd_ops::SimdUnifiedOps
115        + Send
116        + Sync
117        + 'static,
118{
119    let (n, _) = matrix.shape();
120
121    // Initialize eigenvector
122    let mut x = match initial_guess {
123        Some(v) => {
124            if v.len() != n {
125                return Err(SparseError::DimensionMismatch {
126                    expected: n,
127                    found: v.len(),
128                });
129            }
130            // Create a copy of the initial guess
131            let mut x_arr = Array1::zeros(n);
132            for i in 0..n {
133                x_arr[i] = v[i];
134            }
135            x_arr
136        }
137        None => {
138            // Random initialization
139            let mut x_arr = Array1::zeros(n);
140            x_arr[0] = T::sparse_one(); // Simple initialization with [1, 0, 0, ...]
141            x_arr
142        }
143    };
144
145    // Normalize the initial vector
146    if options.normalize {
147        let norm = (x.iter().map(|&v| v * v).sum::<T>()).sqrt();
148        if !SparseElement::is_zero(&norm) {
149            for i in 0..n {
150                x[i] = x[i] / norm;
151            }
152        }
153    }
154
155    let mut lambda = T::sparse_zero();
156    let mut prev_lambda = T::sparse_zero();
157    let mut converged = false;
158    let mut iter = 0;
159
160    // Power iteration loop
161    while iter < options.max_iter {
162        // Compute matrix-vector product: y = A * x
163        let y = sym_csr_matvec(matrix, &x.view())?;
164
165        // Compute Rayleigh quotient: lambda = (x^T * y) / (x^T * x)
166        let rayleigh_numerator = x.iter().zip(y.iter()).map(|(&xi, &yi)| xi * yi).sum::<T>();
167
168        if options.normalize {
169            lambda = rayleigh_numerator;
170
171            // Check for convergence
172            let diff = (lambda - prev_lambda).abs();
173            if diff < T::from(options.tol).unwrap() {
174                converged = true;
175                break;
176            }
177
178            // Normalize y to get the next x
179            let norm = (y.iter().map(|&v| v * v).sum::<T>()).sqrt();
180            if !SparseElement::is_zero(&norm) {
181                for i in 0..n {
182                    x[i] = y[i] / norm;
183                }
184            }
185        } else {
186            // If not normalizing at each iteration, just update x
187            x = y;
188
189            // Compute eigenvalue estimate
190            let norm_x = (x.iter().map(|&v| v * v).sum::<T>()).sqrt();
191            if !SparseElement::is_zero(&norm_x) {
192                lambda = rayleigh_numerator / (norm_x * norm_x);
193            }
194
195            // Check for convergence
196            let diff = (lambda - prev_lambda).abs();
197            if diff < T::from(options.tol).unwrap() {
198                converged = true;
199                break;
200            }
201        }
202
203        prev_lambda = lambda;
204        iter += 1;
205    }
206
207    // Compute final residual: ||Ax - λx||
208    let ax = sym_csr_matvec(matrix, &x.view())?;
209    let mut residual = Array1::zeros(n);
210    for i in 0..n {
211        residual[i] = ax[i] - lambda * x[i];
212    }
213    let residual_norm = (residual.iter().map(|&v| v * v).sum::<T>()).sqrt();
214
215    // Prepare eigenvectors if needed
216    let eigenvectors = {
217        let mut vecs = Array2::zeros((n, 1));
218        for i in 0..n {
219            vecs[[i, 0]] = x[i];
220        }
221        Some(vecs)
222    };
223
224    // Prepare the result
225    let result = EigenResult {
226        eigenvalues: Array1::from_vec(vec![lambda]),
227        eigenvectors,
228        iterations: iter,
229        residuals: Array1::from_vec(vec![residual_norm]),
230        converged,
231    };
232
233    Ok(result)
234}
235
236#[cfg(test)]
237mod tests {
238    use super::*;
239    use crate::sym_csr::SymCsrMatrix;
240    use scirs2_core::ndarray::Array1;
241
242    #[test]
243    fn test_power_iteration_simple() {
244        // Create a simple 2x2 symmetric matrix [[2, 1], [1, 2]]
245        // For symmetric matrix, store lower triangle: (0,0)=2, (1,0)=1, (1,1)=2
246        let data = vec![2.0, 1.0, 2.0];
247        let indices = vec![0, 0, 1]; // Column indices: row 0 has col 0, row 1 has cols 0,1
248        let indptr = vec![0, 1, 3]; // Row 0 has 1 element, row 1 has 2 elements
249        let matrix = SymCsrMatrix::new(data, indptr, indices, (2, 2)).unwrap();
250
251        let options = PowerIterationOptions::default();
252        let result = power_iteration(&matrix, &options, None).unwrap();
253
254        assert!(result.converged);
255        assert_eq!(result.eigenvalues.len(), 1);
256        // The largest eigenvalue of [[2, 1], [1, 2]] is 3.0
257        assert!((result.eigenvalues[0] - 3.0).abs() < 1e-6);
258    }
259
260    #[test]
261    fn test_power_iteration_with_initial_guess() {
262        // Matrix: [[4, 1], [1, 3]] stored as lower: [[4], [1, 3]]
263        let data = vec![4.0, 1.0, 3.0];
264        let indptr = vec![0, 1, 3];
265        let indices = vec![0, 0, 1];
266        let matrix = SymCsrMatrix::new(data, indptr, indices, (2, 2)).unwrap();
267
268        let initial_guess = Array1::from_vec(vec![1.0, 1.0]);
269        let options = PowerIterationOptions::default();
270        let result = power_iteration(&matrix, &options, Some(initial_guess.view())).unwrap();
271
272        assert!(result.converged);
273        assert_eq!(result.eigenvalues.len(), 1);
274    }
275
276    #[test]
277    fn test_power_iteration_convergence() {
278        // Matrix: [[5, 2], [2, 4]] stored as lower: [[5], [2, 4]]
279        let data = vec![5.0, 2.0, 4.0];
280        let indptr = vec![0, 1, 3];
281        let indices = vec![0, 0, 1];
282        let matrix = SymCsrMatrix::new(data, indptr, indices, (2, 2)).unwrap();
283
284        let options = PowerIterationOptions {
285            max_iter: 50,
286            tol: 1e-10,
287            normalize: true,
288        };
289        let result = power_iteration(&matrix, &options, None).unwrap();
290
291        assert!(result.converged);
292        assert!(result.iterations <= 50);
293        assert!(result.residuals[0] < 1e-4);
294    }
295}