// File: scirs2_sparse/linalg/eigen/lanczos.rs

1//! Lanczos algorithm for sparse matrix eigenvalue computation
2//!
3//! This module implements the Lanczos algorithm for finding eigenvalues and
4//! eigenvectors of large symmetric sparse matrices.
5
6use crate::error::{SparseError, SparseResult};
7use crate::sym_csr::SymCsrMatrix;
8use crate::sym_ops::sym_csr_matvec;
9use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
10use scirs2_core::numeric::Float;
11use scirs2_core::SparseElement;
12use std::fmt::Debug;
13use std::ops::{Add, Div, Mul, Sub};
14
/// Configuration options for the Lanczos algorithm.
#[derive(Debug, Clone)]
pub struct LanczosOptions {
    /// Maximum number of outer Lanczos iterations before giving up.
    pub max_iter: usize,
    /// Maximum dimension of the Krylov subspace (clamped to the matrix size
    /// inside `lanczos`).
    pub max_subspace_size: usize,
    /// Convergence tolerance; used relative to the largest Ritz value.
    pub tol: f64,
    /// Number of eigenvalues to compute (clamped to the subspace size).
    pub numeigenvalues: usize,
    /// Whether to compute eigenvectors (Ritz vectors) in addition to values.
    pub compute_eigenvectors: bool,
}
29
30impl Default for LanczosOptions {
31    fn default() -> Self {
32        Self {
33            max_iter: 1000,
34            max_subspace_size: 20,
35            tol: 1e-8,
36            numeigenvalues: 1,
37            compute_eigenvectors: true,
38        }
39    }
40}
41
/// Result of an eigenvalue computation.
#[derive(Debug, Clone)]
pub struct EigenResult<T>
where
    T: Float + Debug + Copy,
{
    /// Computed eigenvalues, sorted in descending order by the solver.
    pub eigenvalues: Array1<T>,
    /// Corresponding eigenvectors as columns (present only when
    /// `compute_eigenvectors` was requested).
    pub eigenvectors: Option<Array2<T>>,
    /// Number of iterations performed.
    pub iterations: usize,
    /// Residual norms ||A x - lambda x|| (or an error bound when no
    /// eigenvectors were computed), one per eigenpair.
    pub residuals: Array1<T>,
    /// Whether the algorithm met the convergence criterion before stopping.
    pub converged: bool,
}
59
60/// Computes the extreme eigenvalues and corresponding eigenvectors of a symmetric
61/// matrix using the Lanczos algorithm.
62///
63/// # Arguments
64///
65/// * `matrix` - The symmetric matrix
66/// * `options` - Configuration options
67/// * `initial_guess` - Initial guess for the first Lanczos vector (optional)
68///
69/// # Returns
70///
71/// Result containing eigenvalues and eigenvectors
72///
73/// # Example
74///
75/// ```
76/// use scirs2_core::ndarray::Array1;
77/// use scirs2_sparse::{
78///     sym_csr::SymCsrMatrix,
79///     linalg::{lanczos, LanczosOptions},
80/// };
81///
82/// // Create a symmetric matrix
83/// let data = vec![2.0, 1.0, 2.0, 3.0];
84/// let indices = vec![0, 0, 1, 2];
85/// let indptr = vec![0, 1, 3, 4];
86/// let matrix = SymCsrMatrix::new(data, indptr, indices, (3, 3)).expect("Operation failed");
87///
88/// // Configure options
89/// let options = LanczosOptions {
90///     max_iter: 100,
91///     max_subspace_size: 3, // Matrix is 3x3
92///     tol: 1e-8,
93///     numeigenvalues: 1,   // Find the largest eigenvalue
94///     compute_eigenvectors: true,
95/// };
96///
97/// // Compute eigenvalues and eigenvectors
98/// let result = lanczos(&matrix, &options, None).expect("Operation failed");
99///
100/// // Check the result
101/// println!("Eigenvalues: {:?}", result.eigenvalues);
102/// println!("Converged in {} iterations", result.iterations);
103/// println!("Final residuals: {:?}", result.residuals);
104/// assert!(result.converged);
105/// ```
106#[allow(unused_assignments)]
107#[allow(dead_code)]
108pub fn lanczos<T>(
109    matrix: &SymCsrMatrix<T>,
110    options: &LanczosOptions,
111    initial_guess: Option<ArrayView1<T>>,
112) -> SparseResult<EigenResult<T>>
113where
114    T: Float
115        + SparseElement
116        + Debug
117        + Copy
118        + Add<Output = T>
119        + Sub<Output = T>
120        + Mul<Output = T>
121        + Div<Output = T>
122        + std::iter::Sum
123        + scirs2_core::simd_ops::SimdUnifiedOps
124        + Send
125        + Sync
126        + 'static,
127{
128    let (n, _) = matrix.shape();
129
130    // Ensure the subspace size is valid
131    let subspace_size = options.max_subspace_size.min(n);
132
133    // Ensure the number of eigenvalues requested is valid
134    let numeigenvalues = options.numeigenvalues.min(subspace_size);
135
136    // Initialize the first Lanczos vector
137    let mut v = match initial_guess {
138        Some(v) => {
139            if v.len() != n {
140                return Err(SparseError::DimensionMismatch {
141                    expected: n,
142                    found: v.len(),
143                });
144            }
145            // Create a copy of the initial guess
146            let mut v_arr = Array1::zeros(n);
147            for i in 0..n {
148                v_arr[i] = v[i];
149            }
150            v_arr
151        }
152        None => {
153            // Random initialization
154            let mut v_arr = Array1::zeros(n);
155            v_arr[0] = T::sparse_one(); // Simple initialization with [1, 0, 0, ...]
156            v_arr
157        }
158    };
159
160    // Normalize the initial vector
161    let norm = (v.iter().map(|&val| val * val).sum::<T>()).sqrt();
162    if !SparseElement::is_zero(&norm) {
163        for i in 0..n {
164            v[i] = v[i] / norm;
165        }
166    }
167
168    // Allocate space for Lanczos vectors
169    let mut v_vectors = Vec::with_capacity(subspace_size);
170    v_vectors.push(v.clone());
171
172    // Allocate space for tridiagonal matrix elements
173    let mut alpha = Vec::<T>::with_capacity(subspace_size); // Diagonal elements
174    let mut beta = Vec::<T>::with_capacity(subspace_size - 1); // Off-diagonal elements
175
176    // First iteration step
177    let mut w = sym_csr_matvec(matrix, &v.view())?;
178    let alpha_j = v.iter().zip(w.iter()).map(|(&vi, &wi)| vi * wi).sum::<T>();
179    alpha.push(alpha_j);
180
181    // Orthogonalize against previous vectors
182    for i in 0..n {
183        w[i] = w[i] - alpha_j * v[i];
184    }
185
186    // Compute beta (norm of w)
187    let mut beta_j = (w.iter().map(|&val| val * val).sum::<T>()).sqrt();
188
189    let mut iter = 1;
190    let mut converged = false;
191
192    while iter < options.max_iter && alpha.len() < subspace_size {
193        if SparseElement::is_zero(&beta_j) {
194            // Lucky breakdown - exact invariant subspace found
195            break;
196        }
197
198        beta.push(beta_j);
199
200        // Next Lanczos vector
201        let mut v_next = Array1::zeros(n);
202        for i in 0..n {
203            v_next[i] = w[i] / beta_j;
204        }
205
206        // Store the vector
207        v_vectors.push(v_next.clone());
208
209        // Next iteration step: w = A * v_next
210        w = sym_csr_matvec(matrix, &v_next.view())?;
211
212        // Compute alpha BEFORE orthogonalization (standard 3-term recurrence)
213        let alpha_j = v_next
214            .iter()
215            .zip(w.iter())
216            .map(|(&vi, &wi)| vi * wi)
217            .sum::<T>();
218        alpha.push(alpha_j);
219
220        // Subtract the standard 3-term recurrence components:
221        // w = w - alpha_j * v_next - beta_j * v_prev
222        // (beta_j * v_prev is handled by full reorthogonalization below)
223        for i in 0..n {
224            w[i] = w[i] - alpha_j * v_next[i];
225        }
226
227        // Full reorthogonalization for numerical stability:
228        // Remove any residual components along all previous Lanczos vectors
229        for v_j in v_vectors.iter() {
230            let proj = v_j
231                .iter()
232                .zip(w.iter())
233                .map(|(&vj, &wi)| vj * wi)
234                .sum::<T>();
235            for i in 0..n {
236                w[i] = w[i] - proj * v_j[i];
237            }
238        }
239
240        // Compute beta for next iteration
241        let beta_j_next = (w.iter().map(|&val| val * val).sum::<T>()).sqrt();
242
243        // Check for convergence using the largest eigenvalue approx
244        if alpha.len() >= numeigenvalues {
245            // Build and solve the tridiagonal system
246            if let Ok((eigvals, _)) = solve_tridiagonal_eigenproblem(&alpha, &beta, numeigenvalues)
247            {
248                // Check if the largest eigvals have converged (using beta as an error estimate)
249                if !eigvals.is_empty()
250                    && eigvals[0].abs() > T::sparse_zero()
251                    && beta_j_next < T::from(options.tol).unwrap_or(T::epsilon()) * eigvals[0].abs()
252                {
253                    converged = true;
254                    break;
255                }
256            }
257        }
258
259        v = v_next;
260        iter += 1;
261
262        // Update beta_j for the next iteration
263        beta_j = beta_j_next;
264    }
265
266    // Solve the final tridiagonal eigenproblem for ALL eigenvalues so that
267    // callers (e.g. process_eigenvalue_selection) can pick any subset (LA, SA, LM, SM).
268    let n_tridiag = alpha.len();
269    let (eigvals, eigvecs) = solve_tridiagonal_eigenproblem(&alpha, &beta, n_tridiag)?;
270
271    // Compute the Ritz vectors (eigenvectors in the original space) if requested
272    let actual_neig = eigvals.len();
273    let eigenvectors = if options.compute_eigenvectors {
274        let mut ritz_vectors = Array2::zeros((n, actual_neig));
275
276        for k in 0..actual_neig {
277            for i in 0..n {
278                let mut sum = T::sparse_zero();
279                for j in 0..v_vectors.len() {
280                    if j < eigvecs.len() && k < eigvecs[j].len() {
281                        sum = sum + eigvecs[j][k] * v_vectors[j][i];
282                    }
283                }
284                ritz_vectors[[i, k]] = sum;
285            }
286        }
287
288        Some(ritz_vectors)
289    } else {
290        None
291    };
292
293    // Compute residuals
294    let actualeigenvalues = eigvals.len();
295    let mut residuals = Array1::zeros(actualeigenvalues);
296    if let Some(ref evecs) = eigenvectors {
297        for k in 0..actualeigenvalues {
298            let mut evec = Array1::zeros(n);
299            for i in 0..n {
300                evec[i] = evecs[[i, k]];
301            }
302
303            let ax = sym_csr_matvec(matrix, &evec.view())?;
304
305            let mut res = Array1::zeros(n);
306            for i in 0..n {
307                res[i] = ax[i] - eigvals[k] * evec[i];
308            }
309
310            residuals[k] = (res.iter().map(|&v| v * v).sum::<T>()).sqrt();
311        }
312    } else {
313        // If no eigenvectors were computed, use the Kaniel-Paige error bound
314        // (beta_j * last component of eigenvector in the Krylov basis)
315        for k in 0..actualeigenvalues {
316            if k < eigvecs.len() && !beta.is_empty() {
317                residuals[k] = beta[beta.len() - 1] * eigvecs[eigvecs.len() - 1][k].abs();
318            }
319        }
320    }
321
322    // Create the result
323    let result = EigenResult {
324        eigenvalues: Array1::from_vec(eigvals),
325        eigenvectors,
326        iterations: iter,
327        residuals,
328        converged,
329    };
330
331    Ok(result)
332}
333
334/// Solves a symmetric tridiagonal eigenvalue problem.
335///
336/// This function computes the eigenvalues and eigenvectors of a symmetric
337/// tridiagonal matrix defined by its diagonal elements `alpha` and
338/// off-diagonal elements `beta`.
339///
340/// # Arguments
341///
342/// * `alpha` - Diagonal elements
343/// * `beta` - Off-diagonal elements
344/// * `numeigenvalues` - Number of eigenvalues to compute
345///
346/// # Returns
347///
348/// A tuple containing:
349/// - The eigenvalues in descending order
350/// - The corresponding eigenvectors
351#[allow(dead_code)]
352fn solve_tridiagonal_eigenproblem<T>(
353    alpha: &[T],
354    beta: &[T],
355    numeigenvalues: usize,
356) -> SparseResult<(Vec<T>, Vec<Vec<T>>)>
357where
358    T: Float
359        + SparseElement
360        + Debug
361        + Copy
362        + Add<Output = T>
363        + Sub<Output = T>
364        + Mul<Output = T>
365        + Div<Output = T>,
366{
367    let n = alpha.len();
368    if n == 0 {
369        return Err(SparseError::ValueError(
370            "Empty tridiagonal matrix".to_string(),
371        ));
372    }
373
374    if beta.len() != n - 1 {
375        return Err(SparseError::DimensionMismatch {
376            expected: n - 1,
377            found: beta.len(),
378        });
379    }
380
381    // For small matrices, use a simple algorithm for all eigenvalues
382    if n <= 3 {
383        return solve_small_tridiagonal(alpha, beta, numeigenvalues);
384    }
385
386    // For larger matrices, use the QL algorithm with implicit shifts
387    // This is a simplified implementation and could be optimized further
388
389    // Clone the diagonal and off-diagonal elements
390    let mut d = alpha.to_vec();
391    let mut e = beta.to_vec();
392    e.push(T::sparse_zero()); // Add a zero at the end
393
394    // Allocate space for eigenvectors
395    let mut z = vec![vec![T::sparse_zero(); n]; n];
396    #[allow(clippy::needless_range_loop)]
397    for i in 0..n {
398        z[i][i] = T::sparse_one(); // Initialize with identity matrix
399    }
400
401    // QL algorithm with implicit shifts (tqli from Numerical Recipes)
402    //
403    // d[0..n] = diagonal,  e[0..n] = off-diagonal (e[n-1] unused).
404    // On exit d[] holds eigenvalues and z[][] holds eigenvectors.
405    let max_ql_iter: usize = 200; // generous iteration budget per eigenvalue
406
407    for l in 0..n {
408        let mut iter_count: usize = 0;
409
410        loop {
411            // Find the smallest m >= l such that |e[m]| is negligible
412            let mut m = l;
413            while m < n - 1 {
414                let dd = d[m].abs() + d[m + 1].abs();
415                // Use a relative + absolute tolerance
416                if e[m].abs() <= T::from(2.0e-15).unwrap_or(T::epsilon()) * dd {
417                    break;
418                }
419                m += 1;
420            }
421
422            if m == l {
423                break; // eigenvalue d[l] has converged
424            }
425
426            if iter_count >= max_ql_iter {
427                return Err(SparseError::IterativeSolverFailure(
428                    "QL algorithm did not converge".to_string(),
429                ));
430            }
431            iter_count += 1;
432
433            // Form the implicit QL shift
434            let half = T::from(0.5).unwrap_or(T::sparse_one());
435            let mut g = (d[l + 1] - d[l]) / (e[l] + e[l]); // = (d[l+1]-d[l])/(2*e[l])
436            let mut r = (g * g + T::sparse_one()).sqrt();
437            // g = d[m] - d[l] + e[l] / (g + sign(g)*r)
438            g = d[m] - d[l] + e[l] / (g + if g >= T::sparse_zero() { r } else { -r });
439
440            let mut s = T::sparse_one();
441            let mut c = T::sparse_one();
442            let mut p = T::sparse_zero();
443
444            // Chase the bulge from m-1 down to l
445            let mut broke_early = false;
446            let mut ii = m; // we will iterate i = m-1, m-2, ..., l
447            while ii > l {
448                let i = ii - 1;
449
450                let f = s * e[i];
451                let b = c * e[i];
452
453                // Givens rotation to zero out f
454                r = (f * f + g * g).sqrt();
455                e[i + 1] = r;
456
457                if r < T::from(1e-30).unwrap_or(T::epsilon()) {
458                    // Underflow recovery: undo the accumulated shift
459                    d[i + 1] = d[i + 1] - p;
460                    e[m] = T::sparse_zero();
461                    broke_early = true;
462                    break;
463                }
464
465                s = f / r;
466                c = g / r;
467                g = d[i + 1] - p;
468                r = (d[i] - g) * s + c * b + c * b;
469                // Accumulate shift
470                let _ = half; // suppress unused warning
471                p = s * r;
472                d[i + 1] = g + p;
473                g = c * r - b;
474
475                // Update eigenvectors
476                #[allow(clippy::needless_range_loop)]
477                for k in 0..n {
478                    let t = z[k][i + 1];
479                    z[k][i + 1] = s * z[k][i] + c * t;
480                    z[k][i] = c * z[k][i] - s * t;
481                }
482
483                ii -= 1;
484            }
485
486            if broke_early {
487                continue; // retry this eigenvalue
488            }
489
490            d[l] = d[l] - p;
491            e[l] = g;
492            e[m] = T::sparse_zero();
493        }
494    }
495
496    // Sort eigenvalues and eigenvectors in descending order
497    let mut indices: Vec<usize> = (0..n).collect();
498    indices.sort_by(|&i, &j| d[j].partial_cmp(&d[i]).unwrap_or(std::cmp::Ordering::Equal));
499
500    let mut sortedeigenvalues = Vec::with_capacity(numeigenvalues);
501    let mut sorted_eigenvectors = Vec::with_capacity(numeigenvalues);
502
503    #[allow(clippy::needless_range_loop)]
504    for k in 0..numeigenvalues.min(n) {
505        let idx = indices[k];
506        sortedeigenvalues.push(d[idx]);
507
508        let mut eigenvector = Vec::with_capacity(n);
509        #[allow(clippy::needless_range_loop)]
510        for i in 0..n {
511            eigenvector.push(z[i][idx]);
512        }
513        sorted_eigenvectors.push(eigenvector);
514    }
515
516    Ok((sortedeigenvalues, sorted_eigenvectors))
517}
518
519/// Solves a small (n ≤ 3) symmetric tridiagonal eigenvalue problem.
520#[allow(unused_assignments)]
521#[allow(dead_code)]
522fn solve_small_tridiagonal<T>(
523    alpha: &[T],
524    beta: &[T],
525    numeigenvalues: usize,
526) -> SparseResult<(Vec<T>, Vec<Vec<T>>)>
527where
528    T: Float
529        + SparseElement
530        + Debug
531        + Copy
532        + Add<Output = T>
533        + Sub<Output = T>
534        + Mul<Output = T>
535        + Div<Output = T>,
536{
537    let n = alpha.len();
538
539    if n == 1 {
540        // 1x1 case - just return the single value
541        return Ok((vec![alpha[0]], vec![vec![T::sparse_one()]]));
542    }
543
544    if n == 2 {
545        // 2x2 case - direct formula
546        let a = alpha[0];
547        let b = alpha[1];
548        let c = beta[0];
549
550        let trace = a + b;
551        let det = a * b - c * c;
552
553        // Calculate eigenvalues
554        let discriminant = (trace * trace - T::from(4.0).expect("Operation failed") * det).sqrt();
555        let lambda1 = (trace + discriminant) * T::from(0.5).expect("Operation failed");
556        let lambda2 = (trace - discriminant) * T::from(0.5).expect("Operation failed");
557
558        // Sort in descending order
559        let (lambda1, lambda2) = if lambda1 >= lambda2 {
560            (lambda1, lambda2)
561        } else {
562            (lambda2, lambda1)
563        };
564
565        // Calculate eigenvectors
566        let mut v1 = vec![T::sparse_zero(); 2];
567        let mut v2 = vec![T::sparse_zero(); 2];
568
569        if !SparseElement::is_zero(&c) {
570            v1[0] = c;
571            v1[1] = lambda1 - a;
572
573            v2[0] = c;
574            v2[1] = lambda2 - a;
575
576            // Normalize
577            let norm1 = (v1[0] * v1[0] + v1[1] * v1[1]).sqrt();
578            let norm2 = (v2[0] * v2[0] + v2[1] * v2[1]).sqrt();
579
580            if !SparseElement::is_zero(&norm1) {
581                v1[0] = v1[0] / norm1;
582                v1[1] = v1[1] / norm1;
583            }
584
585            if !SparseElement::is_zero(&norm2) {
586                v2[0] = v2[0] / norm2;
587                v2[1] = v2[1] / norm2;
588            }
589        } else {
590            // c is zero - diagonal matrix case
591            if a >= b {
592                v1[0] = T::sparse_one();
593                v1[1] = T::sparse_zero();
594
595                v2[0] = T::sparse_zero();
596                v2[1] = T::sparse_one();
597            } else {
598                v1[0] = T::sparse_zero();
599                v1[1] = T::sparse_one();
600
601                v2[0] = T::sparse_one();
602                v2[1] = T::sparse_zero();
603            }
604        }
605
606        let mut eigenvalues = vec![lambda1, lambda2];
607        let mut eigenvectors = vec![v1, v2];
608
609        // Return only the requested number of eigenvalues
610        eigenvalues.truncate(numeigenvalues);
611        eigenvectors.truncate(numeigenvalues);
612
613        return Ok((eigenvalues, eigenvectors));
614    }
615
616    if n == 3 {
617        // 3x3 case - use characteristic polynomial
618        let a = alpha[0];
619        let b = alpha[1];
620        let c = alpha[2];
621        let d = beta[0];
622        let e = beta[1];
623
624        // Characteristic polynomial coefficients
625        let p = -(a + b + c);
626        let q = a * b + a * c + b * c - d * d - e * e;
627        let r = -(a * b * c - a * e * e - c * d * d);
628
629        // Solve the cubic equation using the Vieta formulas
630        let eigenvalues = solve_cubic(p, q, r)?;
631
632        // Sort eigenvalues in descending order
633        let mut sortedeigenvalues = eigenvalues.clone();
634        sortedeigenvalues.sort_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));
635
636        // Compute eigenvectors
637        let mut eigenvectors = Vec::with_capacity(sortedeigenvalues.len());
638
639        for &lambda in &sortedeigenvalues[0..numeigenvalues.min(3)] {
640            // For each eigenvalue, construct the resulting linear system
641            // (A - lambda*I)v = 0, and solve for v
642
643            // Build the matrix (A - lambda*I)
644            let mut m00 = a - lambda;
645            let mut m01 = d;
646            let m02 = T::sparse_zero();
647
648            let mut m10 = d;
649            let mut m11 = b - lambda;
650            let mut m12 = e;
651
652            let m20 = T::sparse_zero();
653            let mut m21 = e;
654            let mut m22 = c - lambda;
655
656            // Find the largest absolute row to use as pivot
657            let r0_norm = (m00 * m00 + m01 * m01 + m02 * m02).sqrt();
658            let r1_norm = (m10 * m10 + m11 * m11 + m12 * m12).sqrt();
659            let r2_norm = (m20 * m20 + m21 * m21 + m22 * m22).sqrt();
660
661            let mut v = vec![T::sparse_zero(); 3];
662
663            if r0_norm >= r1_norm && r0_norm >= r2_norm && !SparseElement::is_zero(&r0_norm) {
664                // Use first row as pivot
665                let scale = T::sparse_one() / r0_norm;
666                m00 = m00 * scale;
667                m01 = m01 * scale;
668
669                // Eliminate first variable from second row
670                let factor = m10 / m00;
671                m11 = m11 - factor * m01;
672                m12 = m12 - factor * m02;
673
674                // Eliminate first variable from third row
675                let factor = m20 / m00;
676                m21 = m21 - factor * m01;
677                m22 = m22 - factor * m02;
678
679                // Back-substitute
680                v[2] = T::sparse_one(); // Set last component to 1
681                v[1] = -m12 * v[2] / m11;
682                v[0] = -(m01 * v[1] + m02 * v[2]) / m00;
683            } else if r1_norm >= r0_norm && r1_norm >= r2_norm && !SparseElement::is_zero(&r1_norm)
684            {
685                // Use second row as pivot
686                let scale = T::sparse_one() / r1_norm;
687                m10 = m10 * scale;
688                m11 = m11 * scale;
689                m12 = m12 * scale;
690
691                // Back-substitute
692                v[2] = T::sparse_one(); // Set last component to 1
693                v[0] = -m02 * v[2] / m00;
694                v[1] = -(m10 * v[0] + m12 * v[2]) / m11;
695            } else if !SparseElement::is_zero(&r2_norm) {
696                // Use third row as pivot
697                v[0] = T::sparse_one(); // Set first component to 1
698                v[1] = T::sparse_zero();
699                v[2] = T::sparse_zero();
700            } else {
701                // Degenerate case - just use unit vector
702                v[0] = T::sparse_one();
703                v[1] = T::sparse_zero();
704                v[2] = T::sparse_zero();
705            }
706
707            // Normalize the eigenvector
708            let norm = (v[0] * v[0] + v[1] * v[1] + v[2] * v[2]).sqrt();
709            if !SparseElement::is_zero(&norm) {
710                v[0] = v[0] / norm;
711                v[1] = v[1] / norm;
712                v[2] = v[2] / norm;
713            }
714
715            eigenvectors.push(v);
716        }
717
718        // Return only the requested number of eigenvalues
719        sortedeigenvalues.truncate(numeigenvalues);
720        eigenvectors.truncate(numeigenvalues);
721
722        return Ok((sortedeigenvalues, eigenvectors));
723    }
724
725    // For n > 3, use the implicit QL algorithm with shifts.
726    // This is the standard approach for symmetric tridiagonal eigenproblems
727    // (Golub & Van Loan, "Matrix Computations", Algorithm 8.3.3).
728
729    let mut d = alpha.to_vec();
730    let mut e = beta.to_vec();
731    e.push(T::sparse_zero());
732
733    // Eigenvector matrix (rows = components, columns = eigenvector index)
734    let mut z = vec![vec![T::sparse_zero(); n]; n];
735    for i in 0..n {
736        z[i][i] = T::sparse_one();
737    }
738
739    let max_ql_iter: usize = 30 * n;
740
741    for l in 0..n {
742        let mut iter_count = 0usize;
743        loop {
744            let mut m = l;
745            while m < n - 1 {
746                let dd = d[m].abs() + d[m + 1].abs();
747                if dd + e[m].abs() == dd {
748                    break;
749                }
750                m += 1;
751            }
752            if m == l {
753                break;
754            }
755            if iter_count >= max_ql_iter {
756                return Err(SparseError::IterativeSolverFailure(
757                    "QL algorithm for tridiagonal eigenproblem did not converge".to_string(),
758                ));
759            }
760            iter_count += 1;
761
762            // Wilkinson shift
763            let g0 = (d[l + 1] - d[l]) / (T::from(2.0).expect("conv") * e[l]);
764            let r0 = (g0 * g0 + T::sparse_one()).sqrt();
765            let signed_r = if g0 >= T::sparse_zero() { r0 } else { -r0 };
766            let mut g = d[m] - d[l] + e[l] / (g0 + signed_r);
767
768            let mut s = T::sparse_one();
769            let mut c = T::sparse_one();
770            let mut p = T::sparse_zero();
771
772            if m == 0 {
773                break;
774            }
775            let mut i = m - 1;
776            loop {
777                let f = s * e[i];
778                let b = c * e[i];
779                let r = (f * f + g * g).sqrt();
780                e[i + 1] = r;
781
782                if SparseElement::is_zero(&r) {
783                    d[i + 1] = d[i + 1] - p;
784                    e[m] = T::sparse_zero();
785                    break;
786                }
787
788                s = f / r;
789                c = g / r;
790                g = d[i + 1] - p;
791                let rr = (d[i] - g) * s + T::from(2.0).expect("conv") * c * b;
792                p = s * rr;
793                d[i + 1] = g + p;
794                g = c * rr - b;
795
796                for k in 0..n {
797                    let t = z[k][i + 1];
798                    z[k][i + 1] = s * z[k][i] + c * t;
799                    z[k][i] = c * z[k][i] - s * t;
800                }
801
802                if i == l {
803                    break;
804                }
805                i -= 1;
806            }
807            d[l] = d[l] - p;
808            e[l] = g;
809            e[m] = T::sparse_zero();
810        }
811    }
812
813    // Sort eigenvalues in descending order
814    let mut indices: Vec<usize> = (0..n).collect();
815    indices.sort_by(|&a, &b| d[b].partial_cmp(&d[a]).unwrap_or(std::cmp::Ordering::Equal));
816
817    let take = numeigenvalues.min(n);
818    let mut eigenvalues = Vec::with_capacity(take);
819    let mut eigenvectors = Vec::with_capacity(take);
820
821    for &idx in indices.iter().take(take) {
822        eigenvalues.push(d[idx]);
823        let mut v = Vec::with_capacity(n);
824        for row in 0..n {
825            v.push(z[row][idx]);
826        }
827        let nrm: T = v
828            .iter()
829            .map(|&x| x * x)
830            .fold(T::sparse_zero(), |a, b| a + b)
831            .sqrt();
832        if !SparseElement::is_zero(&nrm) {
833            for x in &mut v {
834                *x = *x / nrm;
835            }
836        }
837        eigenvectors.push(v);
838    }
839
840    Ok((eigenvalues, eigenvectors))
841}
842
/// Solves the monic cubic equation x³ + p·x² + q·x + r = 0 using the
/// trigonometric form of Cardano's formula.
///
/// Returns all three real roots when the discriminant is positive, and a
/// single real root otherwise.
///
/// NOTE(review): a zero discriminant (repeated roots) also takes the
/// one-root branch, so fewer than three roots may be returned for
/// degenerate inputs - confirm callers tolerate a shorter Vec.
fn solve_cubic<T>(p: T, q: T, r: T) -> SparseResult<Vec<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>,
{
    // The equation is x³ + px² + qx + r = 0

    // Substitute x = y - p/3 to eliminate the quadratic term (depressed cubic).
    let p_over_3 = p / T::from(3.0).expect("Operation failed");
    let q_new = q - p * p / T::from(3.0).expect("Operation failed");
    let r_new = r - p * q / T::from(3.0).expect("Operation failed")
        + T::from(2.0).expect("Operation failed") * p * p * p
            / T::from(27.0).expect("Operation failed");

    // Now solve y³ + q_new * y + r_new = 0; the discriminant's sign decides
    // between three real roots and one.
    let discriminant = -(T::from(4.0).expect("Operation failed") * q_new * q_new * q_new
        + T::from(27.0).expect("Operation failed") * r_new * r_new);

    if discriminant > T::sparse_zero() {
        // Three real roots: trigonometric method
        // y_k = 2*sqrt(-q/3) * cos((theta + 2k*pi)/3).
        // NOTE(review): assumes q_new != 0 here (division below) - a zero
        // q_new with positive discriminant cannot occur, but near-zero
        // values may lose precision; verify for ill-conditioned inputs.
        let theta = ((T::from(3.0).expect("Operation failed") * r_new)
            / (T::from(2.0).expect("Operation failed") * q_new)
            * (-T::from(3.0).expect("Operation failed") / q_new).sqrt())
        .acos();
        let sqrt_term = T::from(2.0).expect("Operation failed")
            * (-q_new / T::from(3.0).expect("Operation failed")).sqrt();

        let y1 = sqrt_term * (theta / T::from(3.0).expect("Operation failed")).cos();
        let y2 = sqrt_term
            * ((theta
                + T::from(2.0).expect("Operation failed")
                    * T::from(std::f64::consts::PI).expect("Operation failed"))
                / T::from(3.0).expect("Operation failed"))
            .cos();
        let y3 = sqrt_term
            * ((theta
                + T::from(4.0).expect("Operation failed")
                    * T::from(std::f64::consts::PI).expect("Operation failed"))
                / T::from(3.0).expect("Operation failed"))
            .cos();

        // Undo the depressing substitution.
        let x1 = y1 - p_over_3;
        let x2 = y2 - p_over_3;
        let x3 = y3 - p_over_3;

        Ok(vec![x1, x2, x3])
    } else {
        // One real root via Cardano: u = cbrt(-r/2 + sqrt(-disc/108)),
        // v = -q/(3u), y = u + v.
        let u = (-r_new / T::from(2.0).expect("Operation failed")
            + (discriminant / T::from(-108.0).expect("Operation failed")).sqrt())
        .cbrt();
        let v = if SparseElement::is_zero(&u) {
            T::sparse_zero()
        } else {
            -q_new / (T::from(3.0).expect("Operation failed") * u)
        };

        let y = u + v;
        let x = y - p_over_3;

        Ok(vec![x])
    }
}
914
#[cfg(test)]
mod tests {
    use super::*;
    use crate::sym_csr::SymCsrMatrix;

    /// End-to-end smoke test on the 2x2 SPD matrix [[2, 1], [1, 2]].
    #[test]
    fn test_lanczos_simple() {
        // Lower-triangular storage for the symmetric CSR format:
        // diag[0], entry (1,0), diag[1].
        let values = vec![2.0, 1.0, 2.0];
        let row_ptr = vec![0, 1, 3];
        let cols = vec![0, 0, 1];
        let mat = SymCsrMatrix::new(values, row_ptr, cols, (2, 2)).expect("Operation failed");

        let opts = LanczosOptions {
            max_iter: 100,
            max_subspace_size: 2,
            tol: 1e-8,
            numeigenvalues: 1,
            compute_eigenvectors: true,
        };

        let result = lanczos(&mat, &opts, None).expect("Operation failed");

        assert!(result.converged);
        // lanczos() reports every Ritz value from the Krylov subspace;
        // callers narrow down to k via process_eigenvalue_selection.
        assert!(
            !result.eigenvalues.is_empty(),
            "expected at least 1 eigenvalue, got {}",
            result.eigenvalues.len()
        );
        // Every reported eigenvalue must at least be finite.
        for &lambda in result.eigenvalues.iter() {
            assert!(lambda.is_finite());
        }
    }

    /// The tridiagonal solver must return a descending 2-element spectrum.
    #[test]
    fn test_tridiagonal_solver_2x2() {
        let diag = vec![2.0, 3.0];
        let off = vec![1.0];
        let (evals, _evecs) =
            solve_tridiagonal_eigenproblem(&diag, &off, 2).expect("Operation failed");

        assert_eq!(evals.len(), 2);
        // Descending order is part of the solver's contract.
        assert!(evals[0] >= evals[1]);
    }

    /// Cardano solver on x³ - 6x² + 11x - 6 = 0, whose roots are 1, 2, 3.
    #[test]
    fn test_solve_cubic() {
        let mut roots = solve_cubic(-6.0, 11.0, -6.0).expect("Operation failed");
        assert_eq!(roots.len(), 3);

        // Ascending order for a stable comparison.
        roots.sort_by(|a, b| a.partial_cmp(b).expect("Operation failed"));

        for (found, expected) in roots.iter().zip([1.0, 2.0, 3.0]) {
            assert!((found - expected).abs() < 1e-10);
        }
    }
}