scirs2_sparse/linalg/matfuncs.rs

//! Matrix functions for sparse matrices
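//!
//! Provides Krylov-based application of the matrix exponential
//! (`expm_multiply`) together with norm and condition-number estimators
//! (`onenormest`, `twonormest`, `condest`) for `CsrMatrix`, plus `_enhanced`
//! variants of the estimators that work with the `SparseArray` trait.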

use crate::csr::CsrMatrix;
use crate::error::SparseResult;
use crate::linalg::interface::LinearOperator;
use crate::sparray::SparseArray;
use scirs2_core::ndarray::{Array1, ArrayView1};
use scirs2_core::numeric::{Float, NumAssign};
use scirs2_core::random::Rng;
use scirs2_core::SparseElement;
use std::fmt::Debug;
use std::iter::Sum;
use std::ops::{Add, Div, Mul, Sub};

/// Compute the action of the matrix exponential on a vector: y = exp(t*A) * v
///
/// This function computes y = exp(t*A) * v without explicitly forming exp(t*A),
/// using a Krylov subspace approximation: an Arnoldi iteration builds an
/// orthonormal basis V_m and a small Hessenberg matrix H_m, and the result is
/// reconstructed as y ≈ ||v|| * V_m * exp(t*H_m) * e1.
///
/// # Arguments
///
/// * `a` - The linear operator A
/// * `v` - The vector to multiply
/// * `t` - The scalar parameter (usually time)
/// * `m` - The dimension of the Krylov subspace (default: 30, clamped to [1, n - 1])
/// * `tol` - Tolerance for the approximation (default: 1e-7)
///
/// # Returns
///
/// The result vector y = exp(t*A) * v
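///
/// # Examples
///
/// A minimal sketch using the identity operator, for which
/// exp(t*I) * v = exp(t) * v. The `IdentityOperator` import mirrors the path
/// used by this module's tests; adjust it to the crate's public re-exports
/// if they differ.
///
/// ```no_run
/// use scirs2_sparse::linalg::interface::IdentityOperator;
/// use scirs2_sparse::linalg::expm_multiply;
///
/// let identity = IdentityOperator::<f64>::new(3);
/// let v = vec![1.0, 2.0, 3.0];
/// // y[i] is approximately e * v[i]
/// let y = expm_multiply(&identity, &v, 1.0, None, None).unwrap();
/// ```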
#[allow(dead_code)]
pub fn expm_multiply<F>(
    a: &dyn LinearOperator<F>,
    v: &[F],
    t: F,
    m: Option<usize>,
    tol: Option<F>,
) -> SparseResult<Vec<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(crate::error::SparseError::ValueError(
            "Matrix must be square for expm_multiply".to_string(),
        ));
    }
    if v.len() != cols {
        return Err(crate::error::SparseError::DimensionMismatch {
            expected: cols,
            found: v.len(),
        });
    }

    let n = rows;
    // Clamp the Krylov dimension to [1, n - 1] so the Hessenberg matrix below
    // is always well-formed, even for 1x1 operators.
    let m = m.unwrap_or(30).min(n.saturating_sub(1)).max(1);
    let tol = tol.unwrap_or(F::from(1e-7).unwrap());

    // Special case: t = 0
    if t == F::sparse_zero() {
        return Ok(v.to_vec());
    }

    // Normalize the initial vector
    let v_norm = norm2(v);
    if v_norm == F::sparse_zero() {
        return Ok(vec![F::sparse_zero(); n]);
    }

    let v_normalized: Vec<F> = v.iter().map(|&vi| vi / v_norm).collect();

    // Build the Krylov subspace using Arnoldi iteration
    let mut v = vec![v_normalized.clone()]; // Orthonormal basis
    let mut h = vec![vec![F::sparse_zero(); m]; m + 1]; // Upper Hessenberg matrix

    for j in 0..m {
        // Compute w = A * v[j]
        let mut w = a.matvec(&v[j])?;

        // Orthogonalize w against the previous basis vectors
        for i in 0..=j {
            let h_ij = dot(&v[i], &w);
            h[i][j] = h_ij;
            // w = w - h_ij * v[i]
            for (k, w_val) in w.iter_mut().enumerate().take(n) {
                *w_val -= h_ij * v[i][k];
            }
        }

        let h_next = norm2(&w);
        h[j + 1][j] = h_next;

        // Check for breakdown
        if h_next.abs() < tol * F::from(100).unwrap() {
            // Early termination - the Krylov subspace is exhausted
            break;
        }

        // Normalize w and add it to the basis
        let w_normalized: Vec<F> = w.iter().map(|&wi| wi / h_next).collect();
        v.push(w_normalized);
    }

    // Extract the square Hessenberg matrix
    let actual_m = v.len() - 1;

    // Special case: the Krylov subspace has dimension 1
    if actual_m == 0 {
        // For the 1x1 case, h[0][0] contains the eigenvalue
        let lambda = h[0][0];
        let exp_t_lambda = (t * lambda).exp();
        let mut y = vec![F::sparse_zero(); n];
        for (j, y_val) in y.iter_mut().enumerate().take(n) {
            *y_val = v_norm * exp_t_lambda * v[0][j];
        }
        return Ok(y);
    }

    let mut h_square = vec![vec![F::sparse_zero(); actual_m]; actual_m];
    for (i, h_row) in h.iter().enumerate().take(actual_m) {
        for (j, &h_val) in h_row.iter().enumerate().take(actual_m) {
            h_square[i][j] = h_val;
        }
    }

    // Compute exp(t*H) using scaling and squaring
    let exp_t_h = matrix_exponential_dense(&h_square, t)?;

    // Compute y = v_norm * V * exp(t*H) * e1
    let mut y = vec![F::sparse_zero(); n];
    for (i, v_row) in v.iter().enumerate().take(actual_m) {
        let coeff = v_norm * exp_t_h[i][0];
        for (j, y_val) in y.iter_mut().enumerate().take(n) {
            *y_val += coeff * v_row[j];
        }
    }

    Ok(y)
}

/// Compute matrix exponential of a small dense matrix
#[allow(dead_code)]
fn matrix_exponential_dense<F>(h: &[Vec<F>], t: F) -> SparseResult<Vec<Vec<F>>>
where
    F: Float + NumAssign + SparseElement + 'static,
{
    // Scaling: find s such that ||t*H / 2^s|| < 1
    let mut s = 0;
    let mut scale = F::sparse_one();
    let h_norm = matrix_norm_inf(h);
    let mut scaled_norm = (t * h_norm).abs();

    while scaled_norm > F::sparse_one() {
        s += 1;
        scale *= F::from(2).unwrap();
        scaled_norm /= F::from(2).unwrap();
    }

    let t_scaled = t / scale;

    // Compute the Padé approximation of exp(t_scaled * H)
    let mut exp_h = pade_approximation(h, t_scaled, 6)?;

    // Squaring phase: exp(t*H) = (exp(t*H / 2^s))^(2^s)
    for _ in 0..s {
        exp_h = matrix_multiply_dense(&exp_h, &exp_h)?;
    }

    Ok(exp_h)
}

/// Padé approximation for the matrix exponential
#[allow(dead_code)]
fn pade_approximation<F>(a: &[Vec<F>], t: F, order: usize) -> SparseResult<Vec<Vec<F>>>
where
    F: Float + NumAssign + SparseElement + 'static,
{
    let n = a.len();

    // Compute powers of t*A
    let mut t_a = vec![vec![F::sparse_zero(); n]; n];
    for i in 0..n {
        for j in 0..n {
            t_a[i][j] = t * a[i][j];
        }
    }

    let mut powers = vec![identity_matrix(n)];
    powers.push(t_a.clone());

    for p in 2..=order {
        let prev = &powers[p - 1];
        let next = matrix_multiply_dense(&t_a, prev)?;
        powers.push(next);
    }

    // Diagonal [6/6] Padé coefficients:
    // c_k = (2m - k)! m! / ((2m)! k! (m - k)!) with m = 6;
    // the denominator uses (-1)^k * c_k.
    let num_coeffs = [
        F::sparse_one(),
        F::from(0.5).unwrap(),
        F::from(5.0 / 44.0).unwrap(),
        F::from(1.0 / 66.0).unwrap(),
        F::from(1.0 / 792.0).unwrap(),
        F::from(1.0 / 15840.0).unwrap(),
        F::from(1.0 / 665280.0).unwrap(),
    ];

    let den_coeffs = [
        F::sparse_one(),
        F::from(-0.5).unwrap(),
        F::from(5.0 / 44.0).unwrap(),
        F::from(-1.0 / 66.0).unwrap(),
        F::from(1.0 / 792.0).unwrap(),
        F::from(-1.0 / 15840.0).unwrap(),
        F::from(1.0 / 665280.0).unwrap(),
    ];

    // Compute the numerator and denominator polynomials in t*A
    let mut num = zero_matrix(n);
    let mut den = zero_matrix(n);

    for (i, coeff) in num_coeffs.iter().enumerate().take(order + 1) {
        add_scaled_matrix(&mut num, &powers[i], *coeff);
    }

    for (i, coeff) in den_coeffs.iter().enumerate().take(order + 1) {
        add_scaled_matrix(&mut den, &powers[i], *coeff);
    }

    // Solve den * exp_A = num for exp_A
    solve_matrix_equation(&den, &num)
}

/// Estimate the 2-norm (spectral norm) of a sparse matrix using power iteration
///
/// This function estimates ||A||_2 = σ_max(A), the largest singular value of A,
/// using power iteration on A^T * A. The 2-norm is also known as the spectral norm.
///
/// # Arguments
///
/// * `a` - The sparse matrix
/// * `tol` - Convergence tolerance (default: 1e-6)
/// * `maxiter` - Maximum number of power iterations (default: 100)
///
/// # Returns
///
/// An estimate of the 2-norm of the matrix
///
/// # Examples
///
/// ```no_run
/// use scirs2_sparse::csr::CsrMatrix;
/// use scirs2_sparse::linalg::twonormest;
///
/// let rows = vec![0, 1];
/// let cols = vec![0, 1];
/// let data = vec![2.0, 3.0];
/// let matrix = CsrMatrix::new(data, rows, cols, (2, 2)).unwrap();
///
/// let norm_estimate = twonormest(&matrix, None, Some(1)).unwrap();
/// ```
#[allow(dead_code)]
pub fn twonormest<F>(a: &CsrMatrix<F>, tol: Option<F>, maxiter: Option<usize>) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let (m, n) = (a.rows(), a.cols());
    let tol = tol.unwrap_or_else(|| F::from(1e-6).unwrap());
    let maxiter = maxiter.unwrap_or(100);

    if m == 0 || n == 0 {
        return Ok(F::sparse_zero());
    }

    // For very small matrices, compute exactly
    if n <= 4 && m <= 4 {
        return exact_twonorm(a);
    }

    // Initialize with a random unit vector
    let mut rng = scirs2_core::random::rng();
    let mut v: Vec<F> = (0..n)
        .map(|_| F::from(rng.random::<f64>() - 0.5).unwrap())
        .collect();

    // Normalize the initial vector
    let v_norm = norm2(&v);
    if v_norm == F::sparse_zero() {
        return Ok(F::sparse_zero());
    }
    for v_elem in v.iter_mut() {
        *v_elem /= v_norm;
    }

    // Transpose once, outside the iteration loop
    let a_t = a.transpose();

    let mut lambda = F::sparse_zero();
    let mut lambda_old = F::sparse_zero();

    for iter in 0..maxiter {
        // Compute w = A * v
        let mut w = vec![F::sparse_zero(); m];
        for (row, w_val) in w.iter_mut().enumerate().take(m) {
            let row_range = a.row_range(row);
            let row_indices = &a.indices[row_range.clone()];
            let row_data = &a.data[row_range];

            let mut sum = F::sparse_zero();
            for (col_idx, &col) in row_indices.iter().enumerate() {
                sum += row_data[col_idx] * v[col];
            }
            *w_val = sum;
        }

        // Compute u = A^T * w
        let mut u = vec![F::sparse_zero(); n];
        for (row, u_val) in u.iter_mut().enumerate().take(a_t.rows()) {
            let row_range = a_t.row_range(row);
            let row_indices = &a_t.indices[row_range.clone()];
            let row_data = &a_t.data[row_range];

            let mut sum = F::sparse_zero();
            for (col_idx, &col) in row_indices.iter().enumerate() {
                sum += row_data[col_idx] * w[col];
            }
            *u_val = sum;
        }

        // Estimate the eigenvalue: λ = v^T * u
        lambda = dot(&v, &u);

        // Normalize u to get the new v
        let u_norm = norm2(&u);
        if u_norm == F::sparse_zero() {
            break;
        }

        for (i, u_val) in u.iter().enumerate() {
            v[i] = *u_val / u_norm;
        }

        // Check convergence
        if iter > 0 {
            let rel_change = if lambda_old != F::sparse_zero() {
                ((lambda - lambda_old) / lambda_old).abs()
            } else {
                lambda.abs()
            };

            if rel_change < tol {
                break;
            }
        }

        lambda_old = lambda;
    }

    // The 2-norm is the square root of the largest eigenvalue of A^T * A
    Ok(lambda.sqrt())
}

/// Estimate the condition number of a sparse matrix
///
/// This function estimates cond(A) = ||A||_2 * ||A^(-1)||_2 using norm estimation.
/// For efficiency, it estimates ||A^(-1)||_2 by solving (A^T * A) * x = b for random b
/// and using power iteration to estimate the smallest singular value.
///
/// # Arguments
///
/// * `a` - The sparse matrix
/// * `norm_type` - The norm to use: "1" for 1-norm, "2" for 2-norm (default: "2")
/// * `tol` - Convergence tolerance (default: 1e-6)
/// * `maxiter` - Maximum number of iterations (default: 100)
///
/// # Returns
///
/// An estimate of the condition number
///
/// # Examples
///
/// ```no_run
/// use scirs2_sparse::csr::CsrMatrix;
/// use scirs2_sparse::linalg::condest;
///
/// let rows = vec![0, 1];
/// let cols = vec![0, 1];
/// let data = vec![2.0, 3.0];
/// let matrix = CsrMatrix::new(data, rows, cols, (2, 2)).unwrap();
///
/// let cond_estimate = condest(&matrix, None, None, Some(1)).unwrap();
/// ```
#[allow(dead_code)]
pub fn condest<F>(
    a: &CsrMatrix<F>,
    norm_type: Option<&str>,
    tol: Option<F>,
    maxiter: Option<usize>,
) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let (m, n) = (a.rows(), a.cols());
    if m != n {
        return Err(crate::error::SparseError::ValueError(
            "Condition number estimation requires a square matrix".to_string(),
        ));
    }

    let norm_type = norm_type.unwrap_or("2");
    let tol = tol.unwrap_or_else(|| F::from(1e-6).unwrap());
    let maxiter = maxiter.unwrap_or(100);

    // Estimate ||A||
    let norm_a = match norm_type {
        "1" => onenormest(a, None, None)?,
        "2" => twonormest(a, Some(tol), Some(maxiter))?,
        _ => {
            return Err(crate::error::SparseError::ValueError(
                "norm_type must be '1' or '2'".to_string(),
            ))
        }
    };

    if norm_a == F::sparse_zero() {
        return Ok(F::infinity());
    }

    // Estimate ||A^(-1)|| using inverse power iteration
    // This estimates the smallest singular value σ_min, then ||A^(-1)||_2 = 1/σ_min
    let norm_a_inv = estimate_inverse_norm(a, norm_type, tol, maxiter)?;

    Ok(norm_a * norm_a_inv)
}

/// Estimate ||A^(-1)|| using inverse power iteration
#[allow(dead_code)]
fn estimate_inverse_norm<F>(
    a: &CsrMatrix<F>,
    _norm_type: &str,
    tol: F,
    maxiter: usize,
) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    // A true 1-norm estimate of A^(-1) would require solving linear systems,
    // so for now both norm types use the same 2-norm-based bound: estimate the
    // smallest singular value σ_min of A and use ||A^(-1)||_2 = 1/σ_min.
    estimate_smallest_singular_value(a, tol, maxiter).map(|sigma_min| {
        if sigma_min == F::sparse_zero() {
            F::infinity()
        } else {
            F::sparse_one() / sigma_min
        }
    })
}

/// Estimate the smallest singular value using inverse power iteration on A^T * A
#[allow(dead_code)]
fn estimate_smallest_singular_value<F>(a: &CsrMatrix<F>, tol: F, maxiter: usize) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let n = a.cols();

    // Initialize with a random unit vector
    let mut rng = scirs2_core::random::rng();
    let mut v: Vec<F> = (0..n)
        .map(|_| F::from(rng.random::<f64>() - 0.5).unwrap())
        .collect();

    // Normalize the initial vector
    let v_norm = norm2(&v);
    if v_norm == F::sparse_zero() {
        return Ok(F::sparse_zero());
    }
    for v_elem in v.iter_mut() {
        *v_elem /= v_norm;
    }

    let mut lambda = F::sparse_zero();
    let mut lambda_old = F::infinity();

    for iter in 0..maxiter {
        // We want the smallest eigenvalue of A^T * A. Inverse iteration solves
        // (A^T * A) * u = v and normalizes u, which is the power method applied
        // to (A^T * A)^(-1). Since we don't have a direct solver, approximate
        // the solve with a few steps of a simple iterative method.
        let u = solve_ata_approximately(a, &v, 5)?; // 5 inner iterations

        // Normalize u
        let u_norm = norm2(&u);
        if u_norm == F::sparse_zero() {
            break;
        }

        for (i, u_val) in u.iter().enumerate() {
            v[i] = *u_val / u_norm;
        }

        // Estimate the eigenvalue: λ = v^T * (A^T * A) * v
        lambda = estimate_rayleigh_quotient(a, &v)?;

        // Check convergence
        if iter > 0 {
            let rel_change = if lambda != F::sparse_zero() {
                ((lambda - lambda_old) / lambda).abs()
            } else {
                F::infinity()
            };

            if rel_change < tol {
                break;
            }
        }

        lambda_old = lambda;
    }

    // The smallest singular value is the square root of the smallest eigenvalue of A^T * A
    Ok(lambda.sqrt())
}

/// Approximately solve (A^T * A) * x = b using simple iteration
#[allow(dead_code)]
fn solve_ata_approximately<F>(
    a: &CsrMatrix<F>,
    b: &[F],
    num_iterations: usize,
) -> SparseResult<Vec<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let n = a.cols();
    let mut x = b.to_vec(); // Initial guess

    // Transpose once, outside the iteration loop
    let a_t = a.transpose();

    for _ in 0..num_iterations {
        // Compute A * x
        let mut ax = vec![F::sparse_zero(); a.rows()];
        for (row, ax_val) in ax.iter_mut().enumerate().take(a.rows()) {
            let row_range = a.row_range(row);
            let row_indices = &a.indices[row_range.clone()];
            let row_data = &a.data[row_range];

            let mut sum = F::sparse_zero();
            for (col_idx, &col) in row_indices.iter().enumerate() {
                sum += row_data[col_idx] * x[col];
            }
            *ax_val = sum;
        }

        // Compute A^T * (A * x)
        let mut ata_x = vec![F::sparse_zero(); n];
        for (row, ata_x_val) in ata_x.iter_mut().enumerate().take(a_t.rows()) {
            let row_range = a_t.row_range(row);
            let row_indices = &a_t.indices[row_range.clone()];
            let row_data = &a_t.data[row_range];

            let mut sum = F::sparse_zero();
            for (col_idx, &col) in row_indices.iter().enumerate() {
                sum += row_data[col_idx] * ax[col];
            }
            *ata_x_val = sum;
        }

        // Simple (Richardson) iteration: x = x - α * (A^T * A * x - b).
        // The fixed step size assumes ||A^T * A|| is modest; a robust solver
        // would use CG or a line search instead.
        let alpha = F::from(0.1).unwrap();
        for i in 0..n {
            x[i] -= alpha * (ata_x[i] - b[i]);
        }
    }

    Ok(x)
}

/// Estimate the Rayleigh quotient v^T * (A^T * A) * v
#[allow(dead_code)]
fn estimate_rayleigh_quotient<F>(a: &CsrMatrix<F>, v: &[F]) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    // Compute A * v
    let mut av = vec![F::sparse_zero(); a.rows()];
    for (row, av_val) in av.iter_mut().enumerate().take(a.rows()) {
        let row_range = a.row_range(row);
        let row_indices = &a.indices[row_range.clone()];
        let row_data = &a.data[row_range];

        let mut sum = F::sparse_zero();
        for (col_idx, &col) in row_indices.iter().enumerate() {
            sum += row_data[col_idx] * v[col];
        }
        *av_val = sum;
    }

    // Compute A^T * (A * v)
    let mut ata_v = vec![F::sparse_zero(); a.cols()];
    let a_t = a.transpose();
    for (row, ata_v_val) in ata_v.iter_mut().enumerate().take(a_t.rows()) {
        let row_range = a_t.row_range(row);
        let row_indices = &a_t.indices[row_range.clone()];
        let row_data = &a_t.data[row_range];

        let mut sum = F::sparse_zero();
        for (col_idx, &col) in row_indices.iter().enumerate() {
            sum += row_data[col_idx] * av[col];
        }
        *ata_v_val = sum;
    }

    // Return v^T * (A^T * A * v)
    Ok(dot(v, &ata_v))
}

/// Compute the exact 2-norm for small matrices
#[allow(dead_code)]
fn exact_twonorm<F>(a: &CsrMatrix<F>) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    // For small matrices, we can afford to compute all singular values
    // This is a simplified implementation - in practice, you'd use SVD

    let (m, n) = (a.rows(), a.cols());
    let min_dim = m.min(n);

    if min_dim == 0 {
        return Ok(F::sparse_zero());
    }

    if min_dim == 1 {
        // A single row or column: its only nonzero singular value is the
        // Euclidean norm of that row/column.
        let mut sum_sq = F::sparse_zero();
        for i in 0..m {
            for j in 0..n {
                let val = a.get(i, j);
                sum_sq += val * val;
            }
        }
        return Ok(sum_sq.sqrt());
    }

    // For small square matrices, use power iteration with high accuracy
    twonormest(a, Some(F::from(1e-12).unwrap()), Some(1000))
}

/// Estimate the 1-norm of a sparse matrix using a randomized algorithm
///
/// This function estimates ||A||_1 without explicitly computing all columns of A.
/// It uses a randomized algorithm that typically requires only a few matrix-vector products.
///
/// # Arguments
///
/// * `a` - The sparse matrix
/// * `t` - Number of search vectors used per iteration (default: 2)
/// * `itmax` - Maximum number of iterations of the algorithm (default: 5)
///
/// # Returns
///
/// An estimate of the 1-norm of the matrix
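///
/// # Examples
///
/// A minimal sketch on a small diagonal matrix (mirroring this module's
/// tests), where the 1-norm is the largest absolute column sum, here 4.0:
///
/// ```no_run
/// use scirs2_sparse::csr::CsrMatrix;
/// use scirs2_sparse::linalg::onenormest;
///
/// let rows = vec![0, 1, 2];
/// let cols = vec![0, 1, 2];
/// let data = vec![2.0, 3.0, 4.0];
/// let matrix = CsrMatrix::new(data, rows, cols, (3, 3)).unwrap();
///
/// let norm_estimate = onenormest(&matrix, None, None).unwrap();
/// ```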
#[allow(dead_code)]
pub fn onenormest<F>(a: &CsrMatrix<F>, t: Option<usize>, itmax: Option<usize>) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let n = a.cols();
    let t = t.unwrap_or(2);
    let itmax = itmax.unwrap_or(5);

    if n == 0 {
        return Ok(F::sparse_zero());
    }

    // Handle small matrices directly
    if n <= 4 {
        return exact_onenorm(a);
    }

    // Initialize with random ±1 vectors
    let mut rng = scirs2_core::random::rng();
    let mut x = vec![vec![F::sparse_zero(); n]; t];
    for x_j in x.iter_mut().take(t) {
        for x_elem in x_j.iter_mut().take(n) {
            *x_elem = if rng.random::<bool>() {
                F::sparse_one()
            } else {
                -F::sparse_one()
            };
        }
    }

    // Make sure the first column is the all-ones vector
    for i in 0..n {
        x[0][i] = F::sparse_one();
    }

    // Transpose once, outside the iteration loop
    let a_t = a.transpose();

    let mut est = F::sparse_zero();
    let mut est_old = F::sparse_zero();
    let mut ind_best = vec![0; t];
    let mut s = vec![false; n];

    for _ in 0..itmax {
        // Compute Y = A^T * X
        let mut y = vec![vec![F::sparse_zero(); n]; t];
        for j in 0..t {
            let mut y_j = vec![F::sparse_zero(); n];
            for (row, y_val) in y_j.iter_mut().enumerate().take(a_t.rows()) {
                let row_range = a_t.row_range(row);
                let row_indices = &a_t.indices[row_range.clone()];
                let row_data = &a_t.data[row_range];

                let mut sum = F::sparse_zero();
                for (col_idx, &col) in row_indices.iter().enumerate() {
                    sum += row_data[col_idx] * x[j][col];
                }
                *y_val = sum;
            }
            y[j] = y_j;
        }

        // Find the column of Y with maximum 1-norm
        let mut max_norm = F::sparse_zero();
        let mut max_col = 0;
        for (j, y_vec) in y.iter().enumerate().take(t) {
            let norm = onenorm_vec(y_vec);
            if norm > max_norm {
                max_norm = norm;
                max_col = j;
            }
        }

        est = max_norm;

        // Check convergence
        if est <= est_old {
            break;
        }
        est_old = est;

        // Find indices of the maximum absolute values in y[max_col]
        let mut abs_y: Vec<(F, usize)> = y[max_col]
            .iter()
            .enumerate()
            .map(|(i, &y_val)| (y_val.abs(), i))
            .collect();
        abs_y.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap());

        // Update ind_best with the largest elements not in S
        let mut count = 0;
        for (_, idx) in abs_y {
            if !s[idx] {
                ind_best[count] = idx;
                s[idx] = true;
                count += 1;
                if count >= t {
                    break;
                }
            }
        }

        // If we couldn't find enough new indices, we're done
        if count < t {
            break;
        }

        // Form Z = sign(y[max_col])
        let z: Vec<F> = y[max_col]
            .iter()
            .map(|&y_val| {
                if y_val >= F::sparse_zero() {
                    F::sparse_one()
                } else {
                    -F::sparse_one()
                }
            })
            .collect();

        // Compute w = A * Z
        let mut w = vec![F::sparse_zero(); a.rows()];
        for (row, w_val) in w.iter_mut().enumerate().take(a.rows()) {
            let row_range = a.row_range(row);
            let row_indices = &a.indices[row_range.clone()];
            let row_data = &a.data[row_range];

            let mut sum = F::sparse_zero();
            for (col_idx, &col) in row_indices.iter().enumerate() {
                sum += row_data[col_idx] * z[col];
            }
            *w_val = sum;
        }

        // Update x with unit vectors at positions ind_best
        for j in 0..t {
            for i in 0..n {
                x[j][i] = F::sparse_zero();
            }
            x[j][ind_best[j]] = F::sparse_one();
        }

        // Update the estimate
        let w_norm = onenorm_vec(&w);
        if w_norm > est {
            est = w_norm;
        }
    }

    Ok(est)
}

// Helper functions

/// Compute the exact 1-norm of a matrix
#[allow(dead_code)]
fn exact_onenorm<F>(a: &CsrMatrix<F>) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let n = a.cols();
    let mut max_norm = F::sparse_zero();

    for j in 0..n {
        let mut col_sum = F::sparse_zero();
        for i in 0..a.rows() {
            let val = a.get(i, j);
            if val != F::sparse_zero() {
                col_sum += val.abs();
            }
        }
        if col_sum > max_norm {
            max_norm = col_sum;
        }
    }

    Ok(max_norm)
}

/// Compute the 1-norm of a vector
#[allow(dead_code)]
fn onenorm_vec<F: Float + Sum>(x: &[F]) -> F {
    x.iter().map(|&xi| xi.abs()).sum()
}

/// Compute the dot product of two vectors
#[allow(dead_code)]
fn dot<F: Float + Sum>(x: &[F], y: &[F]) -> F {
    x.iter().zip(y).map(|(&xi, &yi)| xi * yi).sum()
}

/// Compute the 2-norm of a vector
#[allow(dead_code)]
fn norm2<F: Float + Sum>(x: &[F]) -> F {
    dot(x, x).sqrt()
}

/// Compute the infinity norm of a matrix
#[allow(dead_code)]
fn matrix_norm_inf<F: Float + SparseElement>(a: &[Vec<F>]) -> F {
    let mut max_norm = F::sparse_zero();
    for row in a {
        let row_sum: F = row
            .iter()
            .map(|&x| x.abs())
            .fold(F::sparse_zero(), |a, b| a + b);
        if row_sum > max_norm {
            max_norm = row_sum;
        }
    }
    max_norm
}

/// Create an identity matrix
#[allow(dead_code)]
fn identity_matrix<F: Float + SparseElement>(n: usize) -> Vec<Vec<F>> {
    let mut identity = vec![vec![F::sparse_zero(); n]; n];
    for (i, row) in identity.iter_mut().enumerate().take(n) {
        row[i] = F::sparse_one();
    }
    identity
}

/// Create a zero matrix
#[allow(dead_code)]
fn zero_matrix<F: Float + SparseElement>(n: usize) -> Vec<Vec<F>> {
    vec![vec![F::sparse_zero(); n]; n]
}

/// Add a scaled matrix to another: A += alpha * B
#[allow(dead_code)]
fn add_scaled_matrix<F: Float + NumAssign>(a: &mut [Vec<F>], b: &[Vec<F>], alpha: F) {
    let n = a.len();
    for i in 0..n {
        for j in 0..n {
            a[i][j] += alpha * b[i][j];
        }
    }
}

/// Multiply two dense matrices
#[allow(dead_code)]
fn matrix_multiply_dense<F: Float + NumAssign + SparseElement>(
    a: &[Vec<F>],
    b: &[Vec<F>],
) -> SparseResult<Vec<Vec<F>>> {
    let n = a.len();
    let mut c = vec![vec![F::sparse_zero(); n]; n];

    for (i, c_row) in c.iter_mut().enumerate().take(n) {
        for (j, c_val) in c_row.iter_mut().enumerate().take(n) {
            for (k, &a_val) in a[i].iter().enumerate().take(n) {
                *c_val += a_val * b[k][j];
            }
        }
    }

    Ok(c)
}

/// Solve the matrix equation AX = B for X
#[allow(dead_code)]
fn solve_matrix_equation<F: Float + NumAssign + SparseElement>(
    a: &[Vec<F>],
    b: &[Vec<F>],
) -> SparseResult<Vec<Vec<F>>> {
    let n = a.len();

    // LU decomposition (no pivoting, so well-conditioned pivots are assumed;
    // adequate for the denominators produced by the Padé approximation)
    let mut l = vec![vec![F::sparse_zero(); n]; n];
    let mut u = a.to_vec();

    for (i, l_row) in l.iter_mut().enumerate().take(n) {
        l_row[i] = F::sparse_one();
    }

    // Gaussian elimination
    if n > 1 {
        for k in 0..n - 1 {
            if u[k][k].abs() < F::epsilon() {
                return Err(crate::error::SparseError::SingularMatrix(
                    "Matrix is singular".to_string(),
                ));
            }
            for i in k + 1..n {
                let factor = u[i][k] / u[k][k];
                l[i][k] = factor;
                for j in k..n {
                    u[i][j] = u[i][j] - factor * u[k][j];
                }
            }
        }
    }

    // Solve LY = B for Y (forward substitution)
    let mut y = vec![vec![F::sparse_zero(); n]; n];
    for j in 0..n {
        for i in 0..n {
            let mut sum = b[i][j];
            for (k, &l_val) in l[i].iter().enumerate().take(i) {
                sum -= l_val * y[k][j];
            }
            y[i][j] = sum;
        }
    }

    // Solve UX = Y for X (back substitution)
    let mut x = vec![vec![F::sparse_zero(); n]; n];
    for j in 0..n {
        for i in (0..n).rev() {
            let mut sum = y[i][j];
            for (k, &u_val) in u[i].iter().enumerate().skip(i + 1).take(n - i - 1) {
                sum -= u_val * x[k][j];
            }
            if u[i][i].abs() < F::epsilon() {
                return Err(crate::error::SparseError::SingularMatrix(
                    "Matrix is singular".to_string(),
                ));
            }
            x[i][j] = sum / u[i][i];
        }
    }

    Ok(x)
}

/// Enhanced 2-norm estimation for sparse arrays using power iteration
///
/// This function estimates ||A||_2 = σ_max(A), the largest singular value of A,
/// using power iteration on A^T * A. This version works with the newer CsrArray
/// and SparseArray types and includes improved convergence criteria.
///
/// # Arguments
///
/// * `a` - The sparse array (must implement the SparseArray trait)
/// * `tol` - Convergence tolerance (default: 1e-8)
/// * `maxiter` - Maximum number of power iterations (default: 100)
/// * `initial_guess` - Optional initial guess vector
///
/// # Returns
///
/// An estimate of the 2-norm of the matrix
///
/// # Examples
///
/// ```no_run
/// use scirs2_sparse::csr_array::CsrArray;
/// use scirs2_sparse::linalg::twonormest_enhanced;
///
/// let rows = vec![0, 1];
/// let cols = vec![0, 1];
/// let data = vec![2.0, 3.0];
/// let matrix = CsrArray::from_triplets(&rows, &cols, &data, (2, 2), false).unwrap();
///
/// let norm_estimate = twonormest_enhanced(&matrix, None, Some(1), None).unwrap();
/// ```
#[allow(dead_code)]
pub fn twonormest_enhanced<T, S>(
    a: &S,
    tol: Option<T>,
    maxiter: Option<usize>,
    initial_guess: Option<ArrayView1<T>>,
) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    let (m, n) = a.shape();
    let tol = tol.unwrap_or_else(|| T::from(1e-8).unwrap());
    let maxiter = maxiter.unwrap_or(100);

    if m == 0 || n == 0 {
        return Ok(T::sparse_zero());
    }

    // For very small matrices, use a more accurate computation
    if n <= 4 && m <= 4 {
        return exact_twonorm_enhanced(a);
    }

    // Initialize with the provided guess or a random unit vector
    let mut v = match initial_guess {
        Some(guess) => {
            if guess.len() != n {
                return Err(crate::error::SparseError::DimensionMismatch {
                    expected: n,
                    found: guess.len(),
                });
            }
            guess.to_owned()
        }
        None => {
            let mut rng = scirs2_core::random::rng();
            let mut v_arr = Array1::zeros(n);
            for i in 0..n {
                v_arr[i] = T::from(rng.random::<f64>() - 0.5).unwrap();
            }
            v_arr
        }
    };

    // Normalize the initial vector
    let v_norm = (v.iter().map(|&x| x * x).sum::<T>()).sqrt();
    if v_norm == T::sparse_zero() {
        return Ok(T::sparse_zero());
    }
    for i in 0..n {
        v[i] /= v_norm;
    }

    let mut lambda = T::sparse_zero();
    let mut lambda_old = T::sparse_zero();

    for iter in 0..maxiter {
        // Compute w = A * v using a sparse matrix-vector product
        let w = sparse_matvec(a, &v.view())?;

        // Compute u = A^T * w using a sparse matrix-vector product with the transpose
        let u = sparse_matvec_transpose(a, &w.view())?;

        // Estimate the eigenvalue: λ = v^T * u (Rayleigh quotient for A^T * A)
        lambda = v.iter().zip(u.iter()).map(|(&vi, &ui)| vi * ui).sum();

        // Normalize u to get the new v
        let u_norm = (u.iter().map(|&x| x * x).sum::<T>()).sqrt();
        if u_norm == T::sparse_zero() {
            break;
        }

        for i in 0..n {
            v[i] = u[i] / u_norm;
        }

        // Check convergence using the relative change
        if iter > 0 {
            let rel_change = if lambda_old != T::sparse_zero() {
                ((lambda - lambda_old) / lambda_old).abs()
            } else {
                lambda.abs()
            };

            if rel_change < tol {
                break;
            }
        }

        lambda_old = lambda;
    }

    // The 2-norm is the square root of the largest eigenvalue of A^T * A
    Ok(lambda.sqrt())
}

/// Enhanced condition number estimation for sparse arrays
///
/// This function estimates cond(A) = ||A||_2 * ||A^(-1)||_2 using enhanced norm estimation.
/// It provides better accuracy and works with the newer SparseArray trait.
///
/// # Arguments
///
/// * `a` - The sparse array (must be square)
/// * `norm_type` - The norm to use: "1" for 1-norm, "2" for 2-norm (default: "2")
/// * `tol` - Convergence tolerance (default: 1e-8)
/// * `maxiter` - Maximum number of iterations (default: 100)
///
/// # Returns
///
/// An estimate of the condition number
///
/// # Examples
///
/// ```no_run
/// use scirs2_sparse::csr_array::CsrArray;
/// use scirs2_sparse::linalg::condest_enhanced;
///
/// let rows = vec![0, 1];
/// let cols = vec![0, 1];
/// let data = vec![2.0, 3.0];
/// let matrix = CsrArray::from_triplets(&rows, &cols, &data, (2, 2), false).unwrap();
///
/// let cond_estimate = condest_enhanced(&matrix, None, None, Some(1)).unwrap();
/// ```
#[allow(dead_code)]
pub fn condest_enhanced<T, S>(
    a: &S,
    norm_type: Option<&str>,
    tol: Option<T>,
    maxiter: Option<usize>,
) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    let (m, n) = a.shape();
    if m != n {
        return Err(crate::error::SparseError::ValueError(
            "Condition number estimation requires a square matrix".to_string(),
        ));
    }

    let norm_type = norm_type.unwrap_or("2");
    let tol = tol.unwrap_or_else(|| T::from(1e-8).unwrap());
    let maxiter = maxiter.unwrap_or(100);

    // Estimate ||A||
    let norm_a = match norm_type {
        "2" => twonormest_enhanced(a, Some(tol), Some(maxiter), None)?,
        "1" => onenormest_enhanced(a, None, None)?,
        _ => {
            return Err(crate::error::SparseError::ValueError(
                "norm_type must be '1' or '2'".to_string(),
            ))
        }
    };

    if norm_a == T::sparse_zero() {
        return Ok(T::infinity());
    }

    // Estimate ||A^(-1)|| using inverse power iteration
    let norm_a_inv = estimate_inverse_norm_enhanced(a, norm_type, tol, maxiter)?;

    Ok(norm_a * norm_a_inv)
}

/// Enhanced 1-norm estimation for sparse arrays
///
/// This function estimates ||A||_1 using a randomized algorithm optimized for sparse matrices.
/// It works with the newer SparseArray trait and includes performance improvements.
///
/// # Arguments
///
/// * `a` - The sparse array
/// * `t` - Number of random vectors to use (default: 2)
/// * `itmax` - Maximum number of iterations (default: 5)
///
/// # Returns
///
/// An estimate of the 1-norm of the matrix
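///
/// # Examples
///
/// A minimal sketch following the same `CsrArray` construction as the other
/// `_enhanced` examples in this module:
///
/// ```no_run
/// use scirs2_sparse::csr_array::CsrArray;
/// use scirs2_sparse::linalg::onenormest_enhanced;
///
/// let rows = vec![0, 1];
/// let cols = vec![0, 1];
/// let data = vec![2.0, 3.0];
/// let matrix = CsrArray::from_triplets(&rows, &cols, &data, (2, 2), false).unwrap();
///
/// let norm_estimate = onenormest_enhanced(&matrix, None, Some(1)).unwrap();
/// ```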
#[allow(dead_code)]
pub fn onenormest_enhanced<T, S>(a: &S, t: Option<usize>, itmax: Option<usize>) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static,
    S: SparseArray<T>,
{
    let (_m, n) = a.shape();
    let t = t.unwrap_or(2);
    let itmax = itmax.unwrap_or(5);

    if n == 0 {
        return Ok(T::sparse_zero());
    }

    // Handle small matrices exactly
    if n <= 4 {
        return exact_onenorm_enhanced(a);
    }

    // Initialize with random ±1 vectors
    let mut rng = scirs2_core::random::rng();
    let mut x_vectors = Vec::with_capacity(t);

    for _ in 0..t {
        let mut x = Array1::zeros(n);
        for i in 0..n {
            x[i] = if rng.random::<bool>() {
                T::sparse_one()
            } else {
                -T::sparse_one()
            };
        }
        x_vectors.push(x);
    }

    // The first vector is the all-ones vector for better convergence
    if !x_vectors.is_empty() {
        for i in 0..n {
            x_vectors[0][i] = T::sparse_one();
        }
    }

    let mut est = T::sparse_zero();
    let mut est_old = T::sparse_zero();

    for _iter in 0..itmax {
        // Compute Y = A^T * X
        let mut y_vectors = Vec::with_capacity(t);
        for x in &x_vectors {
            let y = sparse_matvec_transpose(a, &x.view())?;
            y_vectors.push(y);
        }

        // Find the column of Y with maximum 1-norm
        let mut max_norm = T::sparse_zero();
        let mut max_col = 0;
        for (j, y) in y_vectors.iter().enumerate() {
            let norm = y.iter().map(|&val| val.abs()).sum();
            if norm > max_norm {
                max_norm = norm;
                max_col = j;
            }
        }

        est = max_norm;

        // Check convergence
        if est <= est_old {
            break;
        }
        est_old = est;

        // Update X based on the signs of Y[max_col]
        x_vectors.clear();
        let mut x = Array1::zeros(n);
        for i in 0..n {
            x[i] = if y_vectors[max_col][i] >= T::sparse_zero() {
                T::sparse_one()
            } else {
                -T::sparse_one()
            };
        }
        x_vectors.push(x);

        // Add additional random vectors if needed
        for _ in 1..t {
            let mut x = Array1::zeros(n);
            for i in 0..n {
                x[i] = if rng.random::<bool>() {
                    T::sparse_one()
                } else {
                    -T::sparse_one()
                };
            }
            x_vectors.push(x);
        }
    }

    Ok(est)
}

/// Estimate ||A^(-1)|| using enhanced inverse power iteration
#[allow(dead_code)]
fn estimate_inverse_norm_enhanced<T, S>(
    a: &S,
    _norm_type: &str,
    tol: T,
    maxiter: usize,
) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    // For both the 1-norm and the 2-norm, estimate the smallest singular value of A
    let sigma_min = estimate_smallest_singular_value_enhanced(a, tol, maxiter)?;

    if sigma_min == T::sparse_zero() {
        Ok(T::infinity())
    } else {
        Ok(T::sparse_one() / sigma_min)
    }
}

/// Estimate the smallest singular value using enhanced inverse power iteration
#[allow(dead_code)]
fn estimate_smallest_singular_value_enhanced<T, S>(a: &S, tol: T, maxiter: usize) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    let (_, n) = a.shape();

    // Initialize with a random unit vector
    let mut rng = scirs2_core::random::rng();
    let mut v = Array1::zeros(n);
    for i in 0..n {
        v[i] = T::from(rng.random::<f64>() - 0.5).unwrap();
    }

    // Normalize the initial vector
    let v_norm = (v.iter().map(|&x| x * x).sum::<T>()).sqrt();
    if v_norm == T::sparse_zero() {
        return Ok(T::sparse_zero());
    }
    for i in 0..n {
        v[i] /= v_norm;
    }

    let mut lambda = T::sparse_zero();
    let mut lambda_old = T::infinity();

    for iter in 0..maxiter {
        // We want the smallest eigenvalue of A^T * A.
        // Inverse iteration: solve (A^T * A) * x = v for x
        let u = solve_ata_system_enhanced(a, &v, 10)?; // 10 inner iterations

        // Normalize u
        let u_norm = (u.iter().map(|&x| x * x).sum::<T>()).sqrt();
        if u_norm == T::sparse_zero() {
            break;
        }

        for i in 0..n {
            v[i] = u[i] / u_norm;
        }

        // Estimate the eigenvalue: λ = v^T * (A^T * A) * v using the Rayleigh quotient
        lambda = estimate_rayleigh_quotient_enhanced(a, &v)?;

        // Check convergence
        if iter > 0 {
            let rel_change = if lambda != T::sparse_zero() {
                ((lambda - lambda_old) / lambda).abs()
            } else {
                T::infinity()
            };

            if rel_change < tol {
                break;
            }
        }

        lambda_old = lambda;
    }

    // The smallest singular value is the square root of the smallest eigenvalue of A^T * A
    Ok(lambda.sqrt())
}

/// Enhanced sparse matrix-vector product
#[allow(dead_code)]
fn sparse_matvec<T, S>(a: &S, x: &ArrayView1<T>) -> SparseResult<Array1<T>>
where
    T: Float + SparseElement + Debug + Copy + Add<Output = T> + Mul<Output = T> + 'static,
    S: SparseArray<T>,
{
    let (m, n) = a.shape();
    if x.len() != n {
        return Err(crate::error::SparseError::DimensionMismatch {
            expected: n,
            found: x.len(),
        });
    }

    let mut result = Array1::zeros(m);
    let (row_indices, col_indices, values) = a.find();

    for (k, (&i, &j)) in row_indices.iter().zip(col_indices.iter()).enumerate() {
        result[i] = result[i] + values[k] * x[j];
    }

    Ok(result)
}

/// Enhanced sparse matrix-vector product with transpose
#[allow(dead_code)]
fn sparse_matvec_transpose<T, S>(a: &S, x: &ArrayView1<T>) -> SparseResult<Array1<T>>
where
    T: Float + SparseElement + Debug + Copy + Add<Output = T> + Mul<Output = T> + 'static,
    S: SparseArray<T>,
{
    let (m, n) = a.shape();
    if x.len() != m {
        return Err(crate::error::SparseError::DimensionMismatch {
            expected: m,
            found: x.len(),
        });
    }

    let mut result = Array1::zeros(n);
    let (row_indices, col_indices, values) = a.find();

    for (k, (&i, &j)) in row_indices.iter().zip(col_indices.iter()).enumerate() {
        result[j] = result[j] + values[k] * x[i];
    }

    Ok(result)
}

/// Approximately solve (A^T * A) * x = b using an enhanced iterative method
#[allow(dead_code)]
fn solve_ata_system_enhanced<T, S>(
    a: &S,
    b: &Array1<T>,
    num_iterations: usize,
) -> SparseResult<Array1<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    let (_, n) = a.shape();
    let mut x = b.clone(); // Initial guess

    for _ in 0..num_iterations {
        // Compute A^T * A * x
        let ax = sparse_matvec(a, &x.view())?;
        let ata_x = sparse_matvec_transpose(a, &ax.view())?;

        // Simple iteration: x = x - α * (A^T * A * x - b)
        let alpha = T::from(0.1).unwrap(); // Conservative step size
        for i in 0..n {
            x[i] = x[i] - alpha * (ata_x[i] - b[i]);
        }
    }

    Ok(x)
}

/// Estimate the Rayleigh quotient v^T * (A^T * A) * v with enhanced accuracy
#[allow(dead_code)]
fn estimate_rayleigh_quotient_enhanced<T, S>(a: &S, v: &Array1<T>) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Mul<Output = T>
        + 'static
        + std::iter::Sum,
    S: SparseArray<T>,
{
    // Compute A * v
    let av = sparse_matvec(a, &v.view())?;

    // Compute A^T * (A * v)
    let ata_v = sparse_matvec_transpose(a, &av.view())?;

    // Return v^T * (A^T * A * v)
    Ok(v.iter().zip(ata_v.iter()).map(|(&vi, &ai)| vi * ai).sum())
}

/// Compute the exact 2-norm for small sparse arrays
#[allow(dead_code)]
fn exact_twonorm_enhanced<T, S>(a: &S) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    let (m, n) = a.shape();
    let min_dim = m.min(n);

    if min_dim == 0 {
        return Ok(T::sparse_zero());
    }

    if min_dim == 1 {
        // A single row or column: its only nonzero singular value is the
        // Euclidean norm of the stored entries.
        let (_, _, values) = a.find();
        let sum_sq = values
            .iter()
            .fold(T::sparse_zero(), |acc, &v| acc + v * v);
        return Ok(sum_sq.sqrt());
    }

    // For small matrices, use high-precision power iteration
    twonormest_enhanced(a, Some(T::from(1e-12).unwrap()), Some(1000), None)
}

/// Compute the exact 1-norm for small sparse arrays
#[allow(dead_code)]
fn exact_onenorm_enhanced<T, S>(a: &S) -> SparseResult<T>
where
    T: Float + SparseElement + Debug + Copy + Add<Output = T> + 'static,
    S: SparseArray<T>,
{
    let (_, n) = a.shape();

    // Accumulate the absolute column sums in a single pass over the nonzeros
    let mut col_sums = vec![T::sparse_zero(); n];
    let (_row_indices, col_indices, values) = a.find();
    for (k, &j) in col_indices.iter().enumerate() {
        col_sums[j] = col_sums[j] + values[k].abs();
    }

    // The 1-norm is the maximum absolute column sum
    Ok(col_sums
        .into_iter()
        .fold(T::sparse_zero(), |acc, s| if s > acc { s } else { acc }))
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::linalg::interface::{IdentityOperator, ScaledIdentityOperator};

    #[test]
    fn test_expm_multiply_identity() {
        // exp(t*I) * v = exp(t) * v
        let identity = IdentityOperator::<f64>::new(3);
        let v = vec![1.0, 2.0, 3.0];
        let t = 1.0;

        let result = expm_multiply(&identity, &v, t, None, None).unwrap();

        let exp_t = t.exp();
        let expected: Vec<f64> = v.iter().map(|&vi| exp_t * vi).collect();

        for (ri, ei) in result.iter().zip(&expected) {
            assert!((ri - ei).abs() < 1e-10);
        }
    }

    #[test]
    fn test_expm_multiply_scaled_identity() {
        // exp(t*alpha*I) * v = exp(t*alpha) * v
        let alpha = 2.0;
        let scaled_identity = ScaledIdentityOperator::new(3, alpha);
        let v = vec![1.0, 2.0, 3.0];
        let t = 0.5;

        let result = expm_multiply(&scaled_identity, &v, t, None, None).unwrap();

        let exp_t_alpha = (t * alpha).exp();
        let expected: Vec<f64> = v.iter().map(|&vi| exp_t_alpha * vi).collect();

        for (ri, ei) in result.iter().zip(&expected) {
            assert!((ri - ei).abs() < 1e-6);
        }
    }

    #[test]
    fn test_expm_multiply_zero_time() {
        // exp(0*A) * v = v
        let identity = IdentityOperator::<f64>::new(3);
        let v = vec![1.0, 2.0, 3.0];
        let t = 0.0;

        let result = expm_multiply(&identity, &v, t, None, None).unwrap();

        for (ri, vi) in result.iter().zip(&v) {
            assert!((ri - vi).abs() < 1e-10);
        }
    }

    #[test]
    fn test_onenormest_small_matrix() {
        // For small matrices, it should compute the exact norm
        let rows = vec![0, 1, 2];
        let cols = vec![0, 1, 2];
        let data = vec![2.0, 3.0, 4.0];
        let shape = (3, 3);

        let matrix = CsrMatrix::new(data, rows, cols, shape).unwrap();
        let estimate = onenormest(&matrix, None, None).unwrap();

        // For a diagonal matrix, the 1-norm is the maximum absolute diagonal element
        assert!((estimate - 4.0).abs() < 1e-10);
    }
}