quantrs2_sim/scirs2_sparse.rs

//! SciRS2-optimized sparse matrix solvers for large quantum systems.
//!
//! This module provides sparse matrix operations optimized using `SciRS2`'s
//! sparse linear algebra capabilities. It includes sparse Hamiltonian
//! evolution, linear system solving, eigenvalue problems, and optimization
//! routines for large-scale quantum simulations.
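//!
//! A minimal usage sketch (solving a linear system against the identity,
//! mirroring the unit tests below; the `quantrs2_sim` crate path is an
//! assumption):
//!
//! ```ignore
//! use quantrs2_sim::scirs2_sparse::{
//!     SciRS2SparseSolver, SparseMatrix, SparseSolverConfig, SparseSolverMethod,
//! };
//! use scirs2_core::ndarray::Array1;
//! use scirs2_core::Complex64;
//!
//! let matrix = SparseMatrix::identity(5);
//! let rhs = Array1::from_vec((0..5).map(|i| Complex64::new(i as f64, 0.0)).collect());
//! let config = SparseSolverConfig {
//!     method: SparseSolverMethod::CG,
//!     ..Default::default()
//! };
//! let mut solver = SciRS2SparseSolver::new(config).expect("solver creation");
//! let solution = solver.solve_linear_system(&matrix, &rhs).expect("solve");
//! ```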

use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

use crate::error::{Result, SimulatorError};
use crate::scirs2_integration::SciRS2Backend;
use crate::sparse::SparseMatrixBuilder;

/// Sparse matrix storage format
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SparseFormat {
    /// Compressed Sparse Row format
    CSR,
    /// Compressed Sparse Column format
    CSC,
    /// Coordinate format (COO)
    COO,
    /// Diagonal format
    DIA,
    /// Block Sparse Row format
    BSR,
}

/// Sparse solver method
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SparseSolverMethod {
    /// Direct methods
    LU,
    QR,
    Cholesky,
    /// Iterative methods
    CG,
    GMRES,
    BiCGSTAB,
    /// Eigenvalue solvers
    Arnoldi,
    Lanczos,
    LOBPCG,
    /// `SciRS2` optimized methods
    SciRS2Auto,
    SciRS2Iterative,
    SciRS2Direct,
}

/// Preconditioner type
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Preconditioner {
    None,
    Jacobi,
    ILU,
    AMG,
    SciRS2Auto,
}

/// Sparse solver configuration
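///
/// Individual fields can be overridden with struct-update syntax, as the
/// benchmark helpers at the bottom of this module do:
///
/// ```ignore
/// let config = SparseSolverConfig {
///     method: SparseSolverMethod::GMRES,
///     tolerance: 1e-10,
///     ..Default::default()
/// };
/// ```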
#[derive(Debug, Clone)]
pub struct SparseSolverConfig {
    /// Solver method to use
    pub method: SparseSolverMethod,
    /// Preconditioner
    pub preconditioner: Preconditioner,
    /// Convergence tolerance
    pub tolerance: f64,
    /// Maximum iterations
    pub max_iterations: usize,
    /// Number of restart iterations for GMRES
    pub restart: usize,
    /// Use parallel execution
    pub parallel: bool,
    /// Memory limit in bytes
    pub memory_limit: usize,
}

impl Default for SparseSolverConfig {
    fn default() -> Self {
        Self {
            method: SparseSolverMethod::SciRS2Auto,
            preconditioner: Preconditioner::SciRS2Auto,
            tolerance: 1e-12,
            max_iterations: 1000,
            restart: 30,
            parallel: true,
            memory_limit: 8 * 1024 * 1024 * 1024, // 8 GiB
        }
    }
}

/// Sparse solver execution statistics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SparseSolverStats {
    /// Execution time in milliseconds
    pub execution_time_ms: f64,
    /// Number of iterations performed
    pub iterations: usize,
    /// Final residual norm
    pub residual_norm: f64,
    /// Convergence achieved
    pub converged: bool,
    /// Memory usage in bytes
    pub memory_usage_bytes: usize,
    /// Number of matrix-vector multiplications
    pub matvec_count: usize,
    /// Method used for solving
    pub method_used: String,
    /// Preconditioner setup time in milliseconds
    pub preconditioner_time_ms: f64,
}

/// Sparse eigenvalue problem result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SparseEigenResult {
    /// Eigenvalues (sorted in ascending order)
    pub eigenvalues: Vec<f64>,
    /// Corresponding eigenvectors
    pub eigenvectors: Array2<Complex64>,
    /// Number of converged eigenvalues
    pub converged_count: usize,
    /// Solver statistics
    pub stats: SparseSolverStats,
}

/// SciRS2-optimized sparse matrix operations
pub struct SciRS2SparseSolver {
    /// `SciRS2` backend
    backend: Option<SciRS2Backend>,
    /// Solver configuration
    config: SparseSolverConfig,
    /// Execution statistics
    stats: SparseSolverStats,
    /// Cached sparse matrices
    matrix_cache: HashMap<String, SparseMatrix>,
    /// Preconditioner cache
    preconditioner_cache: HashMap<String, Preconditioner>,
}

/// Sparse matrix representation
#[derive(Debug, Clone)]
pub struct SparseMatrix {
    /// Matrix dimensions
    pub shape: (usize, usize),
    /// Storage format
    pub format: SparseFormat,
    /// Row pointers (for CSR)
    pub row_ptr: Vec<usize>,
    /// Column indices
    pub col_indices: Vec<usize>,
    /// Matrix values
    pub values: Vec<Complex64>,
    /// Number of non-zero elements
    pub nnz: usize,
    /// Is Hermitian
    pub is_hermitian: bool,
    /// Is positive definite
    pub is_positive_definite: bool,
}

impl SparseMatrix {
    /// Create new sparse matrix
    #[must_use]
    pub const fn new(shape: (usize, usize), format: SparseFormat) -> Self {
        Self {
            shape,
            format,
            row_ptr: Vec::new(),
            col_indices: Vec::new(),
            values: Vec::new(),
            nnz: 0,
            is_hermitian: false,
            is_positive_definite: false,
        }
    }

    /// Create from CSR format
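    ///
    /// In CSR form, `row_ptr[i]..row_ptr[i + 1]` indexes the entries of row
    /// `i` in `col_indices`/`values`. A sketch for the 2x2 matrix
    /// `[[1, 2], [0, 3]]`:
    ///
    /// ```ignore
    /// let matrix = SparseMatrix::from_csr(
    ///     (2, 2),
    ///     vec![0, 2, 3], // row 0 holds entries 0..2, row 1 holds entry 2..3
    ///     vec![0, 1, 1], // column of each stored entry
    ///     vec![
    ///         Complex64::new(1.0, 0.0),
    ///         Complex64::new(2.0, 0.0),
    ///         Complex64::new(3.0, 0.0),
    ///     ],
    /// );
    /// assert_eq!(matrix.nnz, 3);
    /// ```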
    #[must_use]
    pub fn from_csr(
        shape: (usize, usize),
        row_ptr: Vec<usize>,
        col_indices: Vec<usize>,
        values: Vec<Complex64>,
    ) -> Self {
        let nnz = values.len();
        Self {
            shape,
            format: SparseFormat::CSR,
            row_ptr,
            col_indices,
            values,
            nnz,
            is_hermitian: false,
            is_positive_definite: false,
        }
    }

    /// Create identity matrix
    #[must_use]
    pub fn identity(n: usize) -> Self {
        let mut row_ptr = vec![0; n + 1];
        let mut col_indices = Vec::with_capacity(n);
        let mut values = Vec::with_capacity(n);

        for i in 0..n {
            row_ptr[i + 1] = i + 1;
            col_indices.push(i);
            values.push(Complex64::new(1.0, 0.0));
        }

        Self {
            shape: (n, n),
            format: SparseFormat::CSR,
            row_ptr,
            col_indices,
            values,
            nnz: n,
            is_hermitian: true,
            is_positive_definite: true,
        }
    }

    /// Matrix-vector multiplication
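    ///
    /// Computes `y = A x` row by row; only CSR storage is currently
    /// supported. A sketch using the identity:
    ///
    /// ```ignore
    /// let matrix = SparseMatrix::identity(3);
    /// let x = Array1::from_vec(vec![Complex64::new(1.0, 0.0); 3]);
    /// let y = matrix.matvec(&x).expect("dimensions match");
    /// assert_eq!(y.len(), 3);
    /// ```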
    pub fn matvec(&self, x: &Array1<Complex64>) -> Result<Array1<Complex64>> {
        if x.len() != self.shape.1 {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Vector length {} doesn't match matrix columns {}",
                x.len(),
                self.shape.1
            )));
        }

        let mut y = Array1::zeros(self.shape.0);

        match self.format {
            SparseFormat::CSR => {
                for i in 0..self.shape.0 {
                    let mut sum = Complex64::new(0.0, 0.0);
                    for j in self.row_ptr[i]..self.row_ptr[i + 1] {
                        sum += self.values[j] * x[self.col_indices[j]];
                    }
                    y[i] = sum;
                }
            }
            _ => {
                return Err(SimulatorError::UnsupportedOperation(format!(
                    "Matrix-vector multiplication not implemented for {:?}",
                    self.format
                )));
            }
        }

        Ok(y)
    }

    /// Get matrix density (nnz / total elements)
    #[must_use]
    pub fn density(&self) -> f64 {
        self.nnz as f64 / (self.shape.0 * self.shape.1) as f64
    }

    /// Check if matrix is square
    #[must_use]
    pub const fn is_square(&self) -> bool {
        self.shape.0 == self.shape.1
    }

    /// Convert to dense matrix (for small matrices only)
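    ///
    /// Guards against materializing more than 10^7 elements
    /// (roughly 160 MB of `Complex64` values).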
    pub fn to_dense(&self) -> Result<Array2<Complex64>> {
        if self.shape.0 * self.shape.1 > 10_000_000 {
            return Err(SimulatorError::InvalidInput(
                "Matrix too large to convert to dense format".to_string(),
            ));
        }

        let mut dense = Array2::zeros(self.shape);

        match self.format {
            SparseFormat::CSR => {
                for i in 0..self.shape.0 {
                    for j in self.row_ptr[i]..self.row_ptr[i + 1] {
                        dense[[i, self.col_indices[j]]] = self.values[j];
                    }
                }
            }
            _ => {
                return Err(SimulatorError::UnsupportedOperation(format!(
                    "Dense conversion not implemented for {:?}",
                    self.format
                )));
            }
        }

        Ok(dense)
    }
}

impl SciRS2SparseSolver {
    /// Create new sparse solver
    pub fn new(config: SparseSolverConfig) -> Result<Self> {
        Ok(Self {
            backend: None,
            config,
            stats: SparseSolverStats::default(),
            matrix_cache: HashMap::new(),
            preconditioner_cache: HashMap::new(),
        })
    }

    /// Initialize with `SciRS2` backend
    pub fn with_backend(mut self) -> Result<Self> {
        self.backend = Some(SciRS2Backend::new());
        Ok(self)
    }

    /// Solve linear system Ax = b
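    ///
    /// Dispatches on the configured [`SparseSolverMethod`]: the `SciRS2*`
    /// variants fall back to CG/GMRES when no backend is attached, while the
    /// plain direct methods (`LU`, `QR`, `Cholesky`) currently return
    /// `UnsupportedOperation`.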
    pub fn solve_linear_system(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        let start_time = std::time::Instant::now();

        if !matrix.is_square() {
            return Err(SimulatorError::InvalidInput(
                "Matrix must be square for linear system solving".to_string(),
            ));
        }

        if rhs.len() != matrix.shape.0 {
            return Err(SimulatorError::DimensionMismatch(format!(
                "RHS vector length {} doesn't match matrix size {}",
                rhs.len(),
                matrix.shape.0
            )));
        }

        let solution = match self.config.method {
            SparseSolverMethod::SciRS2Auto => self.solve_scirs2_auto(matrix, rhs)?,
            SparseSolverMethod::SciRS2Direct => self.solve_scirs2_direct(matrix, rhs)?,
            SparseSolverMethod::SciRS2Iterative => self.solve_scirs2_iterative(matrix, rhs)?,
            SparseSolverMethod::CG => self.solve_conjugate_gradient(matrix, rhs)?,
            SparseSolverMethod::GMRES => self.solve_gmres(matrix, rhs)?,
            SparseSolverMethod::BiCGSTAB => self.solve_bicgstab(matrix, rhs)?,
            _ => {
                return Err(SimulatorError::UnsupportedOperation(format!(
                    "Solver method {:?} not implemented",
                    self.config.method
                )));
            }
        };

        self.stats.execution_time_ms = start_time.elapsed().as_secs_f64() * 1000.0;

        Ok(solution)
    }

    /// Solve eigenvalue problem
    pub fn solve_eigenvalue_problem(
        &mut self,
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        which: &str,
    ) -> Result<SparseEigenResult> {
        let start_time = std::time::Instant::now();

        if !matrix.is_square() {
            return Err(SimulatorError::InvalidInput(
                "Matrix must be square for eigenvalue problems".to_string(),
            ));
        }

        if num_eigenvalues >= matrix.shape.0 {
            return Err(SimulatorError::InvalidInput(
                "Number of eigenvalues must be less than matrix size".to_string(),
            ));
        }

        let (eigenvalues, eigenvectors, converged_count) = match self.config.method {
            SparseSolverMethod::Arnoldi => self.solve_arnoldi(matrix, num_eigenvalues, which)?,
            SparseSolverMethod::Lanczos => self.solve_lanczos(matrix, num_eigenvalues, which)?,
            SparseSolverMethod::LOBPCG => self.solve_lobpcg(matrix, num_eigenvalues, which)?,
            SparseSolverMethod::SciRS2Auto => {
                self.solve_eigen_scirs2_auto(matrix, num_eigenvalues, which)?
            }
            _ => {
                return Err(SimulatorError::UnsupportedOperation(format!(
                    "Eigenvalue solver {:?} not implemented",
                    self.config.method
                )));
            }
        };

        self.stats.execution_time_ms = start_time.elapsed().as_secs_f64() * 1000.0;
        self.stats.converged = converged_count == num_eigenvalues;

        Ok(SparseEigenResult {
            eigenvalues,
            eigenvectors,
            converged_count,
            stats: self.stats.clone(),
        })
    }

    /// `SciRS2` automatic solver selection
    fn solve_scirs2_auto(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        if let Some(_backend) = &mut self.backend {
            // SciRS2 would automatically choose the best solver based on matrix properties
            let density = matrix.density();

            if density > 0.1 {
                // High density - use direct method
                self.solve_scirs2_direct(matrix, rhs)
            } else if matrix.is_hermitian && matrix.is_positive_definite {
                // Hermitian positive definite - use CG
                self.solve_conjugate_gradient(matrix, rhs)
            } else {
                // General case - use GMRES
                self.solve_gmres(matrix, rhs)
            }
        } else {
            // Fallback to iterative method
            self.solve_gmres(matrix, rhs)
        }
    }

    /// `SciRS2` direct solver
    fn solve_scirs2_direct(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        if let Some(_backend) = &mut self.backend {
            // Simulate SciRS2 sparse LU decomposition
            self.simulate_sparse_lu(matrix, rhs)
        } else {
            // Fallback to dense LU for small matrices
            if matrix.shape.0 <= 1000 {
                let dense_matrix = matrix.to_dense()?;
                self.solve_dense_lu(&dense_matrix, rhs)
            } else {
                Err(SimulatorError::UnsupportedOperation(
                    "Matrix too large for direct solving without SciRS2 backend".to_string(),
                ))
            }
        }
    }

    /// `SciRS2` iterative solver
    fn solve_scirs2_iterative(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        if let Some(_backend) = &mut self.backend {
            // Use SciRS2's optimized iterative solvers
            if matrix.is_hermitian {
                self.solve_conjugate_gradient(matrix, rhs)
            } else {
                self.solve_gmres(matrix, rhs)
            }
        } else {
            // Fallback to standard iterative methods
            self.solve_gmres(matrix, rhs)
        }
    }

    /// Conjugate Gradient solver (for Hermitian positive definite matrices)
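    ///
    /// Standard CG recurrences: with `r` the residual and `p` the search
    /// direction, `alpha = <r, r> / Re(<p, A p>)`, `x += alpha * p`,
    /// `r -= alpha * A p`, then `beta = <r_new, r_new> / <r_old, r_old>`
    /// and `p = r + beta * p`.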
    fn solve_conjugate_gradient(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        let n = matrix.shape.0;
        let mut x = Array1::zeros(n);
        let mut r = rhs.clone();
        let mut p = r.clone();
        let mut rsold = r.iter().map(|&ri| ri.norm_sqr()).sum::<f64>();

        self.stats.method_used = "ConjugateGradient".to_string();

        for iteration in 0..self.config.max_iterations {
            let ap = matrix.matvec(&p)?;
            let alpha = rsold
                / p.iter()
                    .zip(ap.iter())
                    .map(|(&pi, &api)| (pi.conj() * api).re)
                    .sum::<f64>();

            for i in 0..n {
                x[i] += alpha * p[i];
                r[i] -= alpha * ap[i];
            }

            let rsnew = r.iter().map(|&ri| ri.norm_sqr()).sum::<f64>();

            self.stats.iterations = iteration + 1;
            self.stats.residual_norm = rsnew.sqrt();
            self.stats.matvec_count += 1;

            if rsnew.sqrt() < self.config.tolerance {
                self.stats.converged = true;
                break;
            }

            let beta = rsnew / rsold;
            for i in 0..n {
                p[i] = r[i] + beta * p[i];
            }

            rsold = rsnew;
        }

        Ok(x)
    }

    /// GMRES solver (for general matrices)
    ///
    /// Restarted GMRES(m): builds a Krylov basis with the Arnoldi process,
    /// then solves the small least-squares problem via normal equations
    /// (a simplification; production code would use Givens rotations).
    fn solve_gmres(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        let n = matrix.shape.0;
        let m = self.config.restart.min(n).max(1);
        let mut x: Array1<Complex64> = Array1::zeros(n);

        self.stats.method_used = "GMRES".to_string();

        let max_restarts = (self.config.max_iterations / m).max(1);
        for _restart in 0..max_restarts {
            // Current residual r = b - A x
            let ax = matrix.matvec(&x)?;
            self.stats.matvec_count += 1;
            let mut r = rhs.clone();
            for i in 0..n {
                r[i] -= ax[i];
            }
            let beta = r.iter().map(|&ri| ri.norm_sqr()).sum::<f64>().sqrt();
            self.stats.residual_norm = beta;

            if beta < self.config.tolerance {
                self.stats.converged = true;
                return Ok(x);
            }

            // Arnoldi process: V is n x (m+1), H is (m+1) x m upper Hessenberg
            let mut v = Array2::zeros((n, m + 1));
            let mut h = Array2::zeros((m + 1, m));
            for i in 0..n {
                v[[i, 0]] = r[i] / beta;
            }

            let mut k = m; // number of Krylov directions actually built
            for j in 0..m {
                let vj = v.column(j).to_owned();
                let mut w = matrix.matvec(&vj)?;
                self.stats.matvec_count += 1;
                self.stats.iterations += 1;

                // Modified Gram-Schmidt orthogonalization
                for i in 0..=j {
                    let hij: Complex64 = v
                        .column(i)
                        .iter()
                        .zip(w.iter())
                        .map(|(&vi, &wi)| vi.conj() * wi)
                        .sum();
                    h[[i, j]] = hij;
                    for row in 0..n {
                        w[row] -= hij * v[[row, i]];
                    }
                }

                let h_norm = w.iter().map(|&wi| wi.norm_sqr()).sum::<f64>().sqrt();
                h[[j + 1, j]] = Complex64::new(h_norm, 0.0);

                if h_norm < 1e-12 {
                    // Happy breakdown: the Krylov subspace is invariant
                    k = j + 1;
                    break;
                }
                if j + 1 < m {
                    for i in 0..n {
                        v[[i, j + 1]] = w[i] / h_norm;
                    }
                }
            }

            // Least squares min || beta e1 - H y || via the normal equations
            // H^H H y = H^H (beta e1), solved with the dense fallback
            let mut hth = Array2::zeros((k, k));
            let mut htb = Array1::zeros(k);
            for a in 0..k {
                for b in 0..k {
                    let mut sum = Complex64::new(0.0, 0.0);
                    for row in 0..=k {
                        sum += h[[row, a]].conj() * h[[row, b]];
                    }
                    hth[[a, b]] = sum;
                }
                htb[a] = h[[0, a]].conj() * beta;
            }
            let y = self.solve_dense_lu(&hth, &htb)?;

            // Update the iterate: x += V_k y
            for i in 0..n {
                for j in 0..k {
                    x[i] += v[[i, j]] * y[j];
                }
            }
        }

        // Final residual check after the restart budget is exhausted
        let ax = matrix.matvec(&x)?;
        self.stats.matvec_count += 1;
        self.stats.residual_norm = rhs
            .iter()
            .zip(ax.iter())
            .map(|(&bi, &axi)| (bi - axi).norm_sqr())
            .sum::<f64>()
            .sqrt();
        if self.stats.residual_norm < self.config.tolerance {
            self.stats.converged = true;
        }
        Ok(x)
    }

    /// `BiCGSTAB` solver (for general matrices)
    fn solve_bicgstab(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        let n = matrix.shape.0;
        let mut x: Array1<Complex64> = Array1::zeros(n);
        let mut r = rhs.clone();
        let r0 = r.clone();

        self.stats.method_used = "BiCGSTAB".to_string();

        // Standard initialization: p and v start at zero so the first
        // iteration reduces to p = r.
        let mut p: Array1<Complex64> = Array1::zeros(n);
        let mut v: Array1<Complex64> = Array1::zeros(n);
        let mut alpha = Complex64::new(1.0, 0.0);
        let mut omega = Complex64::new(1.0, 0.0);
        let mut rho_old = Complex64::new(1.0, 0.0);

        for iteration in 0..self.config.max_iterations {
            let rho: Complex64 = r0
                .iter()
                .zip(r.iter())
                .map(|(&r0i, &ri)| r0i.conj() * ri)
                .sum();

            if rho.norm() < 1e-15 {
                break; // Breakdown; a restart strategy would be needed here
            }

            let beta = (rho / rho_old) * (alpha / omega);

            // p = r + beta * (p - omega * v), with v = A p from the previous
            // iteration (computing A p inside the loop would be incorrect and
            // would cost one matvec per component)
            for i in 0..n {
                p[i] = r[i] + beta * (p[i] - omega * v[i]);
            }

            v = matrix.matvec(&p)?;
            alpha = rho
                / r0.iter()
                    .zip(v.iter())
                    .map(|(&r0i, &vi)| r0i.conj() * vi)
                    .sum::<Complex64>();

            let mut s = r.clone();
            for i in 0..n {
                s[i] -= alpha * v[i];
            }

            let residual_s = s.iter().map(|&si| si.norm_sqr()).sum::<f64>().sqrt();
            if residual_s < self.config.tolerance {
                for i in 0..n {
                    x[i] += alpha * p[i];
                }
                self.stats.iterations = iteration + 1;
                self.stats.residual_norm = residual_s;
                self.stats.matvec_count += 1;
                self.stats.converged = true;
                break;
            }

            let t = matrix.matvec(&s)?;
            omega = t
                .iter()
                .zip(s.iter())
                .map(|(&ti, &si)| ti.conj() * si)
                .sum::<Complex64>()
                / t.iter().map(|&ti| ti.norm_sqr()).sum::<f64>();

            for i in 0..n {
                x[i] += alpha * p[i] + omega * s[i];
                r[i] = s[i] - omega * t[i];
            }

            self.stats.iterations = iteration + 1;
            self.stats.residual_norm = r.iter().map(|&ri| ri.norm_sqr()).sum::<f64>().sqrt();
            self.stats.matvec_count += 2;

            if self.stats.residual_norm < self.config.tolerance {
                self.stats.converged = true;
                break;
            }

            rho_old = rho;
        }

        Ok(x)
    }

    /// Simulate sparse LU decomposition
    fn simulate_sparse_lu(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        // Simplified sparse LU simulation.
        // In practice, this would use SciRS2's optimized sparse LU.

        self.stats.method_used = "SparseLU".to_string();

        // For now, fall back to an iterative method for large matrices
        if matrix.shape.0 > 5000 {
            return self.solve_gmres(matrix, rhs);
        }

        // Small matrix - convert to dense and solve
        let dense_matrix = matrix.to_dense()?;
        self.solve_dense_lu(&dense_matrix, rhs)
    }

    /// Dense LU solver (fallback)
    fn solve_dense_lu(
        &self,
        matrix: &Array2<Complex64>,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        // Simplified dense solve via Gaussian elimination without pivoting;
        // this would be replaced with proper LAPACK calls.

        let n = matrix.nrows();
        if n != matrix.ncols() {
            return Err(SimulatorError::InvalidInput(
                "Matrix must be square".to_string(),
            ));
        }

        let mut a = matrix.clone();
        let mut b = rhs.clone();

        // Forward elimination
        for k in 0..n.saturating_sub(1) {
            if a[[k, k]].norm() < 1e-15 {
                return Err(SimulatorError::NumericalError(
                    "Singular matrix".to_string(),
                ));
            }

            for i in k + 1..n {
                let factor = a[[i, k]] / a[[k, k]];
                let b_k = b[k]; // Copy values to avoid borrow checker issues
                for j in k..n {
                    let a_kj = a[[k, j]];
                    a[[i, j]] -= factor * a_kj;
                }
                b[i] -= factor * b_k;
            }
        }

        // Back substitution
        let mut x = Array1::zeros(n);
        for i in (0..n).rev() {
            let mut sum = Complex64::new(0.0, 0.0);
            for j in i + 1..n {
                sum += a[[i, j]] * x[j];
            }
            x[i] = (b[i] - sum) / a[[i, i]];
        }

        Ok(x)
    }

    /// Arnoldi eigenvalue solver
    fn solve_arnoldi(
        &mut self,
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        _which: &str,
    ) -> Result<(Vec<f64>, Array2<Complex64>, usize)> {
        // Simplified Arnoldi implementation
        let n = matrix.shape.0;
        let m = (num_eigenvalues * 2).min(n);

        let mut v = Array2::zeros((n, m + 1));
        let mut h = Array2::zeros((m + 1, m));

        // Random initial vector
        for i in 0..n {
            v[[i, 0]] = Complex64::new(fastrand::f64() - 0.5, fastrand::f64() - 0.5);
        }

        // Normalize
        let norm = v
            .column(0)
            .iter()
            .map(|&vi| vi.norm_sqr())
            .sum::<f64>()
            .sqrt();
        for i in 0..n {
            v[[i, 0]] /= norm;
        }

        // Arnoldi process
        for j in 0..m {
            let vj = v.column(j).to_owned();
            let w = matrix.matvec(&vj)?;
            self.stats.matvec_count += 1;

            // Gram-Schmidt orthogonalization
            for i in 0..=j {
                let vi = v.column(i);
                h[[i, j]] = vi
                    .iter()
                    .zip(w.iter())
                    .map(|(&vi_val, &w_val)| vi_val.conj() * w_val)
                    .sum();
            }

            let mut w_next = w.clone();
            for i in 0..=j {
                let vi = v.column(i);
                for k in 0..n {
                    w_next[k] -= h[[i, j]] * vi[k];
                }
            }

            let h_norm = w_next.iter().map(|&wi| wi.norm_sqr()).sum::<f64>().sqrt();
            h[[j + 1, j]] = Complex64::new(h_norm, 0.0);

            if h_norm > 1e-12 && j + 1 < m {
                for i in 0..n {
                    v[[i, j + 1]] = w_next[i] / h_norm;
                }
            }
        }

        // Simplified eigenvalue extraction: uses the Hessenberg diagonal as a
        // crude Ritz estimate instead of solving the projected eigenproblem
        let mut eigenvalues = Vec::new();
        for i in 0..m.min(num_eigenvalues) {
            eigenvalues.push(h[[i, i]].re);
        }
        eigenvalues.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

        // Placeholder: eigenvector extraction is not implemented
        let eigenvectors = Array2::zeros((n, num_eigenvalues));

        self.stats.method_used = "Arnoldi".to_string();

        Ok((eigenvalues, eigenvectors, num_eigenvalues.min(m)))
    }

    /// Lanczos eigenvalue solver (for Hermitian matrices)
    fn solve_lanczos(
        &mut self,
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        _which: &str,
    ) -> Result<(Vec<f64>, Array2<Complex64>, usize)> {
        // Simplified Lanczos implementation; fall back to Arnoldi for
        // non-Hermitian matrices
        if !matrix.is_hermitian {
            return self.solve_arnoldi(matrix, num_eigenvalues, _which);
        }

        let n = matrix.shape.0;
        let m = (num_eigenvalues * 2).min(n);

        let mut v = Array2::zeros((n, m + 1));
        let mut alpha = vec![0.0; m];
        let mut beta = vec![0.0; m];

        // Random initial vector
        for i in 0..n {
            v[[i, 0]] = Complex64::new(fastrand::f64() - 0.5, 0.0);
        }

        // Normalize
        let norm = v
            .column(0)
            .iter()
            .map(|&vi| vi.norm_sqr())
            .sum::<f64>()
            .sqrt();
        for i in 0..n {
            v[[i, 0]] /= norm;
        }

        // Lanczos three-term recurrence
        for j in 0..m {
            let vj = v.column(j).to_owned();
            let w = matrix.matvec(&vj)?;
            self.stats.matvec_count += 1;

            alpha[j] = vj
                .iter()
                .zip(w.iter())
                .map(|(&vji, &wi)| (vji.conj() * wi).re)
                .sum();

            let mut w_next = w.clone();
            for i in 0..n {
                w_next[i] -= alpha[j] * vj[i];
                if j > 0 {
                    w_next[i] -= beta[j - 1] * v[[i, j - 1]];
                }
            }

            beta[j] = w_next.iter().map(|&wi| wi.norm_sqr()).sum::<f64>().sqrt();

            if beta[j] > 1e-12 && j + 1 < m {
                for i in 0..n {
                    v[[i, j + 1]] = w_next[i] / beta[j];
                }
            }
        }

        // Simplified: takes the diagonal entries as Ritz estimates instead of
        // solving the projected tridiagonal eigenproblem
        let mut eigenvalues = alpha[..m.min(num_eigenvalues)].to_vec();
        eigenvalues.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

        // Placeholder: eigenvector extraction is not implemented
        let eigenvectors = Array2::zeros((n, num_eigenvalues));

        self.stats.method_used = "Lanczos".to_string();

        Ok((eigenvalues, eigenvectors, num_eigenvalues.min(m)))
    }

    /// LOBPCG eigenvalue solver
    fn solve_lobpcg(
        &mut self,
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        _which: &str,
    ) -> Result<(Vec<f64>, Array2<Complex64>, usize)> {
        // Simplified LOBPCG placeholder; a full implementation would be much
        // more complex, so delegate to Lanczos for now
        self.solve_lanczos(matrix, num_eigenvalues, _which)
    }

    /// `SciRS2` automatic eigenvalue solver
    fn solve_eigen_scirs2_auto(
        &mut self,
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        which: &str,
    ) -> Result<(Vec<f64>, Array2<Complex64>, usize)> {
        if let Some(_backend) = &mut self.backend {
            // SciRS2 would choose the best eigenvalue solver
            if matrix.is_hermitian {
                self.solve_lanczos(matrix, num_eigenvalues, which)
            } else {
                self.solve_arnoldi(matrix, num_eigenvalues, which)
            }
        } else {
            // Fallback
            if matrix.is_hermitian {
                self.solve_lanczos(matrix, num_eigenvalues, which)
            } else {
                self.solve_arnoldi(matrix, num_eigenvalues, which)
            }
        }
    }

    /// Get execution statistics
    #[must_use]
    pub const fn get_stats(&self) -> &SparseSolverStats {
        &self.stats
    }

    /// Reset statistics
    pub fn reset_stats(&mut self) {
        self.stats = SparseSolverStats::default();
    }

    /// Set configuration
    pub const fn set_config(&mut self, config: SparseSolverConfig) {
        self.config = config;
    }
}

/// Utilities for creating sparse matrices from quantum problems
pub struct SparseMatrixUtils;

impl SparseMatrixUtils {
    /// Create sparse Hamiltonian from Pauli strings
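    ///
    /// Each entry is a Pauli string over all qubits with a real coefficient,
    /// e.g. the two-qubit Ising-type Hamiltonian `H = 1.0 * ZZ + 0.5 * XX`
    /// (mirroring the unit test below):
    ///
    /// ```ignore
    /// let pauli_strings = vec![("ZZ".to_string(), 1.0), ("XX".to_string(), 0.5)];
    /// let h = SparseMatrixUtils::hamiltonian_from_pauli_strings(2, &pauli_strings)?;
    /// assert_eq!(h.shape, (4, 4));
    /// ```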
    pub fn hamiltonian_from_pauli_strings(
        num_qubits: usize,
        pauli_strings: &[(String, f64)],
    ) -> Result<SparseMatrix> {
        let dim = 1 << num_qubits;
        let mut builder = SparseMatrixBuilder::new(dim, dim);

        for (pauli_str, coeff) in pauli_strings {
            if pauli_str.len() != num_qubits {
                return Err(SimulatorError::InvalidInput(format!(
                    "Pauli string length {} doesn't match num_qubits {}",
                    pauli_str.len(),
                    num_qubits
                )));
            }

            // Build the Pauli string column by column: for each basis state
            // |i>, compute P|i> = amplitude * |target_state> and store the
            // matrix element H[target_state, i]
            for i in 0..dim {
                let mut amplitude = Complex64::new(*coeff, 0.0);
                let mut target_state = i;

                for (qubit, pauli_char) in pauli_str.chars().enumerate() {
                    let bit_pos = num_qubits - 1 - qubit;
                    let bit_val = (i >> bit_pos) & 1;

                    match pauli_char {
                        'I' => {} // Identity - no change
                        'X' => {
                            target_state ^= 1 << bit_pos; // Flip bit
                        }
                        'Y' => {
                            target_state ^= 1 << bit_pos; // Flip bit
                            if bit_val == 0 {
                                amplitude *= Complex64::new(0.0, 1.0); // Y|0> = i|1>
                            } else {
                                amplitude *= Complex64::new(0.0, -1.0); // Y|1> = -i|0>
                            }
                        }
                        'Z' => {
                            if bit_val == 1 {
                                amplitude *= Complex64::new(-1.0, 0.0); // Z|1> = -|1>
                            }
                        }
                        _ => {
                            return Err(SimulatorError::InvalidInput(format!(
                                "Invalid Pauli character: {pauli_char}"
                            )));
                        }
                    }
                }

                if amplitude.norm() > 1e-15 {
                    // Row index is the resulting state, column index the
                    // input state (assuming builder.add takes (row, col, value))
                    builder.add(target_state, i, amplitude);
                }
            }
        }

        let csr_matrix = builder.build();
        let nnz = csr_matrix.values.len();
        let matrix = SparseMatrix {
            shape: (dim, dim),
            format: SparseFormat::CSR,
            row_ptr: csr_matrix.row_ptr,
            col_indices: csr_matrix.col_indices,
            values: csr_matrix.values,
            nnz,
            is_hermitian: true,
            is_positive_definite: false,
        };

        Ok(matrix)
    }

    /// Create sparse matrix from dense matrix
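    ///
    /// Entries with magnitude at or below `threshold` are dropped. A sketch:
    ///
    /// ```ignore
    /// let dense = Array2::from_shape_vec((2, 2), vec![
    ///     Complex64::new(1.0, 0.0), Complex64::new(1e-14, 0.0),
    ///     Complex64::new(0.0, 0.0), Complex64::new(2.0, 0.0),
    /// ]).expect("shape matches");
    /// let sparse = SparseMatrixUtils::from_dense(&dense, 1e-12);
    /// assert_eq!(sparse.nnz, 2);
    /// ```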
    #[must_use]
    pub fn from_dense(dense: &Array2<Complex64>, threshold: f64) -> SparseMatrix {
        let (rows, cols) = dense.dim();
        let mut row_ptr = vec![0; rows + 1];
        let mut col_indices = Vec::new();
        let mut values = Vec::new();

        let mut nnz = 0;
        for i in 0..rows {
            row_ptr[i] = nnz;
            for j in 0..cols {
                if dense[[i, j]].norm() > threshold {
                    col_indices.push(j);
                    values.push(dense[[i, j]]);
                    nnz += 1;
                }
            }
        }
        row_ptr[rows] = nnz;

        SparseMatrix::from_csr((rows, cols), row_ptr, col_indices, values)
    }

    /// Create random sparse matrix for testing
    #[must_use]
    pub fn random_sparse(n: usize, density: f64, hermitian: bool) -> SparseMatrix {
        let total_elements = n * n;
        let nnz_target = (total_elements as f64 * density) as usize;

        // Collect unique entries keyed by (row, col); the BTreeMap keeps them
        // in row-major order, which makes the final CSR assembly trivial
        let mut entries: std::collections::BTreeMap<(usize, usize), Complex64> =
            std::collections::BTreeMap::new();

        for _ in 0..nnz_target {
            let i = fastrand::usize(0..n);
            let j = if hermitian && fastrand::f64() < 0.5 && i < n - 1 {
                fastrand::usize(i..n) // Bias toward the upper triangle for Hermitian
            } else {
                fastrand::usize(0..n)
            };

            if entries.contains_key(&(i, j)) {
                continue;
            }

            let real = fastrand::f64() - 0.5;
            let imag = if hermitian && i == j {
                0.0 // Hermitian matrices have a real diagonal
            } else {
                fastrand::f64() - 0.5
            };
            let value = Complex64::new(real, imag);
            entries.insert((i, j), value);

            // Add the mirrored element for Hermitian matrices
            if hermitian && i != j {
                entries.insert((j, i), value.conj());
            }
        }

        // Assemble CSR from the row-major sorted entries
        let mut row_ptr = vec![0; n + 1];
        let mut col_indices = Vec::with_capacity(entries.len());
        let mut values = Vec::with_capacity(entries.len());
        for (&(i, j), &value) in &entries {
            row_ptr[i + 1] += 1;
            col_indices.push(j);
            values.push(value);
        }
        for i in 0..n {
            row_ptr[i + 1] += row_ptr[i];
        }

        let mut matrix = SparseMatrix::from_csr((n, n), row_ptr, col_indices, values);
        matrix.is_hermitian = hermitian;
        matrix
    }
}

/// Benchmark sparse solver methods
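///
/// Runs each available iterative method on the same random Hermitian test
/// problem and returns per-method statistics. A usage sketch:
///
/// ```ignore
/// let results = benchmark_sparse_solvers(100, 0.05).expect("benchmark should run");
/// for (method, stats) in &results {
///     println!("{method}: {} iters, residual {:.2e}", stats.iterations, stats.residual_norm);
/// }
/// ```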
pub fn benchmark_sparse_solvers(
    matrix_size: usize,
    density: f64,
) -> Result<HashMap<String, SparseSolverStats>> {
    let mut results = HashMap::new();

    // Create test matrix and RHS
    let matrix = SparseMatrixUtils::random_sparse(matrix_size, density, true);
    let mut rhs = Array1::zeros(matrix_size);
    for i in 0..matrix_size {
        rhs[i] = Complex64::new(fastrand::f64(), 0.0);
    }

    // Test different solver methods
    let methods = vec![
        ("CG", SparseSolverMethod::CG),
        ("GMRES", SparseSolverMethod::GMRES),
        ("BiCGSTAB", SparseSolverMethod::BiCGSTAB),
        ("SciRS2Auto", SparseSolverMethod::SciRS2Auto),
    ];

    for (name, method) in methods {
        let config = SparseSolverConfig {
            method,
            tolerance: 1e-10,
            max_iterations: 1000,
            ..Default::default()
        };

        let mut solver = SciRS2SparseSolver::new(config.clone())?;
        if method == SparseSolverMethod::SciRS2Auto {
            solver = solver.with_backend().unwrap_or_else(|_| {
                SciRS2SparseSolver::new(config).expect("fallback solver creation should succeed")
            });
        }

        let _solution = solver.solve_linear_system(&matrix, &rhs)?;
        results.insert(name.to_string(), solver.get_stats().clone());
    }

    Ok(results)
}

/// Compare solver accuracy
pub fn compare_sparse_solver_accuracy(matrix_size: usize) -> Result<HashMap<String, f64>> {
    let mut errors = HashMap::new();

    // Create test problem with known solution
    let matrix = SparseMatrix::identity(matrix_size);
    let solution = Array1::from_vec(
        (0..matrix_size)
            .map(|i| Complex64::new(i as f64, 0.0))
            .collect(),
    );
    let rhs = matrix.matvec(&solution)?;

    let methods = vec![
        ("CG", SparseSolverMethod::CG),
        ("GMRES", SparseSolverMethod::GMRES),
        ("BiCGSTAB", SparseSolverMethod::BiCGSTAB),
    ];

    for (name, method) in methods {
        let config = SparseSolverConfig {
            method,
            tolerance: 1e-12,
            max_iterations: 1000,
            ..Default::default()
        };

        let mut solver = SciRS2SparseSolver::new(config)?;
        let computed_solution = solver.solve_linear_system(&matrix, &rhs)?;

        // Calculate mean componentwise error against the known solution
        let error = solution
            .iter()
            .zip(computed_solution.iter())
            .map(|(exact, computed)| (exact - computed).norm())
            .sum::<f64>()
            / matrix_size as f64;

        errors.insert(name.to_string(), error);
    }

    Ok(errors)
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;

    #[test]
    fn test_sparse_matrix_creation() {
        let matrix = SparseMatrix::identity(5);
        assert_eq!(matrix.shape, (5, 5));
        assert_eq!(matrix.nnz, 5);
        assert!(matrix.is_hermitian);
        assert!(matrix.is_positive_definite);
    }

    #[test]
    fn test_sparse_matrix_matvec() {
        let matrix = SparseMatrix::identity(3);
        let x = Array1::from_vec(vec![
            Complex64::new(1.0, 0.0),
            Complex64::new(2.0, 0.0),
            Complex64::new(3.0, 0.0),
        ]);

        let y = matrix.matvec(&x).expect("matvec should succeed");

        for i in 0..3 {
            assert_abs_diff_eq!(y[i].re, x[i].re, epsilon = 1e-10);
            assert_abs_diff_eq!(y[i].im, x[i].im, epsilon = 1e-10);
        }
    }

    #[test]
    fn test_sparse_solver_creation() {
        let config = SparseSolverConfig::default();
        let solver = SciRS2SparseSolver::new(config).expect("solver creation should succeed");
        assert!(solver.backend.is_none());
    }

    #[test]
    fn test_identity_solve() {
        let matrix = SparseMatrix::identity(5);
        let rhs = Array1::from_vec((0..5).map(|i| Complex64::new(i as f64, 0.0)).collect());

        let config = SparseSolverConfig {
            method: SparseSolverMethod::CG,
            tolerance: 1e-10,
            max_iterations: 100,
            ..Default::default()
        };

        let mut solver = SciRS2SparseSolver::new(config).expect("solver creation should succeed");
        let solution = solver
            .solve_linear_system(&matrix, &rhs)
            .expect("solve_linear_system should succeed");

        for i in 0..5 {
            assert_abs_diff_eq!(solution[i].re, rhs[i].re, epsilon = 1e-8);
            assert_abs_diff_eq!(solution[i].im, rhs[i].im, epsilon = 1e-8);
        }
    }

    #[test]
    fn test_pauli_hamiltonian_creation() {
        let pauli_strings = vec![("ZZ".to_string(), 1.0), ("XX".to_string(), 0.5)];

        let matrix = SparseMatrixUtils::hamiltonian_from_pauli_strings(2, &pauli_strings)
            .expect("hamiltonian_from_pauli_strings should succeed");

        assert_eq!(matrix.shape, (4, 4));
        assert!(matrix.is_hermitian);
        assert!(matrix.nnz > 0);
    }

    #[test]
    fn test_random_sparse_matrix() {
        let matrix = SparseMatrixUtils::random_sparse(10, 0.1, true);

        assert_eq!(matrix.shape, (10, 10));
        assert!(matrix.is_hermitian);
        assert!(matrix.density() <= 0.25); // Allow more margin due to randomness and Hermitian constraint
    }

    #[test]
    fn test_dense_conversion() {
        let matrix = SparseMatrix::identity(3);
        let dense = matrix.to_dense().expect("to_dense should succeed");

        assert_eq!(dense.shape(), [3, 3]);
        for i in 0..3 {
            for j in 0..3 {
                if i == j {
                    assert_abs_diff_eq!(dense[[i, j]].re, 1.0, epsilon = 1e-10);
                } else {
                    assert_abs_diff_eq!(dense[[i, j]].norm(), 0.0, epsilon = 1e-10);
                }
            }
        }
    }
}