quantrs2_sim/scirs2_sparse.rs

//! SciRS2-optimized sparse matrix solvers for large quantum systems.
//!
//! This module provides sparse matrix operations optimized using SciRS2's
//! sparse linear algebra capabilities. It includes sparse Hamiltonian
//! evolution, linear system solving, eigenvalue problems, and optimization
//! routines for large-scale quantum simulations.
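//!
//! # Example
//!
//! A minimal usage sketch (the `quantrs2_sim` crate path is assumed here):
//!
//! ```no_run
//! use quantrs2_sim::scirs2_sparse::{
//!     SciRS2SparseSolver, SparseMatrix, SparseSolverConfig, SparseSolverMethod,
//! };
//! use scirs2_core::ndarray::Array1;
//! use scirs2_core::Complex64;
//!
//! let matrix = SparseMatrix::identity(4);
//! let rhs = Array1::from_vec((0..4).map(|i| Complex64::new(i as f64, 0.0)).collect());
//!
//! let config = SparseSolverConfig {
//!     method: SparseSolverMethod::CG,
//!     ..Default::default()
//! };
//! let mut solver = SciRS2SparseSolver::new(config).unwrap();
//! let solution = solver.solve_linear_system(&matrix, &rhs).unwrap();
//! assert_eq!(solution.len(), 4);
//! ```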

use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

use crate::error::{Result, SimulatorError};
use crate::scirs2_integration::SciRS2Backend;
use crate::sparse::SparseMatrixBuilder;

/// Sparse matrix storage format
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SparseFormat {
    /// Compressed Sparse Row format
    CSR,
    /// Compressed Sparse Column format
    CSC,
    /// Coordinate format
    COO,
    /// Diagonal format
    DIA,
    /// Block Sparse Row format
    BSR,
}

/// Sparse solver method
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SparseSolverMethod {
    // Direct methods
    LU,
    QR,
    Cholesky,
    // Iterative methods
    CG,
    GMRES,
    BiCGSTAB,
    // Eigenvalue solvers
    Arnoldi,
    Lanczos,
    LOBPCG,
    // SciRS2-optimized methods
    SciRS2Auto,
    SciRS2Iterative,
    SciRS2Direct,
}

/// Preconditioner type.
///
/// Currently informational: the simplified solvers in this module do not yet
/// apply preconditioning.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Preconditioner {
    None,
    Jacobi,
    ILU,
    AMG,
    SciRS2Auto,
}

/// Sparse solver configuration
#[derive(Debug, Clone)]
pub struct SparseSolverConfig {
    /// Solver method to use
    pub method: SparseSolverMethod,
    /// Preconditioner
    pub preconditioner: Preconditioner,
    /// Convergence tolerance
    pub tolerance: f64,
    /// Maximum iterations
    pub max_iterations: usize,
    /// GMRES restart length (Krylov subspace dimension per cycle)
    pub restart: usize,
    /// Use parallel execution
    pub parallel: bool,
    /// Memory limit in bytes
    pub memory_limit: usize,
}

impl Default for SparseSolverConfig {
    fn default() -> Self {
        Self {
            method: SparseSolverMethod::SciRS2Auto,
            preconditioner: Preconditioner::SciRS2Auto,
            tolerance: 1e-12,
            max_iterations: 1000,
            restart: 30,
            parallel: true,
            memory_limit: 8 * 1024 * 1024 * 1024, // 8 GB
        }
    }
}

/// Sparse solver execution statistics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SparseSolverStats {
    /// Execution time in milliseconds
    pub execution_time_ms: f64,
    /// Number of iterations performed
    pub iterations: usize,
    /// Final residual norm
    pub residual_norm: f64,
    /// Whether convergence was achieved
    pub converged: bool,
    /// Memory usage in bytes
    pub memory_usage_bytes: usize,
    /// Number of matrix-vector multiplications
    pub matvec_count: usize,
    /// Method used for solving
    pub method_used: String,
    /// Preconditioner setup time in milliseconds
    pub preconditioner_time_ms: f64,
}

/// Sparse eigenvalue problem result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SparseEigenResult {
    /// Eigenvalues (sorted in ascending order)
    pub eigenvalues: Vec<f64>,
    /// Corresponding eigenvectors
    pub eigenvectors: Array2<Complex64>,
    /// Number of converged eigenvalues
    pub converged_count: usize,
    /// Solver statistics
    pub stats: SparseSolverStats,
}

/// SciRS2-optimized sparse matrix operations
pub struct SciRS2SparseSolver {
    /// SciRS2 backend
    backend: Option<SciRS2Backend>,
    /// Solver configuration
    config: SparseSolverConfig,
    /// Execution statistics
    stats: SparseSolverStats,
    /// Cached sparse matrices
    matrix_cache: HashMap<String, SparseMatrix>,
    /// Preconditioner cache
    preconditioner_cache: HashMap<String, Preconditioner>,
}

/// Sparse matrix representation
#[derive(Debug, Clone)]
pub struct SparseMatrix {
    /// Matrix dimensions
    pub shape: (usize, usize),
    /// Storage format
    pub format: SparseFormat,
    /// Row pointers (CSR): row i spans values[row_ptr[i]..row_ptr[i + 1]]
    pub row_ptr: Vec<usize>,
    /// Column indices
    pub col_indices: Vec<usize>,
    /// Matrix values
    pub values: Vec<Complex64>,
    /// Number of non-zero elements
    pub nnz: usize,
    /// Is Hermitian
    pub is_hermitian: bool,
    /// Is positive definite
    pub is_positive_definite: bool,
}

impl SparseMatrix {
    /// Create new sparse matrix
    pub fn new(shape: (usize, usize), format: SparseFormat) -> Self {
        Self {
            shape,
            format,
            row_ptr: Vec::new(),
            col_indices: Vec::new(),
            values: Vec::new(),
            nnz: 0,
            is_hermitian: false,
            is_positive_definite: false,
        }
    }

    /// Create from CSR format
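    ///
    /// # Example
    ///
    /// A minimal sketch of the CSR layout for the matrix [[1, 0], [2, 3]]
    /// (the `quantrs2_sim` crate path is assumed here):
    ///
    /// ```no_run
    /// use quantrs2_sim::scirs2_sparse::SparseMatrix;
    /// use scirs2_core::Complex64;
    ///
    /// let m = SparseMatrix::from_csr(
    ///     (2, 2),
    ///     vec![0, 1, 3], // row i spans values[row_ptr[i]..row_ptr[i + 1]]
    ///     vec![0, 0, 1], // column index of each stored value
    ///     vec![
    ///         Complex64::new(1.0, 0.0), // entry (0, 0)
    ///         Complex64::new(2.0, 0.0), // entry (1, 0)
    ///         Complex64::new(3.0, 0.0), // entry (1, 1)
    ///     ],
    /// );
    /// assert_eq!(m.nnz, 3);
    /// ```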
    pub fn from_csr(
        shape: (usize, usize),
        row_ptr: Vec<usize>,
        col_indices: Vec<usize>,
        values: Vec<Complex64>,
    ) -> Self {
        let nnz = values.len();
        Self {
            shape,
            format: SparseFormat::CSR,
            row_ptr,
            col_indices,
            values,
            nnz,
            is_hermitian: false,
            is_positive_definite: false,
        }
    }

    /// Create identity matrix
    pub fn identity(n: usize) -> Self {
        let mut row_ptr = vec![0; n + 1];
        let mut col_indices = Vec::with_capacity(n);
        let mut values = Vec::with_capacity(n);

        for i in 0..n {
            row_ptr[i + 1] = i + 1;
            col_indices.push(i);
            values.push(Complex64::new(1.0, 0.0));
        }

        Self {
            shape: (n, n),
            format: SparseFormat::CSR,
            row_ptr,
            col_indices,
            values,
            nnz: n,
            is_hermitian: true,
            is_positive_definite: true,
        }
    }

    /// Matrix-vector multiplication
    pub fn matvec(&self, x: &Array1<Complex64>) -> Result<Array1<Complex64>> {
        if x.len() != self.shape.1 {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Vector length {} doesn't match matrix columns {}",
                x.len(),
                self.shape.1
            )));
        }

        let mut y = Array1::zeros(self.shape.0);

        match self.format {
            SparseFormat::CSR => {
                for i in 0..self.shape.0 {
                    let mut sum = Complex64::new(0.0, 0.0);
                    for j in self.row_ptr[i]..self.row_ptr[i + 1] {
                        sum += self.values[j] * x[self.col_indices[j]];
                    }
                    y[i] = sum;
                }
            }
            _ => {
                return Err(SimulatorError::UnsupportedOperation(format!(
                    "Matrix-vector multiplication not implemented for {:?}",
                    self.format
                )));
            }
        }

        Ok(y)
    }

    /// Get matrix density (nnz / total elements)
    pub fn density(&self) -> f64 {
        self.nnz as f64 / (self.shape.0 * self.shape.1) as f64
    }

    /// Check if matrix is square
    pub fn is_square(&self) -> bool {
        self.shape.0 == self.shape.1
    }

    /// Convert to dense matrix (for small matrices only)
    pub fn to_dense(&self) -> Result<Array2<Complex64>> {
        if self.shape.0 * self.shape.1 > 10_000_000 {
            return Err(SimulatorError::InvalidInput(
                "Matrix too large to convert to dense format".to_string(),
            ));
        }

        let mut dense = Array2::zeros(self.shape);

        match self.format {
            SparseFormat::CSR => {
                for i in 0..self.shape.0 {
                    for j in self.row_ptr[i]..self.row_ptr[i + 1] {
                        dense[[i, self.col_indices[j]]] = self.values[j];
                    }
                }
            }
            _ => {
                return Err(SimulatorError::UnsupportedOperation(format!(
                    "Dense conversion not implemented for {:?}",
                    self.format
                )));
            }
        }

        Ok(dense)
    }
}

impl SciRS2SparseSolver {
    /// Create new sparse solver
    pub fn new(config: SparseSolverConfig) -> Result<Self> {
        Ok(Self {
            backend: None,
            config,
            stats: SparseSolverStats::default(),
            matrix_cache: HashMap::new(),
            preconditioner_cache: HashMap::new(),
        })
    }

    /// Initialize with SciRS2 backend
    pub fn with_backend(mut self) -> Result<Self> {
        self.backend = Some(SciRS2Backend::new());
        Ok(self)
    }

    /// Solve linear system Ax = b
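    ///
    /// The method is taken from the solver configuration. A minimal sketch
    /// (the `quantrs2_sim` crate path is assumed here):
    ///
    /// ```no_run
    /// use quantrs2_sim::scirs2_sparse::{
    ///     SciRS2SparseSolver, SparseMatrixUtils, SparseSolverConfig, SparseSolverMethod,
    /// };
    /// use scirs2_core::ndarray::Array1;
    /// use scirs2_core::Complex64;
    ///
    /// let matrix = SparseMatrixUtils::random_sparse(16, 0.2, true);
    /// let rhs = Array1::from_elem(16, Complex64::new(1.0, 0.0));
    /// let config = SparseSolverConfig {
    ///     method: SparseSolverMethod::GMRES,
    ///     ..Default::default()
    /// };
    /// let mut solver = SciRS2SparseSolver::new(config).unwrap();
    /// let x = solver.solve_linear_system(&matrix, &rhs).unwrap();
    /// assert_eq!(x.len(), 16);
    /// ```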
    pub fn solve_linear_system(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        let start_time = std::time::Instant::now();

        if !matrix.is_square() {
            return Err(SimulatorError::InvalidInput(
                "Matrix must be square for linear system solving".to_string(),
            ));
        }

        if rhs.len() != matrix.shape.0 {
            return Err(SimulatorError::DimensionMismatch(format!(
                "RHS vector length {} doesn't match matrix size {}",
                rhs.len(),
                matrix.shape.0
            )));
        }

        let solution = match self.config.method {
            SparseSolverMethod::SciRS2Auto => self.solve_scirs2_auto(matrix, rhs)?,
            SparseSolverMethod::SciRS2Direct => self.solve_scirs2_direct(matrix, rhs)?,
            SparseSolverMethod::SciRS2Iterative => self.solve_scirs2_iterative(matrix, rhs)?,
            SparseSolverMethod::CG => self.solve_conjugate_gradient(matrix, rhs)?,
            SparseSolverMethod::GMRES => self.solve_gmres(matrix, rhs)?,
            SparseSolverMethod::BiCGSTAB => self.solve_bicgstab(matrix, rhs)?,
            _ => {
                return Err(SimulatorError::UnsupportedOperation(format!(
                    "Solver method {:?} not implemented",
                    self.config.method
                )));
            }
        };

        self.stats.execution_time_ms = start_time.elapsed().as_secs_f64() * 1000.0;

        Ok(solution)
    }

    /// Solve eigenvalue problem
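    ///
    /// `num_eigenvalues` must be smaller than the matrix dimension. The
    /// `which` argument is the usual spectrum selector (e.g. "SM" for
    /// smallest magnitude); the simplified solvers below currently ignore it.
    ///
    /// A minimal sketch (the `quantrs2_sim` crate path is assumed here):
    ///
    /// ```no_run
    /// use quantrs2_sim::scirs2_sparse::{
    ///     SciRS2SparseSolver, SparseMatrixUtils, SparseSolverConfig, SparseSolverMethod,
    /// };
    ///
    /// let h = SparseMatrixUtils::hamiltonian_from_pauli_strings(
    ///     2,
    ///     &[("ZZ".to_string(), 1.0)],
    /// ).unwrap();
    /// let config = SparseSolverConfig {
    ///     method: SparseSolverMethod::Lanczos,
    ///     ..Default::default()
    /// };
    /// let mut solver = SciRS2SparseSolver::new(config).unwrap();
    /// let result = solver.solve_eigenvalue_problem(&h, 2, "SM").unwrap();
    /// assert_eq!(result.eigenvalues.len(), 2);
    /// ```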
    pub fn solve_eigenvalue_problem(
        &mut self,
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        which: &str,
    ) -> Result<SparseEigenResult> {
        let start_time = std::time::Instant::now();

        if !matrix.is_square() {
            return Err(SimulatorError::InvalidInput(
                "Matrix must be square for eigenvalue problems".to_string(),
            ));
        }

        if num_eigenvalues >= matrix.shape.0 {
            return Err(SimulatorError::InvalidInput(
                "Number of eigenvalues must be less than matrix size".to_string(),
            ));
        }

        let (eigenvalues, eigenvectors, converged_count) = match self.config.method {
            SparseSolverMethod::Arnoldi => self.solve_arnoldi(matrix, num_eigenvalues, which)?,
            SparseSolverMethod::Lanczos => self.solve_lanczos(matrix, num_eigenvalues, which)?,
            SparseSolverMethod::LOBPCG => self.solve_lobpcg(matrix, num_eigenvalues, which)?,
            SparseSolverMethod::SciRS2Auto => {
                self.solve_eigen_scirs2_auto(matrix, num_eigenvalues, which)?
            }
            _ => {
                return Err(SimulatorError::UnsupportedOperation(format!(
                    "Eigenvalue solver {:?} not implemented",
                    self.config.method
                )));
            }
        };

        self.stats.execution_time_ms = start_time.elapsed().as_secs_f64() * 1000.0;
        self.stats.converged = converged_count == num_eigenvalues;

        Ok(SparseEigenResult {
            eigenvalues,
            eigenvectors,
            converged_count,
            stats: self.stats.clone(),
        })
    }

    /// SciRS2 automatic solver selection
    fn solve_scirs2_auto(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        if let Some(_backend) = &mut self.backend {
            // SciRS2 would automatically choose the best solver based on matrix properties
            let density = matrix.density();

            if density > 0.1 {
                // High density - use direct method
                self.solve_scirs2_direct(matrix, rhs)
            } else if matrix.is_hermitian && matrix.is_positive_definite {
                // Hermitian positive definite - use CG
                self.solve_conjugate_gradient(matrix, rhs)
            } else {
                // General case - use GMRES
                self.solve_gmres(matrix, rhs)
            }
        } else {
            // Fallback to iterative method
            self.solve_gmres(matrix, rhs)
        }
    }

    /// SciRS2 direct solver
    fn solve_scirs2_direct(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        if let Some(_backend) = &mut self.backend {
            // Simulate SciRS2 sparse LU decomposition
            self.simulate_sparse_lu(matrix, rhs)
        } else {
            // Fallback to dense LU for small matrices
            if matrix.shape.0 <= 1000 {
                let dense_matrix = matrix.to_dense()?;
                self.solve_dense_lu(&dense_matrix, rhs)
            } else {
                Err(SimulatorError::UnsupportedOperation(
                    "Matrix too large for direct solving without SciRS2 backend".to_string(),
                ))
            }
        }
    }

    /// SciRS2 iterative solver
    fn solve_scirs2_iterative(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        if let Some(_backend) = &mut self.backend {
            // Use SciRS2's optimized iterative solvers
            if matrix.is_hermitian {
                self.solve_conjugate_gradient(matrix, rhs)
            } else {
                self.solve_gmres(matrix, rhs)
            }
        } else {
            // Fallback to standard iterative methods
            self.solve_gmres(matrix, rhs)
        }
    }

    /// Conjugate Gradient solver (for Hermitian positive definite matrices)
    fn solve_conjugate_gradient(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
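        // Standard CG recurrences, starting from x_0 = 0, r_0 = b, p_0 = r_0:
        //   alpha_k = (r_k, r_k) / (p_k, A p_k)
        //   x_{k+1} = x_k + alpha_k p_k,  r_{k+1} = r_k - alpha_k A p_k
        //   beta_k  = (r_{k+1}, r_{k+1}) / (r_k, r_k)
        //   p_{k+1} = r_{k+1} + beta_k p_k
        // For a Hermitian positive definite matrix both inner products are
        // real, which is why alpha and beta are kept as f64 below.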
        let n = matrix.shape.0;
        let mut x = Array1::zeros(n);
        let mut r = rhs.clone();
        let mut p = r.clone();
        let mut rsold = r.iter().map(|&ri| ri.norm_sqr()).sum::<f64>();

        self.stats.method_used = "ConjugateGradient".to_string();

        for iteration in 0..self.config.max_iterations {
            let ap = matrix.matvec(&p)?;
            let alpha = rsold
                / p.iter()
                    .zip(ap.iter())
                    .map(|(&pi, &api)| (pi.conj() * api).re)
                    .sum::<f64>();

            for i in 0..n {
                x[i] += alpha * p[i];
                r[i] -= alpha * ap[i];
            }

            let rsnew = r.iter().map(|&ri| ri.norm_sqr()).sum::<f64>();

            self.stats.iterations = iteration + 1;
            self.stats.residual_norm = rsnew.sqrt();
            self.stats.matvec_count += 1;

            if rsnew.sqrt() < self.config.tolerance {
                self.stats.converged = true;
                break;
            }

            let beta = rsnew / rsold;
            for i in 0..n {
                p[i] = r[i] + beta * p[i];
            }

            rsold = rsnew;
        }

        Ok(x)
    }

    /// GMRES solver (for general matrices)
    fn solve_gmres(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        let n = matrix.shape.0;
        let m = self.config.restart.min(n);
        let mut x: Array1<Complex64> = Array1::zeros(n);

        self.stats.method_used = "GMRES".to_string();

        // Restarted GMRES(m): each cycle builds an m-dimensional Krylov basis
        // with the Arnoldi process, solves the small least-squares problem
        // min_y ||beta * e1 - H y|| via Givens rotations, and updates x += V y.
        for _restart in 0..(self.config.max_iterations / m).max(1) {
            // Current residual r = b - A x
            let ax = matrix.matvec(&x)?;
            self.stats.matvec_count += 1;
            let mut r = rhs.clone();
            for i in 0..n {
                r[i] -= ax[i];
            }
            let beta = r.iter().map(|&ri| ri.norm_sqr()).sum::<f64>().sqrt();
            self.stats.residual_norm = beta;
            if beta < self.config.tolerance {
                self.stats.converged = true;
                return Ok(x);
            }

            // Arnoldi process: orthonormal basis V and upper Hessenberg H
            let mut v = Array2::zeros((n, m + 1));
            let mut h = Array2::<Complex64>::zeros((m + 1, m));
            for i in 0..n {
                v[[i, 0]] = r[i] / beta;
            }

            let mut k = 0;
            for j in 0..m {
                let vj = v.column(j).to_owned();
                let mut w = matrix.matvec(&vj)?;
                self.stats.matvec_count += 1;
                self.stats.iterations += 1;

                // Modified Gram-Schmidt orthogonalization
                for i in 0..=j {
                    let vi = v.column(i);
                    let hij: Complex64 = vi
                        .iter()
                        .zip(w.iter())
                        .map(|(&vi_val, &w_val)| vi_val.conj() * w_val)
                        .sum();
                    h[[i, j]] = hij;
                    for l in 0..n {
                        w[l] -= hij * vi[l];
                    }
                }

                let h_norm = w.iter().map(|&wi| wi.norm_sqr()).sum::<f64>().sqrt();
                h[[j + 1, j]] = Complex64::new(h_norm, 0.0);
                k = j + 1;

                if h_norm < 1e-12 {
                    break; // Happy breakdown: the Krylov subspace is invariant
                }
                for i in 0..n {
                    v[[i, j + 1]] = w[i] / h_norm;
                }
            }

            // Reduce H to upper triangular form with complex Givens rotations,
            // applying the same rotations to the right-hand side g = beta * e1
            let mut g = vec![Complex64::new(0.0, 0.0); k + 1];
            g[0] = Complex64::new(beta, 0.0);
            for j in 0..k {
                let a = h[[j, j]];
                let b = h[[j + 1, j]];
                let denom = (a.norm_sqr() + b.norm_sqr()).sqrt();
                if denom < 1e-15 {
                    continue;
                }
                let c = a.conj() / denom;
                let s = b.conj() / denom;
                for col in j..k {
                    let hj = h[[j, col]];
                    let hj1 = h[[j + 1, col]];
                    h[[j, col]] = c * hj + s * hj1;
                    h[[j + 1, col]] = -s.conj() * hj + c.conj() * hj1;
                }
                let gj = g[j];
                let gj1 = g[j + 1];
                g[j] = c * gj + s * gj1;
                g[j + 1] = -s.conj() * gj + c.conj() * gj1;
            }

            // Back substitution for y, then accumulate x += V y
            let mut y = vec![Complex64::new(0.0, 0.0); k];
            for i in (0..k).rev() {
                let mut sum = g[i];
                for j in i + 1..k {
                    sum -= h[[i, j]] * y[j];
                }
                if h[[i, i]].norm() > 1e-15 {
                    y[i] = sum / h[[i, i]];
                }
            }
            for j in 0..k {
                for i in 0..n {
                    x[i] += y[j] * v[[i, j]];
                }
            }

            self.stats.residual_norm = g[k].norm();
            if self.stats.residual_norm < self.config.tolerance {
                self.stats.converged = true;
                return Ok(x);
            }
        }

        Ok(x)
    }

    /// BiCGSTAB solver
    fn solve_bicgstab(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        let n = matrix.shape.0;
        let mut x = Array1::zeros(n);
        let mut r = rhs.clone();
        let r0 = r.clone();

        self.stats.method_used = "BiCGSTAB".to_string();

        let mut p = r.clone();
        let mut v = Array1::zeros(n);
        let mut alpha = Complex64::new(1.0, 0.0);
        let mut omega = Complex64::new(1.0, 0.0);
        let mut rho_old = Complex64::new(1.0, 0.0);

        for iteration in 0..self.config.max_iterations {
            let rho: Complex64 = r0
                .iter()
                .zip(r.iter())
                .map(|(&r0i, &ri)| r0i.conj() * ri)
                .sum();

            if rho.norm() < 1e-15 {
                break; // Method breaks down when (r0, r) vanishes
            }

            if iteration > 0 {
                // p = r + beta * (p - omega * v), where v = A p from the previous step
                let beta = (rho / rho_old) * (alpha / omega);
                for i in 0..n {
                    p[i] = r[i] + beta * (p[i] - omega * v[i]);
                }
            }

            v = matrix.matvec(&p)?;
            alpha = rho
                / r0.iter()
                    .zip(v.iter())
                    .map(|(&r0i, &vi)| r0i.conj() * vi)
                    .sum::<Complex64>();

            let mut s = r.clone();
            for i in 0..n {
                s[i] -= alpha * v[i];
            }

            let residual_s = s.iter().map(|&si| si.norm_sqr()).sum::<f64>().sqrt();
            if residual_s < self.config.tolerance {
                for i in 0..n {
                    x[i] += alpha * p[i];
                }
                self.stats.converged = true;
                break;
            }

            let as_vec = matrix.matvec(&s)?;
            omega = as_vec
                .iter()
                .zip(s.iter())
                .map(|(&asi, &si)| asi.conj() * si)
                .sum::<Complex64>()
                / as_vec.iter().map(|&asi| asi.norm_sqr()).sum::<f64>();

            for i in 0..n {
                x[i] += alpha * p[i] + omega * s[i];
                r[i] = s[i] - omega * as_vec[i];
            }

            self.stats.iterations = iteration + 1;
            self.stats.residual_norm = r.iter().map(|&ri| ri.norm_sqr()).sum::<f64>().sqrt();
            self.stats.matvec_count += 2;

            if self.stats.residual_norm < self.config.tolerance {
                self.stats.converged = true;
                break;
            }

            rho_old = rho;
        }

        Ok(x)
    }

    /// Simulate sparse LU decomposition
    fn simulate_sparse_lu(
        &mut self,
        matrix: &SparseMatrix,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        // Simplified sparse LU simulation.
        // In practice, this would use SciRS2's optimized sparse LU.

        self.stats.method_used = "SparseLU".to_string();

        // For now, fall back to an iterative method for large matrices
        if matrix.shape.0 > 5000 {
            return self.solve_gmres(matrix, rhs);
        }

        // Small matrix - convert to dense and solve
        let dense_matrix = matrix.to_dense()?;
        self.solve_dense_lu(&dense_matrix, rhs)
    }

    /// Dense LU solver (fallback)
    fn solve_dense_lu(
        &self,
        matrix: &Array2<Complex64>,
        rhs: &Array1<Complex64>,
    ) -> Result<Array1<Complex64>> {
        // Simplified dense solve via Gaussian elimination without pivoting;
        // this would be replaced with proper LAPACK calls.

        let n = matrix.nrows();
        if n != matrix.ncols() {
            return Err(SimulatorError::InvalidInput(
                "Matrix must be square".to_string(),
            ));
        }

        let mut a = matrix.clone();
        let mut b = rhs.clone();

        // Forward elimination
        for k in 0..n - 1 {
            if a[[k, k]].norm() < 1e-15 {
                return Err(SimulatorError::NumericalError(
                    "Singular matrix".to_string(),
                ));
            }

            for i in k + 1..n {
                let factor = a[[i, k]] / a[[k, k]];
                let b_k = b[k]; // Copy out to avoid aliasing the mutable borrow below
                for j in k..n {
                    let a_kj = a[[k, j]];
                    a[[i, j]] -= factor * a_kj;
                }
                b[i] -= factor * b_k;
            }
        }

        // Back substitution
        let mut x = Array1::zeros(n);
        for i in (0..n).rev() {
            let mut sum = Complex64::new(0.0, 0.0);
            for j in i + 1..n {
                sum += a[[i, j]] * x[j];
            }
            x[i] = (b[i] - sum) / a[[i, i]];
        }

        Ok(x)
    }

    /// Arnoldi eigenvalue solver
    fn solve_arnoldi(
        &mut self,
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        _which: &str,
    ) -> Result<(Vec<f64>, Array2<Complex64>, usize)> {
        // Simplified Arnoldi implementation
        let n = matrix.shape.0;
        let m = (num_eigenvalues * 2).min(n);

        let mut v = Array2::zeros((n, m + 1));
        let mut h = Array2::zeros((m + 1, m));

        // Random initial vector
        for i in 0..n {
            v[[i, 0]] = Complex64::new(fastrand::f64() - 0.5, fastrand::f64() - 0.5);
        }

        // Normalize
        let norm = v
            .column(0)
            .iter()
            .map(|&vi| vi.norm_sqr())
            .sum::<f64>()
            .sqrt();
        for i in 0..n {
            v[[i, 0]] /= norm;
        }

        // Arnoldi process
        for j in 0..m {
            let vj = v.column(j).to_owned();
            let w = matrix.matvec(&vj)?;

            // Modified Gram-Schmidt
            for i in 0..=j {
                let vi = v.column(i);
                h[[i, j]] = vi
                    .iter()
                    .zip(w.iter())
                    .map(|(&vi_val, &w_val)| vi_val.conj() * w_val)
                    .sum();
            }

            let mut w_next = w.clone();
            for i in 0..=j {
                let vi = v.column(i);
                for k in 0..n {
                    w_next[k] -= h[[i, j]] * vi[k];
                }
            }

            let h_norm = w_next.iter().map(|&wi| wi.norm_sqr()).sum::<f64>().sqrt();
            h[[j + 1, j]] = Complex64::new(h_norm, 0.0);

            if h_norm > 1e-12 && j + 1 < m {
                for i in 0..n {
                    v[[i, j + 1]] = w_next[i] / h_norm;
                }
            }
        }

        // Use the Hessenberg diagonal as crude eigenvalue estimates; a full
        // implementation would solve the m x m Hessenberg eigenproblem for
        // the Ritz values instead
        let mut eigenvalues = Vec::new();
        for i in 0..m.min(num_eigenvalues) {
            eigenvalues.push(h[[i, i]].re);
        }
        eigenvalues.sort_by(|a, b| a.partial_cmp(b).unwrap());

        // Eigenvector extraction is not implemented in this simplified solver
        let eigenvectors = Array2::zeros((n, num_eigenvalues));

        self.stats.method_used = "Arnoldi".to_string();

        Ok((eigenvalues, eigenvectors, num_eigenvalues.min(m)))
    }

    /// Lanczos eigenvalue solver (for Hermitian matrices)
    fn solve_lanczos(
        &mut self,
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        _which: &str,
    ) -> Result<(Vec<f64>, Array2<Complex64>, usize)> {
        // Simplified Lanczos implementation; fall back to Arnoldi for
        // non-Hermitian matrices
        if !matrix.is_hermitian {
            return self.solve_arnoldi(matrix, num_eigenvalues, _which);
        }

        let n = matrix.shape.0;
        let m = (num_eigenvalues * 2).min(n);

        let mut v = Array2::zeros((n, m + 1));
        let mut alpha = vec![0.0; m];
        let mut beta = vec![0.0; m];

        // Random initial vector
        for i in 0..n {
            v[[i, 0]] = Complex64::new(fastrand::f64() - 0.5, 0.0);
        }

        // Normalize
        let norm = v
            .column(0)
            .iter()
            .map(|&vi| vi.norm_sqr())
            .sum::<f64>()
            .sqrt();
        for i in 0..n {
            v[[i, 0]] /= norm;
        }

        // Lanczos three-term recurrence
        for j in 0..m {
            let vj = v.column(j).to_owned();
            let w = matrix.matvec(&vj)?;

            alpha[j] = vj
                .iter()
                .zip(w.iter())
                .map(|(&vji, &wi)| (vji.conj() * wi).re)
                .sum();

            let mut w_next = w.clone();
            for i in 0..n {
                w_next[i] -= alpha[j] * vj[i];
                if j > 0 {
                    w_next[i] -= beta[j - 1] * v[[i, j - 1]];
                }
            }

            beta[j] = w_next.iter().map(|&wi| wi.norm_sqr()).sum::<f64>().sqrt();

            if beta[j] > 1e-12 && j + 1 < m {
                for i in 0..n {
                    v[[i, j + 1]] = w_next[i] / beta[j];
                }
            }
        }

        // Use the diagonal alpha as crude eigenvalue estimates; a full
        // implementation would solve the tridiagonal (alpha, beta)
        // eigenproblem instead
        let mut eigenvalues = alpha[..m.min(num_eigenvalues)].to_vec();
        eigenvalues.sort_by(|a, b| a.partial_cmp(b).unwrap());

        // Eigenvector extraction is not implemented in this simplified solver
        let eigenvectors = Array2::zeros((n, num_eigenvalues));

        self.stats.method_used = "Lanczos".to_string();

        Ok((eigenvalues, eigenvectors, num_eigenvalues.min(m)))
    }

    /// LOBPCG eigenvalue solver
    fn solve_lobpcg(
        &mut self,
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        _which: &str,
    ) -> Result<(Vec<f64>, Array2<Complex64>, usize)> {
        // Simplified LOBPCG placeholder; a full implementation would be much
        // more complex, so delegate to Lanczos for now
        self.solve_lanczos(matrix, num_eigenvalues, _which)
    }

    /// SciRS2 automatic eigenvalue solver
    fn solve_eigen_scirs2_auto(
        &mut self,
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        which: &str,
    ) -> Result<(Vec<f64>, Array2<Complex64>, usize)> {
        // With or without the SciRS2 backend, pick Lanczos for Hermitian
        // matrices and Arnoldi otherwise; a real backend would make a more
        // informed choice
        if matrix.is_hermitian {
            self.solve_lanczos(matrix, num_eigenvalues, which)
        } else {
            self.solve_arnoldi(matrix, num_eigenvalues, which)
        }
    }

    /// Get execution statistics
    pub fn get_stats(&self) -> &SparseSolverStats {
        &self.stats
    }

    /// Reset statistics
    pub fn reset_stats(&mut self) {
        self.stats = SparseSolverStats::default();
    }

    /// Set configuration
    pub fn set_config(&mut self, config: SparseSolverConfig) {
        self.config = config;
    }
}

/// Utilities for creating sparse matrices from quantum problems
pub struct SparseMatrixUtils;

impl SparseMatrixUtils {
    /// Create sparse Hamiltonian from Pauli strings
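    ///
    /// Each entry pairs a Pauli string with a real coefficient; the first
    /// character of the string acts on the most significant bit. The result
    /// is the Hermitian sum of the weighted Pauli terms.
    ///
    /// # Example
    ///
    /// A minimal sketch building H = 1.0 * ZZ + 0.5 * XX (the `quantrs2_sim`
    /// crate path is assumed here):
    ///
    /// ```no_run
    /// use quantrs2_sim::scirs2_sparse::SparseMatrixUtils;
    ///
    /// let terms = vec![("ZZ".to_string(), 1.0), ("XX".to_string(), 0.5)];
    /// let h = SparseMatrixUtils::hamiltonian_from_pauli_strings(2, &terms).unwrap();
    /// assert_eq!(h.shape, (4, 4));
    /// assert!(h.is_hermitian);
    /// ```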
    pub fn hamiltonian_from_pauli_strings(
        num_qubits: usize,
        pauli_strings: &[(String, f64)],
    ) -> Result<SparseMatrix> {
        let dim = 1 << num_qubits;
        let mut builder = SparseMatrixBuilder::new(dim, dim);

        for (pauli_str, coeff) in pauli_strings {
            if pauli_str.len() != num_qubits {
                return Err(SimulatorError::InvalidInput(format!(
                    "Pauli string length {} doesn't match num_qubits {}",
                    pauli_str.len(),
                    num_qubits
                )));
            }

            // For each basis state |i>, compute P|i> = amplitude * |target>
            // and add the corresponding matrix element
            for i in 0..dim {
                let mut amplitude = Complex64::new(*coeff, 0.0);
                let mut target_state = i;

                for (qubit, pauli_char) in pauli_str.chars().enumerate() {
                    let bit_pos = num_qubits - 1 - qubit;
                    let bit_val = (i >> bit_pos) & 1;

                    match pauli_char {
                        'I' => {} // Identity - no change
                        'X' => {
                            target_state ^= 1 << bit_pos; // Flip bit
                        }
                        'Y' => {
                            target_state ^= 1 << bit_pos; // Flip bit
                            if bit_val == 0 {
                                amplitude *= Complex64::new(0.0, 1.0); // Y|0> = i|1>
                            } else {
                                amplitude *= Complex64::new(0.0, -1.0); // Y|1> = -i|0>
                            }
                        }
                        'Z' => {
                            if bit_val == 1 {
                                amplitude *= Complex64::new(-1.0, 0.0); // Z|1> = -|1>
                            }
                        }
                        _ => {
                            return Err(SimulatorError::InvalidInput(format!(
                                "Invalid Pauli character: {}",
                                pauli_char
                            )));
                        }
                    }
                }

                if amplitude.norm() > 1e-15 {
                    // P|i> = amplitude |target>, so the element belongs at
                    // (row = target_state, col = i)
                    builder.add(target_state, i, amplitude);
                }
            }
        }

        let csr_matrix = builder.build();
        let nnz = csr_matrix.values.len();
        let matrix = SparseMatrix {
            shape: (dim, dim),
            format: SparseFormat::CSR,
            row_ptr: csr_matrix.row_ptr,
            col_indices: csr_matrix.col_indices,
            values: csr_matrix.values,
            nnz,
            // A real-weighted sum of Pauli strings is Hermitian
            is_hermitian: true,
            is_positive_definite: false,
        };

        Ok(matrix)
    }

    /// Create sparse matrix from dense matrix
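    ///
    /// Entries with magnitude at or below `threshold` are dropped. A minimal
    /// sketch (the `quantrs2_sim` crate path is assumed here):
    ///
    /// ```no_run
    /// use quantrs2_sim::scirs2_sparse::SparseMatrixUtils;
    /// use scirs2_core::ndarray::Array2;
    /// use scirs2_core::Complex64;
    ///
    /// let mut dense = Array2::from_elem((2, 2), Complex64::new(0.0, 0.0));
    /// dense[[0, 0]] = Complex64::new(1.0, 0.0);
    /// let sparse = SparseMatrixUtils::from_dense(&dense, 1e-12);
    /// assert_eq!(sparse.nnz, 1);
    /// ```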
    pub fn from_dense(dense: &Array2<Complex64>, threshold: f64) -> SparseMatrix {
        let (rows, cols) = dense.dim();
        let mut row_ptr = vec![0; rows + 1];
        let mut col_indices = Vec::new();
        let mut values = Vec::new();

        let mut nnz = 0;
        for i in 0..rows {
            row_ptr[i] = nnz;
            for j in 0..cols {
                if dense[[i, j]].norm() > threshold {
                    col_indices.push(j);
                    values.push(dense[[i, j]]);
                    nnz += 1;
                }
            }
        }
        row_ptr[rows] = nnz;

        SparseMatrix::from_csr((rows, cols), row_ptr, col_indices, values)
    }

    /// Create random sparse matrix for testing
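    ///
    /// `density` is a target fraction of non-zero entries; the realized
    /// density can be lower (duplicate draws are skipped) or, for Hermitian
    /// matrices, up to roughly twice the target because of mirrored entries.
    /// A minimal sketch (the `quantrs2_sim` crate path is assumed here):
    ///
    /// ```no_run
    /// use quantrs2_sim::scirs2_sparse::SparseMatrixUtils;
    ///
    /// let m = SparseMatrixUtils::random_sparse(10, 0.1, true);
    /// assert_eq!(m.shape, (10, 10));
    /// assert!(m.is_hermitian);
    /// ```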
    pub fn random_sparse(n: usize, density: f64, hermitian: bool) -> SparseMatrix {
        let total_elements = n * n;
        let nnz_target = (total_elements as f64 * density) as usize;

        let mut row_ptr = vec![0; n + 1];
        let mut col_indices: Vec<usize> = Vec::new();
        let mut values: Vec<Complex64> = Vec::new();
        let mut added_elements = std::collections::HashSet::new();

        // Insert (row, col, value) keeping CSR order: each entry must land in
        // the slice col_indices[row_ptr[row]..row_ptr[row + 1]], sorted by column
        fn insert_entry(
            n: usize,
            row: usize,
            col: usize,
            value: Complex64,
            row_ptr: &mut [usize],
            col_indices: &mut Vec<usize>,
            values: &mut Vec<Complex64>,
        ) {
            let start = row_ptr[row];
            let end = row_ptr[row + 1];
            let offset = col_indices[start..end]
                .iter()
                .position(|&c| c > col)
                .unwrap_or(end - start);
            col_indices.insert(start + offset, col);
            values.insert(start + offset, value);
            for r in (row + 1)..=n {
                row_ptr[r] += 1;
            }
        }

        for _ in 0..nnz_target {
            let i = fastrand::usize(0..n);
            let j = fastrand::usize(0..n);

            if !added_elements.contains(&(i, j)) {
                added_elements.insert((i, j));

                let real = fastrand::f64() - 0.5;
                let imag = if hermitian && i == j {
                    0.0 // Hermitian matrices have a real diagonal
                } else {
                    fastrand::f64() - 0.5
                };
                let value = Complex64::new(real, imag);

                insert_entry(n, i, j, value, &mut row_ptr, &mut col_indices, &mut values);

                // Mirror the entry as its conjugate to keep the matrix Hermitian
                if hermitian && i != j {
                    added_elements.insert((j, i));
                    insert_entry(n, j, i, value.conj(), &mut row_ptr, &mut col_indices, &mut values);
                }
            }
        }

        let mut matrix = SparseMatrix::from_csr((n, n), row_ptr, col_indices, values);
        matrix.is_hermitian = hermitian;
        matrix
    }
}

/// Benchmark sparse solver methods
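///
/// Runs each solver on the same random Hermitian test problem and returns
/// per-method statistics. A minimal sketch (the `quantrs2_sim` crate path is
/// assumed here):
///
/// ```no_run
/// use quantrs2_sim::scirs2_sparse::benchmark_sparse_solvers;
///
/// let results = benchmark_sparse_solvers(64, 0.05).unwrap();
/// for (method, stats) in &results {
///     println!(
///         "{}: {} iterations, {:.2} ms",
///         method, stats.iterations, stats.execution_time_ms
///     );
/// }
/// ```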
pub fn benchmark_sparse_solvers(
    matrix_size: usize,
    density: f64,
) -> Result<HashMap<String, SparseSolverStats>> {
    let mut results = HashMap::new();

    // Create test matrix and RHS
    let matrix = SparseMatrixUtils::random_sparse(matrix_size, density, true);
    let mut rhs = Array1::zeros(matrix_size);
    for i in 0..matrix_size {
        rhs[i] = Complex64::new(fastrand::f64(), 0.0);
    }

    // Test different solver methods
    let methods = vec![
        ("CG", SparseSolverMethod::CG),
        ("GMRES", SparseSolverMethod::GMRES),
        ("BiCGSTAB", SparseSolverMethod::BiCGSTAB),
        ("SciRS2Auto", SparseSolverMethod::SciRS2Auto),
    ];

    for (name, method) in methods {
        let config = SparseSolverConfig {
            method,
            tolerance: 1e-10,
            max_iterations: 1000,
            ..Default::default()
        };

        let mut solver = SciRS2SparseSolver::new(config.clone())?;
        if method == SparseSolverMethod::SciRS2Auto {
            solver = solver
                .with_backend()
                .unwrap_or_else(|_| SciRS2SparseSolver::new(config).unwrap());
        }

        let _solution = solver.solve_linear_system(&matrix, &rhs)?;
        results.insert(name.to_string(), solver.get_stats().clone());
    }

    Ok(results)
}

/// Compare solver accuracy on a problem with a known solution
pub fn compare_sparse_solver_accuracy(matrix_size: usize) -> Result<HashMap<String, f64>> {
    let mut errors = HashMap::new();

    // Create test problem with known solution
    let matrix = SparseMatrix::identity(matrix_size);
    let solution = Array1::from_vec(
        (0..matrix_size)
            .map(|i| Complex64::new(i as f64, 0.0))
            .collect(),
    );
    let rhs = matrix.matvec(&solution)?;

    let methods = vec![
        ("CG", SparseSolverMethod::CG),
        ("GMRES", SparseSolverMethod::GMRES),
        ("BiCGSTAB", SparseSolverMethod::BiCGSTAB),
    ];

    for (name, method) in methods {
        let config = SparseSolverConfig {
            method,
            tolerance: 1e-12,
            max_iterations: 1000,
            ..Default::default()
        };

        let mut solver = SciRS2SparseSolver::new(config)?;
        let computed_solution = solver.solve_linear_system(&matrix, &rhs)?;

        // Mean absolute componentwise error
        let error = solution
            .iter()
            .zip(computed_solution.iter())
            .map(|(exact, computed)| (exact - computed).norm())
            .sum::<f64>()
            / matrix_size as f64;

        errors.insert(name.to_string(), error);
    }

    Ok(errors)
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;

    #[test]
    fn test_sparse_matrix_creation() {
        let matrix = SparseMatrix::identity(5);
        assert_eq!(matrix.shape, (5, 5));
        assert_eq!(matrix.nnz, 5);
        assert!(matrix.is_hermitian);
        assert!(matrix.is_positive_definite);
    }

    #[test]
    fn test_sparse_matrix_matvec() {
        let matrix = SparseMatrix::identity(3);
        let x = Array1::from_vec(vec![
            Complex64::new(1.0, 0.0),
            Complex64::new(2.0, 0.0),
            Complex64::new(3.0, 0.0),
        ]);

        let y = matrix.matvec(&x).unwrap();

        for i in 0..3 {
            assert_abs_diff_eq!(y[i].re, x[i].re, epsilon = 1e-10);
            assert_abs_diff_eq!(y[i].im, x[i].im, epsilon = 1e-10);
        }
    }

    #[test]
    fn test_sparse_solver_creation() {
        let config = SparseSolverConfig::default();
        let solver = SciRS2SparseSolver::new(config).unwrap();
        assert!(solver.backend.is_none());
    }

    #[test]
    fn test_identity_solve() {
        let matrix = SparseMatrix::identity(5);
        let rhs = Array1::from_vec((0..5).map(|i| Complex64::new(i as f64, 0.0)).collect());

        let config = SparseSolverConfig {
            method: SparseSolverMethod::CG,
            tolerance: 1e-10,
            max_iterations: 100,
            ..Default::default()
        };

        let mut solver = SciRS2SparseSolver::new(config).unwrap();
        let solution = solver.solve_linear_system(&matrix, &rhs).unwrap();

        for i in 0..5 {
            assert_abs_diff_eq!(solution[i].re, rhs[i].re, epsilon = 1e-8);
            assert_abs_diff_eq!(solution[i].im, rhs[i].im, epsilon = 1e-8);
        }
    }

    #[test]
    fn test_pauli_hamiltonian_creation() {
        let pauli_strings = vec![("ZZ".to_string(), 1.0), ("XX".to_string(), 0.5)];

        let matrix = SparseMatrixUtils::hamiltonian_from_pauli_strings(2, &pauli_strings).unwrap();

        assert_eq!(matrix.shape, (4, 4));
        assert!(matrix.is_hermitian);
        assert!(matrix.nnz > 0);
    }

    #[test]
    fn test_random_sparse_matrix() {
        let matrix = SparseMatrixUtils::random_sparse(10, 0.1, true);

        assert_eq!(matrix.shape, (10, 10));
        assert!(matrix.is_hermitian);
        // Allow margin: mirrored Hermitian entries can double the target density
        assert!(matrix.density() <= 0.25);
    }

    #[test]
    fn test_dense_conversion() {
        let matrix = SparseMatrix::identity(3);
        let dense = matrix.to_dense().unwrap();

        assert_eq!(dense.shape(), [3, 3]);
        for i in 0..3 {
            for j in 0..3 {
                if i == j {
                    assert_abs_diff_eq!(dense[[i, j]].re, 1.0, epsilon = 1e-10);
                } else {
                    assert_abs_diff_eq!(dense[[i, j]].norm(), 0.0, epsilon = 1e-10);
                }
            }
        }
    }
}