use ndarray::{s, Array1, Array2, ArrayView1, ArrayView2, ArrayViewMut1, ArrayViewMut2};
use ndarray_linalg::Norm;
#[cfg(feature = "advanced_math")]
use ndrustfft::FftHandler;
use num_complex::Complex64;
use std::collections::HashMap;
use std::f64::consts::PI;
use std::sync::{Arc, Mutex};

use quantrs2_core::prelude::QuantRS2Error as SciRS2Error;
use scirs2_core::parallel_ops::*;
use scirs2_core::simd_ops::SimdUnifiedOps;

use rayon::prelude::*;

use crate::error::{Result, SimulatorError};

/// Dense complex matrix wrapper used by the SciRS2 backend.
#[derive(Debug, Clone)]
pub struct SciRS2Matrix {
    data: Array2<Complex64>,
    simd_aligned: bool,
}

impl SciRS2Matrix {
    /// Creates a zero-initialized matrix of the given shape.
    pub fn zeros(shape: (usize, usize), _allocator: &SciRS2MemoryAllocator) -> Result<Self> {
        Ok(Self {
            data: Array2::zeros(shape),
            simd_aligned: true,
        })
    }

    /// Wraps an existing `ndarray` matrix without copying.
    pub fn from_array2(array: Array2<Complex64>) -> Self {
        Self {
            data: array,
            simd_aligned: false,
        }
    }

    pub fn shape(&self) -> (usize, usize) {
        self.data.dim()
    }

    pub fn rows(&self) -> usize {
        self.data.nrows()
    }

    pub fn cols(&self) -> usize {
        self.data.ncols()
    }

    pub fn data_view(&self) -> ArrayView2<'_, Complex64> {
        self.data.view()
    }

    pub fn data_view_mut(&mut self) -> ArrayViewMut2<'_, Complex64> {
        self.data.view_mut()
    }
}

/// Dense complex vector wrapper used by the SciRS2 backend.
#[derive(Debug, Clone)]
pub struct SciRS2Vector {
    data: Array1<Complex64>,
    simd_aligned: bool,
}

impl SciRS2Vector {
    /// Creates a zero-initialized vector of the given length.
    pub fn zeros(len: usize, _allocator: &SciRS2MemoryAllocator) -> Result<Self> {
        Ok(Self {
            data: Array1::zeros(len),
            simd_aligned: true,
        })
    }

    /// Wraps an existing `ndarray` vector without copying.
    pub fn from_array1(array: Array1<Complex64>) -> Self {
        Self {
            data: array,
            simd_aligned: false,
        }
    }

    pub fn len(&self) -> usize {
        self.data.len()
    }

    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    pub fn data_view(&self) -> ArrayView1<'_, Complex64> {
        self.data.view()
    }

    pub fn data_view_mut(&mut self) -> ArrayViewMut1<'_, Complex64> {
        self.data.view_mut()
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Ok(self.data.clone())
    }
}
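
// A minimal usage sketch for the wrappers above: construct them from plain
// `ndarray` containers and query their dimensions. Test-only, so it compiles
// out of release builds.
#[cfg(test)]
mod wrapper_construction_example {
    use super::*;

    #[test]
    fn wraps_ndarray_types() {
        let m = SciRS2Matrix::from_array2(Array2::zeros((2, 3)));
        assert_eq!(m.shape(), (2, 3));
        assert_eq!(m.rows(), 2);
        assert_eq!(m.cols(), 3);

        let v = SciRS2Vector::from_array1(Array1::zeros(4));
        assert_eq!(v.len(), 4);
        assert!(!v.is_empty());
    }
}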

/// Tuning knobs for SIMD dispatch.
#[derive(Debug, Clone)]
pub struct SciRS2SimdConfig {
    pub force_instruction_set: Option<String>,
    pub override_simd_lanes: Option<usize>,
    pub enable_aggressive_simd: bool,
    pub numa_aware_allocation: bool,
}

impl Default for SciRS2SimdConfig {
    fn default() -> Self {
        Self {
            force_instruction_set: None,
            override_simd_lanes: None,
            enable_aggressive_simd: true,
            numa_aware_allocation: true,
        }
    }
}

/// Detected SIMD capabilities of the host CPU.
#[derive(Debug, Clone)]
pub struct SciRS2SimdContext {
    /// Number of f64 lanes processed per SIMD operation.
    pub simd_lanes: usize,
    pub supports_complex_simd: bool,
    pub instruction_set: String,
    /// Vector register width in bytes.
    pub max_vector_width: usize,
}

impl SciRS2SimdContext {
    /// Probes CPU features at runtime and returns the widest supported
    /// instruction set, falling back to scalar execution.
    pub fn detect_capabilities() -> Self {
        #[cfg(target_arch = "x86_64")]
        {
            if is_x86_feature_detected!("avx512f") {
                Self {
                    simd_lanes: 16,
                    supports_complex_simd: true,
                    instruction_set: "AVX-512".to_string(),
                    max_vector_width: 64,
                }
            } else if is_x86_feature_detected!("avx2") {
                Self {
                    simd_lanes: 8,
                    supports_complex_simd: true,
                    instruction_set: "AVX2".to_string(),
                    max_vector_width: 32,
                }
            } else if is_x86_feature_detected!("sse4.1") {
                Self {
                    simd_lanes: 4,
                    supports_complex_simd: true,
                    instruction_set: "SSE4.1".to_string(),
                    max_vector_width: 16,
                }
            } else {
                Self::fallback()
            }
        }
        #[cfg(target_arch = "aarch64")]
        {
            Self {
                simd_lanes: 4,
                supports_complex_simd: true,
                instruction_set: "NEON".to_string(),
                max_vector_width: 16,
            }
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            Self::fallback()
        }
    }

    /// Applies user overrides from `config` on top of the detected defaults.
    pub fn from_config(config: &SciRS2SimdConfig) -> Result<Self> {
        let mut context = Self::detect_capabilities();

        if let Some(ref instruction_set) = config.force_instruction_set {
            context.instruction_set = instruction_set.clone();
        }

        if let Some(simd_lanes) = config.override_simd_lanes {
            context.simd_lanes = simd_lanes;
        }

        Ok(context)
    }

    fn fallback() -> Self {
        Self {
            simd_lanes: 1,
            supports_complex_simd: false,
            instruction_set: "Scalar".to_string(),
            max_vector_width: 8,
        }
    }
}

impl Default for SciRS2SimdContext {
    fn default() -> Self {
        Self::detect_capabilities()
    }
}
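
// Hedged example: runtime capability detection always yields a usable context,
// even on targets with no SIMD support (the scalar fallback), and config
// overrides win over detected values.
#[cfg(test)]
mod simd_context_example {
    use super::*;

    #[test]
    fn detection_reports_a_usable_context() {
        let ctx = SciRS2SimdContext::detect_capabilities();
        assert!(ctx.simd_lanes >= 1);
        assert!(!ctx.instruction_set.is_empty());

        let config = SciRS2SimdConfig {
            override_simd_lanes: Some(2),
            ..Default::default()
        };
        let overridden = SciRS2SimdContext::from_config(&config).unwrap();
        assert_eq!(overridden.simd_lanes, 2);
    }
}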

/// Bookkeeping for aligned allocations. All fields are plain integers, so the
/// manual `Send`/`Sync` impls below are trivially sound (and strictly
/// speaking redundant).
#[derive(Debug)]
pub struct SciRS2MemoryAllocator {
    pub total_allocated: usize,
    /// Allocation alignment in bytes (64 = typical cache-line size).
    pub alignment: usize,
    allocation_count: usize,
}

unsafe impl Send for SciRS2MemoryAllocator {}
unsafe impl Sync for SciRS2MemoryAllocator {}

impl Default for SciRS2MemoryAllocator {
    fn default() -> Self {
        Self {
            total_allocated: 0,
            alignment: 64,
            allocation_count: 0,
        }
    }
}

impl SciRS2MemoryAllocator {
    pub fn new() -> Self {
        Self::default()
    }
}

#[derive(Debug)]
pub struct SciRS2VectorizedFFT {
    plans: HashMap<usize, FFTPlan>,
    optimization_level: OptimizationLevel,
}

#[derive(Debug, Clone)]
pub struct FFTPlan {
    pub size: usize,
    pub twiddle_factors: Vec<Complex64>,
    pub vectorization_strategy: VectorizationStrategy,
}

#[derive(Debug, Clone, Copy)]
pub enum VectorizationStrategy {
    SimdComplexSeparate,
    SimdComplexInterleaved,
    Adaptive,
}

#[derive(Debug, Clone, Copy)]
pub enum OptimizationLevel {
    Basic,
    Aggressive,
    Maximum,
}

impl Default for SciRS2VectorizedFFT {
    fn default() -> Self {
        Self {
            plans: HashMap::new(),
            optimization_level: OptimizationLevel::Aggressive,
        }
    }
}

impl SciRS2VectorizedFFT {
    pub fn forward(&self, input: &SciRS2Vector) -> Result<SciRS2Vector> {
        let data = input.data_view().to_owned();

        #[cfg(feature = "advanced_math")]
        {
            let mut handler = FftHandler::<f64>::new(data.len());
            let mut output = data.clone();
            ndrustfft::ndfft(&data, &mut output, &mut handler, 0);
            Ok(SciRS2Vector::from_array1(output))
        }

        #[cfg(not(feature = "advanced_math"))]
        {
            // Fallback: naive O(n^2) DFT, correct but slow; only used when
            // the `advanced_math` FFT backend is compiled out.
            let n = data.len();
            let mut output = Array1::zeros(n);
            for k in 0..n {
                let mut sum = Complex64::new(0.0, 0.0);
                for j in 0..n {
                    let angle = -2.0 * PI * (k * j) as f64 / n as f64;
                    let twiddle = Complex64::new(angle.cos(), angle.sin());
                    sum += data[j] * twiddle;
                }
                output[k] = sum;
            }
            Ok(SciRS2Vector::from_array1(output))
        }
    }

    pub fn inverse(&self, input: &SciRS2Vector) -> Result<SciRS2Vector> {
        let data = input.data_view().to_owned();

        #[cfg(feature = "advanced_math")]
        {
            let mut handler = FftHandler::<f64>::new(data.len());
            let mut output = data.clone();
            ndrustfft::ndifft(&data, &mut output, &mut handler, 0);
            Ok(SciRS2Vector::from_array1(output))
        }

        #[cfg(not(feature = "advanced_math"))]
        {
            // Fallback: naive O(n^2) inverse DFT with 1/n normalization.
            let n = data.len();
            let mut output = Array1::zeros(n);
            let scale = 1.0 / n as f64;
            for k in 0..n {
                let mut sum = Complex64::new(0.0, 0.0);
                for j in 0..n {
                    let angle = 2.0 * PI * (k * j) as f64 / n as f64;
                    let twiddle = Complex64::new(angle.cos(), angle.sin());
                    sum += data[j] * twiddle;
                }
                output[k] = sum * scale;
            }
            Ok(SciRS2Vector::from_array1(output))
        }
    }
}
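
// Hedged example: a forward transform followed by the inverse should
// reproduce the input up to floating-point error. This holds for both the
// `advanced_math` FFT path and the naive DFT fallback, since they implement
// the same transform semantics.
#[cfg(test)]
mod fft_roundtrip_example {
    use super::*;

    #[test]
    fn forward_then_inverse_is_identity() {
        let fft = SciRS2VectorizedFFT::default();
        let input = SciRS2Vector::from_array1(Array1::from_vec(vec![
            Complex64::new(1.0, 0.0),
            Complex64::new(0.0, 1.0),
            Complex64::new(-1.0, 0.0),
            Complex64::new(0.0, -1.0),
        ]));

        let spectrum = fft.forward(&input).unwrap();
        let recovered = fft.inverse(&spectrum).unwrap();

        for (a, b) in input.data_view().iter().zip(recovered.data_view().iter()) {
            assert!((a - b).norm() < 1e-9);
        }
    }
}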

#[derive(Debug)]
pub struct SciRS2ParallelContext {
    pub num_threads: usize,
    pub thread_pool: rayon::ThreadPool,
    pub numa_aware: bool,
}

impl Default for SciRS2ParallelContext {
    fn default() -> Self {
        let num_threads = rayon::current_num_threads();
        let thread_pool = rayon::ThreadPoolBuilder::new()
            .num_threads(num_threads)
            .build()
            .unwrap_or_else(|_| rayon::ThreadPoolBuilder::new().build().unwrap());

        Self {
            num_threads,
            thread_pool,
            numa_aware: true,
        }
    }
}

#[derive(Debug, Default, Clone)]
pub struct BackendStats {
    pub simd_vector_ops: usize,
    pub simd_matrix_ops: usize,
    pub complex_simd_ops: usize,
    pub simd_time_ns: u64,
    pub parallel_time_ns: u64,
    pub memory_usage_bytes: usize,
    pub peak_simd_throughput: f64,
    pub simd_efficiency: f64,
    pub vectorized_fft_ops: usize,
    pub sparse_simd_ops: usize,
    pub matrix_ops: usize,
    pub lapack_time_ms: f64,
    pub cache_hit_rate: f64,
}

/// Top-level SciRS2 compute backend bundling the SIMD context, memory
/// allocator, FFT engine, and thread-pool facilities.
#[derive(Debug)]
pub struct SciRS2Backend {
    pub available: bool,
    pub stats: Arc<Mutex<BackendStats>>,
    pub simd_context: SciRS2SimdContext,
    pub memory_allocator: SciRS2MemoryAllocator,
    pub fft_engine: SciRS2VectorizedFFT,
    pub parallel_context: SciRS2ParallelContext,
}

impl SciRS2Backend {
    pub fn new() -> Self {
        let simd_context = SciRS2SimdContext::detect_capabilities();
        let memory_allocator = SciRS2MemoryAllocator::default();
        let fft_engine = SciRS2VectorizedFFT::default();
        let parallel_context = SciRS2ParallelContext::default();

        Self {
            available: simd_context.supports_complex_simd,
            stats: Arc::new(Mutex::new(BackendStats::default())),
            simd_context,
            memory_allocator,
            fft_engine,
            parallel_context,
        }
    }

    /// Builds a backend whose SIMD context honors the overrides in `simd_config`.
    pub fn with_config(simd_config: SciRS2SimdConfig) -> Result<Self> {
        let mut backend = Self::new();
        backend.simd_context = SciRS2SimdContext::from_config(&simd_config)?;
        Ok(backend)
    }

    pub fn is_available(&self) -> bool {
        self.available && self.simd_context.supports_complex_simd
    }

    pub fn get_simd_info(&self) -> &SciRS2SimdContext {
        &self.simd_context
    }

    pub fn get_stats(&self) -> BackendStats {
        self.stats.lock().unwrap().clone()
    }

    pub fn reset_stats(&self) {
        *self.stats.lock().unwrap() = BackendStats::default();
    }

    pub fn matrix_multiply(&self, a: &SciRS2Matrix, b: &SciRS2Matrix) -> Result<SciRS2Matrix> {
        let start_time = std::time::Instant::now();

        if a.cols() != b.rows() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Cannot multiply {}x{} matrix with {}x{} matrix",
                a.rows(),
                a.cols(),
                b.rows(),
                b.cols()
            )));
        }

        let result_shape = (a.rows(), b.cols());
        let mut result = SciRS2Matrix::zeros(result_shape, &self.memory_allocator)?;

        self.simd_gemm_complex(&a.data_view(), &b.data_view(), &mut result.data_view_mut())?;

        {
            let mut stats = self.stats.lock().unwrap();
            stats.simd_matrix_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }

    pub fn matrix_vector_multiply(
        &self,
        a: &SciRS2Matrix,
        x: &SciRS2Vector,
    ) -> Result<SciRS2Vector> {
        let start_time = std::time::Instant::now();

        if a.cols() != x.len() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Cannot multiply {}x{} matrix with vector of length {}",
                a.rows(),
                a.cols(),
                x.len()
            )));
        }

        let mut result = SciRS2Vector::zeros(a.rows(), &self.memory_allocator)?;

        self.simd_gemv_complex(&a.data_view(), &x.data_view(), &mut result.data_view_mut())?;

        {
            let mut stats = self.stats.lock().unwrap();
            stats.simd_vector_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }

    /// Complex GEMM over split real/imaginary planes (structure-of-arrays
    /// layout, which is the SIMD-friendly form). The original code dispatched
    /// on `simd_lanes`, but both branches contained the identical scalar
    /// kernel, so the dispatch has been collapsed; a true SIMD kernel would
    /// specialize here.
    fn simd_gemm_complex(
        &self,
        a: &ArrayView2<Complex64>,
        b: &ArrayView2<Complex64>,
        c: &mut ArrayViewMut2<Complex64>,
    ) -> Result<()> {
        let (m, k) = a.dim();
        let (k2, n) = b.dim();

        assert_eq!(k, k2, "Inner dimensions must match");
        assert_eq!(c.dim(), (m, n), "Output dimensions must match");

        let a_real: Vec<f64> = a.iter().map(|z| z.re).collect();
        let a_imag: Vec<f64> = a.iter().map(|z| z.im).collect();
        let b_real: Vec<f64> = b.iter().map(|z| z.re).collect();
        let b_imag: Vec<f64> = b.iter().map(|z| z.im).collect();

        for i in 0..m {
            for j in 0..n {
                let mut real_sum = 0.0;
                let mut imag_sum = 0.0;

                let a_row_start = i * k;

                for l in 0..k {
                    let b_idx = l * n + j;
                    let ar = a_real[a_row_start + l];
                    let ai = a_imag[a_row_start + l];
                    let br = b_real[b_idx];
                    let bi = b_imag[b_idx];

                    // (ar + i*ai) * (br + i*bi)
                    real_sum += ar * br - ai * bi;
                    imag_sum += ar * bi + ai * br;
                }

                c[[i, j]] = Complex64::new(real_sum, imag_sum);
            }
        }

        Ok(())
    }

    /// Complex GEMV over split real/imaginary planes. The lane-blocked loop
    /// mirrors the intended SIMD structure but currently performs the same
    /// scalar arithmetic as the tail loop.
    fn simd_gemv_complex(
        &self,
        a: &ArrayView2<Complex64>,
        x: &ArrayView1<Complex64>,
        y: &mut ArrayViewMut1<Complex64>,
    ) -> Result<()> {
        let (m, n) = a.dim();
        assert_eq!(x.len(), n, "Vector length must match matrix columns");
        assert_eq!(y.len(), m, "Output vector length must match matrix rows");

        let a_real: Vec<f64> = a.iter().map(|z| z.re).collect();
        let a_imag: Vec<f64> = a.iter().map(|z| z.im).collect();
        let x_real: Vec<f64> = x.iter().map(|z| z.re).collect();
        let x_imag: Vec<f64> = x.iter().map(|z| z.im).collect();

        for i in 0..m {
            let mut real_sum = 0.0;
            let mut imag_sum = 0.0;

            let row_start = i * n;

            if n >= self.simd_context.simd_lanes {
                let chunks = n / self.simd_context.simd_lanes;

                // Main lane-blocked loop.
                for chunk in 0..chunks {
                    let start_idx = chunk * self.simd_context.simd_lanes;
                    let end_idx = start_idx + self.simd_context.simd_lanes;

                    for j in start_idx..end_idx {
                        let a_idx = row_start + j;
                        let ar = a_real[a_idx];
                        let ai = a_imag[a_idx];
                        let xr = x_real[j];
                        let xi = x_imag[j];

                        real_sum += ar * xr - ai * xi;
                        imag_sum += ar * xi + ai * xr;
                    }
                }

                // Scalar tail for the remaining n % simd_lanes elements.
                for j in (chunks * self.simd_context.simd_lanes)..n {
                    let a_idx = row_start + j;
                    let ar = a_real[a_idx];
                    let ai = a_imag[a_idx];
                    let xr = x_real[j];
                    let xi = x_imag[j];

                    real_sum += ar * xr - ai * xi;
                    imag_sum += ar * xi + ai * xr;
                }
            } else {
                for j in 0..n {
                    let a_idx = row_start + j;
                    let ar = a_real[a_idx];
                    let ai = a_imag[a_idx];
                    let xr = x_real[j];
                    let xi = x_imag[j];

                    real_sum += ar * xr - ai * xi;
                    imag_sum += ar * xi + ai * xr;
                }
            }

            y[i] = Complex64::new(real_sum, imag_sum);
        }

        Ok(())
    }

    /// Singular value decomposition via the LAPACK wrapper (requires the
    /// `advanced_math` feature).
    #[cfg(feature = "advanced_math")]
    pub fn svd(&mut self, matrix: &Matrix) -> Result<SvdResult> {
        let start_time = std::time::Instant::now();

        let result = LAPACK::svd(matrix)?;

        let mut stats = self.stats.lock().unwrap();
        stats.simd_matrix_ops += 1;
        stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;

        Ok(result)
    }

    /// Eigendecomposition via the LAPACK wrapper (requires the
    /// `advanced_math` feature).
    #[cfg(feature = "advanced_math")]
    pub fn eigendecomposition(&mut self, matrix: &Matrix) -> Result<EigResult> {
        let start_time = std::time::Instant::now();

        let result = LAPACK::eig(matrix)?;

        let mut stats = self.stats.lock().unwrap();
        stats.simd_matrix_ops += 1;
        stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;

        Ok(result)
    }
}

impl Default for SciRS2Backend {
    fn default() -> Self {
        Self::new()
    }
}
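
// Hedged example: identity times B returns B through the backend's
// dimension-checked entry point, and mismatched shapes are rejected. Uses
// only APIs defined in this module.
#[cfg(test)]
mod backend_matmul_example {
    use super::*;
    use ndarray::array;

    #[test]
    fn identity_times_b_is_b() {
        let backend = SciRS2Backend::new();

        let a = SciRS2Matrix::from_array2(array![
            [Complex64::new(1.0, 0.0), Complex64::new(0.0, 0.0)],
            [Complex64::new(0.0, 0.0), Complex64::new(1.0, 0.0)]
        ]);
        let b = SciRS2Matrix::from_array2(array![
            [Complex64::new(2.0, 0.0), Complex64::new(0.0, 1.0)],
            [Complex64::new(0.0, -1.0), Complex64::new(3.0, 0.0)]
        ]);

        let c = backend.matrix_multiply(&a, &b).unwrap();
        for (lhs, rhs) in c.data_view().iter().zip(b.data_view().iter()) {
            assert!((lhs - rhs).norm() < 1e-12);
        }

        let bad = SciRS2Matrix::from_array2(Array2::zeros((3, 3)));
        assert!(backend.matrix_multiply(&a, &bad).is_err());
    }
}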

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct MemoryPool {
    _placeholder: (),
}

#[cfg(feature = "advanced_math")]
impl MemoryPool {
    pub fn new() -> Self {
        Self { _placeholder: () }
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct MemoryPool;

#[cfg(not(feature = "advanced_math"))]
impl MemoryPool {
    pub fn new() -> Self {
        Self
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct FftEngine;

#[cfg(feature = "advanced_math")]
impl FftEngine {
    pub fn new() -> Self {
        Self
    }

    pub fn forward(&self, input: &Vector) -> Result<Vector> {
        use ndrustfft::{ndfft, FftHandler};

        let array = input.to_array1()?;
        let mut handler = FftHandler::new(array.len());
        let mut fft_result = array.clone();

        ndfft(&array, &mut fft_result, &mut handler, 0);

        Vector::from_array1(&fft_result.view(), &MemoryPool::new())
    }

    pub fn inverse(&self, input: &Vector) -> Result<Vector> {
        use ndrustfft::{ndifft, FftHandler};

        let array = input.to_array1()?;
        let mut handler = FftHandler::new(array.len());
        let mut ifft_result = array.clone();

        ndifft(&array, &mut ifft_result, &mut handler, 0);

        Vector::from_array1(&ifft_result.view(), &MemoryPool::new())
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct FftEngine;

#[cfg(not(feature = "advanced_math"))]
impl FftEngine {
    pub fn new() -> Self {
        Self
    }

    pub fn forward(&self, _input: &Vector) -> Result<Vector> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn inverse(&self, _input: &Vector) -> Result<Vector> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct Matrix {
    data: Array2<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl Matrix {
    pub fn from_array2(array: &ArrayView2<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: array.to_owned(),
        })
    }

    pub fn zeros(shape: (usize, usize), _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: Array2::zeros(shape),
        })
    }

    pub fn to_array2(&self, result: &mut Array2<Complex64>) -> Result<()> {
        if result.shape() != self.data.shape() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Expected shape {:?}, but got {:?}",
                self.data.shape(),
                result.shape()
            )));
        }
        result.assign(&self.data);
        Ok(())
    }

    pub fn shape(&self) -> (usize, usize) {
        (self.data.nrows(), self.data.ncols())
    }

    pub fn view(&self) -> ArrayView2<'_, Complex64> {
        self.data.view()
    }

    pub fn view_mut(&mut self) -> ndarray::ArrayViewMut2<'_, Complex64> {
        self.data.view_mut()
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct Matrix;

#[cfg(not(feature = "advanced_math"))]
impl Matrix {
    pub fn from_array2(_array: &ArrayView2<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn zeros(_shape: (usize, usize), _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array2(&self, _result: &mut Array2<Complex64>) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct Vector {
    data: Array1<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl Vector {
    pub fn from_array1(array: &ArrayView1<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: array.to_owned(),
        })
    }

    pub fn zeros(len: usize, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: Array1::zeros(len),
        })
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Ok(self.data.clone())
    }

    pub fn to_array1_mut(&self, result: &mut Array1<Complex64>) -> Result<()> {
        if result.len() != self.data.len() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Expected length {}, but got {}",
                self.data.len(),
                result.len()
            )));
        }
        result.assign(&self.data);
        Ok(())
    }

    pub fn len(&self) -> usize {
        self.data.len()
    }

    pub fn view(&self) -> ArrayView1<'_, Complex64> {
        self.data.view()
    }

    pub fn view_mut(&mut self) -> ndarray::ArrayViewMut1<'_, Complex64> {
        self.data.view_mut()
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct Vector;

#[cfg(not(feature = "advanced_math"))]
impl Vector {
    pub fn from_array1(_array: &ArrayView1<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn zeros(_len: usize, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array1_mut(&self, _result: &mut Array1<Complex64>) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct SparseMatrix {
    csr_matrix: nalgebra_sparse::CsrMatrix<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl SparseMatrix {
    /// Builds a CSR matrix from raw CSR components (`row_ptr` holds the row
    /// offsets, of length `num_rows + 1`).
    pub fn from_csr(
        values: &[Complex64],
        col_indices: &[usize],
        row_ptr: &[usize],
        num_rows: usize,
        num_cols: usize,
        _pool: &MemoryPool,
    ) -> Result<Self> {
        use nalgebra_sparse::CsrMatrix;

        let csr_matrix = CsrMatrix::try_from_csr_data(
            num_rows,
            num_cols,
            row_ptr.to_vec(),
            col_indices.to_vec(),
            values.to_vec(),
        )
        .map_err(|e| {
            SimulatorError::ComputationError(format!("Failed to create CSR matrix: {}", e))
        })?;

        Ok(Self { csr_matrix })
    }

    /// Sparse matrix-vector product: `result = A * vector`.
    pub fn matvec(&self, vector: &Vector, result: &mut Vector) -> Result<()> {
        use nalgebra::{Complex, DVector};

        let input_vec = vector.to_array1()?;
        let nalgebra_vec = DVector::from_iterator(
            input_vec.len(),
            input_vec.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut output = DVector::zeros(self.csr_matrix.nrows());

        // Row-wise CSR traversal: accumulate each row's sparse dot product.
        for (row_idx, row) in self.csr_matrix.row_iter().enumerate() {
            let mut sum = Complex::new(0.0, 0.0);
            for (col_idx, value) in row.col_indices().iter().zip(row.values()) {
                sum += value * nalgebra_vec[*col_idx];
            }
            output[row_idx] = sum;
        }

        let output_array: Array1<Complex64> =
            Array1::from_iter(output.iter().map(|c| Complex64::new(c.re, c.im)));

        result.data.assign(&output_array);
        Ok(())
    }

    /// Approximate solve of `A x = b` using only the diagonal of `A` (a
    /// single Jacobi/diagonal-preconditioner step). The original looped this
    /// fixed-point update 100 times, but every pass was identical, so one
    /// pass suffices. This is a placeholder, not a general sparse solver;
    /// use `SparseSolvers` for real iterative methods.
    pub fn solve(&self, rhs: &Vector) -> Result<Vector> {
        use nalgebra::Complex;
        use nalgebra_sparse::SparseEntry;

        let rhs_array = rhs.to_array1()?;
        let mut solution = rhs_array.clone();

        for i in 0..solution.len().min(self.csr_matrix.nrows()) {
            let diag = self
                .csr_matrix
                .get_entry(i, i)
                .map(|entry| match entry {
                    SparseEntry::NonZero(v) => *v,
                    SparseEntry::Zero => Complex::new(0.0, 0.0),
                })
                .unwrap_or(Complex::new(1.0, 0.0));

            if diag.norm() > 1e-14 {
                solution[i] = rhs_array[i] / diag;
            }
        }

        Vector::from_array1(&solution.view(), &MemoryPool::new())
    }

    pub fn shape(&self) -> (usize, usize) {
        (self.csr_matrix.nrows(), self.csr_matrix.ncols())
    }

    pub fn nnz(&self) -> usize {
        self.csr_matrix.nnz()
    }
}
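
// Hedged example: a 2x2 diagonal matrix in CSR form applied to the all-ones
// vector. `row_ptr` is the offsets array [0, 1, 2], i.e. one entry per row.
#[cfg(all(test, feature = "advanced_math"))]
mod sparse_matvec_example {
    use super::*;

    #[test]
    fn diagonal_matvec() {
        let pool = MemoryPool::new();
        let values = [Complex64::new(2.0, 0.0), Complex64::new(3.0, 0.0)];
        let col_indices = [0usize, 1];
        let row_ptr = [0usize, 1, 2];
        let m = SparseMatrix::from_csr(&values, &col_indices, &row_ptr, 2, 2, &pool).unwrap();
        assert_eq!(m.shape(), (2, 2));
        assert_eq!(m.nnz(), 2);

        let x = Vector::from_array1(&Array1::ones(2).view(), &pool).unwrap();
        let mut y = Vector::zeros(2, &pool).unwrap();
        m.matvec(&x, &mut y).unwrap();

        let out = y.to_array1().unwrap();
        assert!((out[0] - Complex64::new(2.0, 0.0)).norm() < 1e-12);
        assert!((out[1] - Complex64::new(3.0, 0.0)).norm() < 1e-12);
    }
}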

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct SparseMatrix;

#[cfg(not(feature = "advanced_math"))]
impl SparseMatrix {
    pub fn from_csr(
        _values: &[Complex64],
        _col_indices: &[usize],
        _row_ptr: &[usize],
        _num_rows: usize,
        _num_cols: usize,
        _pool: &MemoryPool,
    ) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn matvec(&self, _vector: &Vector, _result: &mut Vector) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn solve(&self, _rhs: &Vector) -> Result<Vector> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct BLAS;

#[cfg(feature = "advanced_math")]
impl BLAS {
    /// General matrix-matrix multiply: `C = alpha * A * B + beta * C`.
    pub fn gemm(
        alpha: Complex64,
        a: &Matrix,
        b: &Matrix,
        beta: Complex64,
        c: &mut Matrix,
    ) -> Result<()> {
        let a_scaled = &a.data * alpha;
        let c_scaled = &c.data * beta;
        let result = a_scaled.dot(&b.data) + c_scaled;
        c.data.assign(&result);
        Ok(())
    }

    /// General matrix-vector multiply: `y = alpha * A * x + beta * y`.
    pub fn gemv(
        alpha: Complex64,
        a: &Matrix,
        x: &Vector,
        beta: Complex64,
        y: &mut Vector,
    ) -> Result<()> {
        let y_scaled = &y.data * beta;
        let result = &a.data.dot(&x.data) * alpha + y_scaled;
        y.data.assign(&result);
        Ok(())
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct BLAS;

#[cfg(not(feature = "advanced_math"))]
impl BLAS {
    pub fn gemm(
        _alpha: Complex64,
        _a: &Matrix,
        _b: &Matrix,
        _beta: Complex64,
        _c: &mut Matrix,
    ) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn gemv(
        _alpha: Complex64,
        _a: &Matrix,
        _x: &Vector,
        _beta: Complex64,
        _y: &mut Vector,
    ) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct LAPACK;

#[cfg(feature = "advanced_math")]
impl LAPACK {
    pub fn svd(matrix: &Matrix) -> Result<SvdResult> {
        use ndarray_linalg::SVD;

        let svd_result = matrix
            .data
            .svd(true, true)
            .map_err(|_| SimulatorError::ComputationError("SVD computation failed".to_string()))?;

        let pool = MemoryPool::new();

        let u_data = svd_result.0.ok_or_else(|| {
            SimulatorError::ComputationError("SVD U matrix not computed".to_string())
        })?;
        let s_data = svd_result.1;
        let vt_data = svd_result.2.ok_or_else(|| {
            SimulatorError::ComputationError("SVD Vt matrix not computed".to_string())
        })?;

        let u = Matrix::from_array2(&u_data.view(), &pool)?;
        // Singular values come back real; promote them to Complex64 for the
        // uniform Vector type used throughout this module.
        let s_complex: Array1<Complex64> = s_data.mapv(|x| Complex64::new(x, 0.0));
        let s = Vector::from_array1(&s_complex.view(), &pool)?;
        let vt = Matrix::from_array2(&vt_data.view(), &pool)?;

        Ok(SvdResult { u, s, vt })
    }

    pub fn eig(matrix: &Matrix) -> Result<EigResult> {
        use ndarray_linalg::Eig;

        let eig_result = matrix.data.eig().map_err(|_| {
            SimulatorError::ComputationError("Eigenvalue decomposition failed".to_string())
        })?;

        let pool = MemoryPool::new();
        let values = Vector::from_array1(&eig_result.0.view(), &pool)?;
        let vectors = Matrix::from_array2(&eig_result.1.view(), &pool)?;

        Ok(EigResult { values, vectors })
    }

    /// LU decomposition with partial pivoting via Gaussian elimination.
    /// Returns `(L, U, perm)`, where `perm` records the row permutation.
    pub fn lu(matrix: &Matrix) -> Result<(Matrix, Matrix, Vec<usize>)> {
        let n = matrix.data.nrows();
        let pool = MemoryPool::new();

        let mut l_data = Array2::eye(n);
        let mut u_data = matrix.data.clone();
        let mut perm_vec: Vec<usize> = (0..n).collect();

        for k in 0..n {
            // Partial pivoting: pick the row with the largest magnitude in
            // column k.
            let mut max_row = k;
            let mut max_val = u_data[[k, k]].norm();
            for i in k + 1..n {
                let val = u_data[[i, k]].norm();
                if val > max_val {
                    max_val = val;
                    max_row = i;
                }
            }

            if max_row != k {
                for j in 0..n {
                    u_data.swap([k, j], [max_row, j]);
                }
                // Keep L consistent with the row swap for already-eliminated
                // columns (the original omitted this).
                for j in 0..k {
                    l_data.swap([k, j], [max_row, j]);
                }
                perm_vec.swap(k, max_row);
            }

            // Eliminate entries below the pivot (skipped for near-singular
            // pivots).
            if u_data[[k, k]].norm() > 1e-10 {
                for i in k + 1..n {
                    let factor = u_data[[i, k]] / u_data[[k, k]];
                    l_data[[i, k]] = factor;
                    for j in k..n {
                        let u_kj = u_data[[k, j]];
                        u_data[[i, j]] -= factor * u_kj;
                    }
                }
            }
        }

        let l_matrix = Matrix::from_array2(&l_data.view(), &pool)?;
        let u_matrix = Matrix::from_array2(&u_data.view(), &pool)?;

        Ok((l_matrix, u_matrix, perm_vec))
    }

    pub fn qr(matrix: &Matrix) -> Result<(Matrix, Matrix)> {
        use ndarray_linalg::QR;

        let (q, r) = matrix
            .data
            .qr()
            .map_err(|_| SimulatorError::ComputationError("QR decomposition failed".to_string()))?;

        let pool = MemoryPool::new();
        let q_matrix = Matrix::from_array2(&q.view(), &pool)?;
        let r_matrix = Matrix::from_array2(&r.view(), &pool)?;

        Ok((q_matrix, r_matrix))
    }
}
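
// Hedged example: LAPACK-backed decompositions on the identity matrix, where
// the results are known in closed form (singular values all 1; L = U = I and
// a trivial permutation for the LU factorization).
#[cfg(all(test, feature = "advanced_math"))]
mod lapack_example {
    use super::*;

    #[test]
    fn identity_decompositions() {
        let pool = MemoryPool::new();
        let m = Matrix::from_array2(&Array2::eye(3).view(), &pool).unwrap();

        let svd = LAPACK::svd(&m).unwrap();
        for sigma in svd.s.to_array1().unwrap().iter() {
            assert!((sigma.re - 1.0).abs() < 1e-10);
        }

        let (l, u, perm) = LAPACK::lu(&m).unwrap();
        assert_eq!(perm, vec![0, 1, 2]);
        assert!((l.view()[[0, 0]] - Complex64::new(1.0, 0.0)).norm() < 1e-12);
        assert!((u.view()[[2, 2]] - Complex64::new(1.0, 0.0)).norm() < 1e-12);
    }
}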

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct LAPACK;

#[cfg(not(feature = "advanced_math"))]
impl LAPACK {
    pub fn svd(_matrix: &Matrix) -> Result<SvdResult> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn eig(_matrix: &Matrix) -> Result<EigResult> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct SvdResult {
    pub u: Matrix,
    pub s: Vector,
    pub vt: Matrix,
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct EigResult {
    pub values: Vector,
    pub vectors: Matrix,
}

#[cfg(feature = "advanced_math")]
impl EigResult {
    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        self.values.to_array1()
    }

    pub fn eigenvalues(&self) -> &Vector {
        &self.values
    }

    pub fn eigenvectors(&self) -> &Matrix {
        &self.vectors
    }
}

#[cfg(feature = "advanced_math")]
impl SvdResult {
    pub fn to_array2(&self) -> Result<Array2<Complex64>> {
        self.u.data.to_owned().into_dimensionality().map_err(|_| {
            SimulatorError::ComputationError("Failed to convert SVD result to array2".to_string())
        })
    }

    pub fn u_matrix(&self) -> &Matrix {
        &self.u
    }

    pub fn singular_values(&self) -> &Vector {
        &self.s
    }

    pub fn vt_matrix(&self) -> &Matrix {
        &self.vt
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct SvdResult;

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct EigResult;

#[cfg(not(feature = "advanced_math"))]
impl EigResult {
    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(not(feature = "advanced_math"))]
impl SvdResult {
    pub fn to_array2(&self) -> Result<Array2<Complex64>> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedFFT;

#[cfg(feature = "advanced_math")]
impl AdvancedFFT {
    /// 2D FFT computed separably: transform every row, then every column.
    pub fn fft_nd(input: &Array2<Complex64>) -> Result<Array2<Complex64>> {
        use ndrustfft::{ndfft, FftHandler};

        let (rows, cols) = input.dim();
        let mut result = input.clone();

        for i in 0..rows {
            let row = input.row(i).to_owned();
            let mut row_out = row.clone();
            let mut handler = FftHandler::new(cols);
            ndfft(&row, &mut row_out, &mut handler, 0);
            result.row_mut(i).assign(&row_out);
        }

        for j in 0..cols {
            let col = result.column(j).to_owned();
            let mut col_out = col.clone();
            let mut handler = FftHandler::new(rows);
            ndfft(&col, &mut col_out, &mut handler, 0);
            result.column_mut(j).assign(&col_out);
        }

        Ok(result)
    }

    /// Short-time FFT with a Hann window; consecutive windows overlap by
    /// `overlap` samples.
    pub fn windowed_fft(
        input: &Vector,
        window_size: usize,
        overlap: usize,
    ) -> Result<Array2<Complex64>> {
        // Guard against the zero/underflowing step size the original would
        // hit when overlap >= window_size.
        if window_size <= overlap {
            return Err(SimulatorError::DimensionMismatch(format!(
                "window_size ({}) must be greater than overlap ({})",
                window_size, overlap
            )));
        }

        let array = input.to_array1()?;
        let step_size = window_size - overlap;
        let num_windows = (array.len() - overlap) / step_size;

        let mut result = Array2::zeros((num_windows, window_size));

        for (i, mut row) in result.outer_iter_mut().enumerate() {
            let start = i * step_size;
            let end = (start + window_size).min(array.len());

            if end - start == window_size {
                let window = array.slice(s![start..end]);

                // Apply a Hann window to reduce spectral leakage.
                let windowed: Array1<Complex64> = window
                    .iter()
                    .enumerate()
                    .map(|(j, &val)| {
                        let hann =
                            0.5 * (1.0 - (2.0 * PI * j as f64 / (window_size - 1) as f64).cos());
                        val * Complex64::new(hann, 0.0)
                    })
                    .collect();

                let mut handler = FftHandler::new(window_size);
                let mut fft_result = windowed.clone();
                ndrustfft::ndfft(&windowed, &mut fft_result, &mut handler, 0);

                row.assign(&fft_result);
            }
        }

        Ok(result)
    }

    /// Linear convolution via zero-padded FFTs: pad both inputs to a power of
    /// two at least `len(a) + len(b) - 1`, multiply the spectra pointwise,
    /// and inverse-transform.
    pub fn convolution(a: &Vector, b: &Vector) -> Result<Vector> {
        let a_array = a.to_array1()?;
        let b_array = b.to_array1()?;

        let n = a_array.len() + b_array.len() - 1;
        let fft_size = n.next_power_of_two();

        let mut a_padded = Array1::zeros(fft_size);
        let mut b_padded = Array1::zeros(fft_size);
        a_padded.slice_mut(s![..a_array.len()]).assign(&a_array);
        b_padded.slice_mut(s![..b_array.len()]).assign(&b_array);

        let mut handler = FftHandler::new(fft_size);
        let mut a_fft = a_padded.clone();
        let mut b_fft = b_padded.clone();
        ndrustfft::ndfft(&a_padded, &mut a_fft, &mut handler, 0);
        ndrustfft::ndfft(&b_padded, &mut b_fft, &mut handler, 0);

        // Convolution theorem: pointwise product in the frequency domain.
        let mut product = Array1::zeros(fft_size);
        for i in 0..fft_size {
            product[i] = a_fft[i] * b_fft[i];
        }

        let mut result = product.clone();
        ndrustfft::ndifft(&product, &mut result, &mut handler, 0);

        let truncated = result.slice(s![..n]).to_owned();
        Vector::from_array1(&truncated.view(), &MemoryPool::new())
    }
}
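
// Hedged example: convolving a short signal with a unit impulse returns the
// signal itself, padded to length len(a) + len(b) - 1.
#[cfg(all(test, feature = "advanced_math"))]
mod convolution_example {
    use super::*;

    #[test]
    fn impulse_is_convolution_identity() {
        let pool = MemoryPool::new();
        let signal = Vector::from_array1(
            &Array1::from_vec(vec![
                Complex64::new(1.0, 0.0),
                Complex64::new(2.0, 0.0),
                Complex64::new(3.0, 0.0),
            ])
            .view(),
            &pool,
        )
        .unwrap();
        let impulse = Vector::from_array1(
            &Array1::from_vec(vec![Complex64::new(1.0, 0.0)]).view(),
            &pool,
        )
        .unwrap();

        let out = AdvancedFFT::convolution(&signal, &impulse)
            .unwrap()
            .to_array1()
            .unwrap();
        assert_eq!(out.len(), 3);
        assert!((out[1] - Complex64::new(2.0, 0.0)).norm() < 1e-9);
    }
}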

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct SparseSolvers;

#[cfg(feature = "advanced_math")]
impl SparseSolvers {
    /// Conjugate gradient for `A x = b`. Note: `DVector::dot` does not
    /// conjugate, so this is the textbook real-CG recurrence applied to
    /// complex data; it is appropriate for Hermitian systems whose inner
    /// products come out (near-)real.
    pub fn conjugate_gradient(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
    ) -> Result<Vector> {
        use nalgebra::{Complex, DVector};

        let b_array = b.to_array1()?;
        let b_vec = DVector::from_iterator(
            b_array.len(),
            b_array.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut x = if let Some(x0_vec) = x0 {
            let x0_array = x0_vec.to_array1()?;
            DVector::from_iterator(
                x0_array.len(),
                x0_array.iter().map(|&c| Complex::new(c.re, c.im)),
            )
        } else {
            DVector::zeros(b_vec.len())
        };

        // Initial residual r = b - A x0.
        let pool = MemoryPool::new();
        let x_vector = Vector::from_array1(
            &Array1::from_vec(x.iter().map(|c| Complex64::new(c.re, c.im)).collect()).view(),
            &pool,
        )?;
        let mut ax_vector = Vector::zeros(x.len(), &pool)?;
        matrix.matvec(&x_vector, &mut ax_vector)?;

        let ax_array = ax_vector.to_array1()?;
        let ax = DVector::from_iterator(
            ax_array.len(),
            ax_array.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut r = &b_vec - &ax;
        let mut p = r.clone();
        let mut rsold = r.dot(&r).re;

        for _ in 0..max_iterations {
            let p_vec = Vector::from_array1(
                &Array1::from_vec(p.iter().map(|c| Complex64::new(c.re, c.im)).collect()).view(),
                &MemoryPool::new(),
            )?;
            let mut ap_vec =
                Vector::from_array1(&Array1::zeros(p.len()).view(), &MemoryPool::new())?;
            matrix.matvec(&p_vec, &mut ap_vec)?;
            let ap_array = ap_vec.to_array1()?;
            let ap = DVector::from_iterator(
                ap_array.len(),
                ap_array.iter().map(|&c| Complex::new(c.re, c.im)),
            );

            let alpha = rsold / p.dot(&ap).re;
            let alpha_complex = Complex::new(alpha, 0.0);
            x += &p * alpha_complex;
            r -= &ap * alpha_complex;

            let rsnew = r.dot(&r).re;
            if rsnew.sqrt() < tolerance {
                break;
            }

            let beta = rsnew / rsold;
            let beta_complex = Complex::new(beta, 0.0);
            p = &r + &p * beta_complex;
            rsold = rsnew;
        }

        let result_array =
            Array1::from_vec(x.iter().map(|c| Complex64::new(c.re, c.im)).collect());
        Vector::from_array1(&result_array.view(), &MemoryPool::new())
    }

    /// Restarted GMRES. The Arnoldi loop is standard; the least-squares step
    /// is simplified (back substitution treating H as upper triangular
    /// instead of applying Givens rotations), so convergence is approximate.
    pub fn gmres(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
        restart: usize,
    ) -> Result<Vector> {
        let b_array = b.to_array1()?;
        let n = b_array.len();

        let mut x = if let Some(x0_vec) = x0 {
            x0_vec.to_array1()?.to_owned()
        } else {
            Array1::zeros(n)
        };

        for _restart_iter in 0..(max_iterations / restart) {
            // Residual r = b - A x.
            let mut ax = Array1::zeros(n);
            let x_vec = Vector::from_array1(&x.view(), &MemoryPool::new())?;
            let mut ax_vec = Vector::from_array1(&ax.view(), &MemoryPool::new())?;
            matrix.matvec(&x_vec, &mut ax_vec)?;
            ax = ax_vec.to_array1()?;

            let mut r = &b_array - &ax;
            let beta = r.norm_l2();

            if beta < tolerance {
                break;
            }

            r = r.mapv(|z| z / Complex64::new(beta, 0.0));

            // Arnoldi process: build an orthonormal Krylov basis in `v` and
            // the Hessenberg matrix `h`.
            let mut v = Vec::new();
            v.push(r.clone());

            let mut h = Array2::zeros((restart + 1, restart));

            for j in 0..restart.min(max_iterations) {
                let v_vec = Vector::from_array1(&v[j].view(), &MemoryPool::new())?;
                let mut av = Array1::zeros(n);
                let mut av_vec = Vector::from_array1(&av.view(), &MemoryPool::new())?;
                matrix.matvec(&v_vec, &mut av_vec)?;
                av = av_vec.to_array1()?;

                for i in 0..=j {
                    h[[i, j]] = v[i].dot(&av);
                    av = av - h[[i, j]] * &v[i];
                }

                h[[j + 1, j]] = Complex64::new(av.norm_l2(), 0.0);

                if h[[j + 1, j]].norm() < tolerance {
                    break;
                }

                av /= h[[j + 1, j]];
                v.push(av);
            }

            // Simplified least-squares solve: back substitution on the upper
            // triangle of H against beta * e1.
            let krylov_dim = v.len() - 1;
            if krylov_dim > 0 {
                let mut e1 = Array1::zeros(krylov_dim + 1);
                e1[0] = Complex64::new(beta, 0.0);

                let mut y = Array1::zeros(krylov_dim);
                for i in (0..krylov_dim).rev() {
                    let mut sum = Complex64::new(0.0, 0.0);
                    for j in (i + 1)..krylov_dim {
                        sum += h[[i, j]] * y[j];
                    }
                    y[i] = (e1[i] - sum) / h[[i, i]];
                }

                // Update the iterate along the Krylov basis.
                for i in 0..krylov_dim {
                    x = x + y[i] * &v[i];
                }
            }
        }

        Vector::from_array1(&x.view(), &MemoryPool::new())
    }

    /// BiCGSTAB for general (non-Hermitian) systems.
    pub fn bicgstab(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
    ) -> Result<Vector> {
        let b_array = b.to_array1()?;
        let n = b_array.len();

        let mut x = if let Some(x0_vec) = x0 {
            x0_vec.to_array1()?.to_owned()
        } else {
            Array1::zeros(n)
        };

        // Initial residual r = b - A x and shadow residual r0.
        let mut ax = Array1::zeros(n);
        let x_vec = Vector::from_array1(&x.view(), &MemoryPool::new())?;
        let mut ax_vec = Vector::from_array1(&ax.view(), &MemoryPool::new())?;
        matrix.matvec(&x_vec, &mut ax_vec)?;
        ax = ax_vec.to_array1()?;

        let mut r = &b_array - &ax;
        let r0 = r.clone();

        let mut rho = Complex64::new(1.0, 0.0);
        let mut alpha = Complex64::new(1.0, 0.0);
        let mut omega = Complex64::new(1.0, 0.0);

        let mut p = Array1::zeros(n);
        let mut v = Array1::zeros(n);

        for _ in 0..max_iterations {
            let rho_new = r0.dot(&r);
            let beta = (rho_new / rho) * (alpha / omega);

            p = &r + beta * (&p - omega * &v);

            let p_vec = Vector::from_array1(&p.view(), &MemoryPool::new())?;
            let mut v_vec = Vector::from_array1(&v.view(), &MemoryPool::new())?;
            matrix.matvec(&p_vec, &mut v_vec)?;
            v = v_vec.to_array1()?;

            alpha = rho_new / r0.dot(&v);
            let s = &r - alpha * &v;

            if s.norm_l2() < tolerance {
                x = x + alpha * &p;
                break;
            }

            let s_vec = Vector::from_array1(&s.view(), &MemoryPool::new())?;
            let mut t_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&s_vec, &mut t_vec)?;
            let t = t_vec.to_array1()?;

            omega = t.dot(&s) / t.dot(&t);
            x = x + alpha * &p + omega * &s;
            r = s - omega * &t;

            if r.norm_l2() < tolerance {
                break;
            }

            rho = rho_new;
        }

        Vector::from_array1(&x.view(), &MemoryPool::new())
    }
}
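
// Hedged example: CG on the 2x2 identity system converges in one iteration,
// returning b itself.
#[cfg(all(test, feature = "advanced_math"))]
mod cg_solver_example {
    use super::*;

    #[test]
    fn solves_identity_system() {
        let pool = MemoryPool::new();
        let values = [Complex64::new(1.0, 0.0), Complex64::new(1.0, 0.0)];
        let col_indices = [0usize, 1];
        let row_ptr = [0usize, 1, 2];
        let m = SparseMatrix::from_csr(&values, &col_indices, &row_ptr, 2, 2, &pool).unwrap();

        let b = Vector::from_array1(
            &Array1::from_vec(vec![Complex64::new(4.0, 0.0), Complex64::new(-2.0, 0.0)]).view(),
            &pool,
        )
        .unwrap();

        let x = SparseSolvers::conjugate_gradient(&m, &b, None, 1e-10, 10)
            .unwrap()
            .to_array1()
            .unwrap();
        assert!((x[0] - Complex64::new(4.0, 0.0)).norm() < 1e-8);
        assert!((x[1] - Complex64::new(-2.0, 0.0)).norm() < 1e-8);
    }
}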

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedEigensolvers;

#[cfg(feature = "advanced_math")]
impl AdvancedEigensolvers {
    /// Lanczos iteration. Builds the tridiagonal projection of `matrix`; the
    /// final eigenvalue extraction is a placeholder that reads off the
    /// tridiagonal diagonal rather than diagonalizing it, so the returned
    /// values are rough Ritz-value approximations only.
    pub fn lanczos(
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        max_iterations: usize,
        tolerance: f64,
    ) -> Result<EigResult> {
        let n = matrix.csr_matrix.nrows();
        let m = num_eigenvalues.min(max_iterations);

        // Random normalized start vector.
        let mut q = Array1::from_vec(
            (0..n)
                .map(|_| Complex64::new(rand::random::<f64>() - 0.5, rand::random::<f64>() - 0.5))
                .collect(),
        );
        let q_norm = q.norm_l2();
        q = q.mapv(|z| z / Complex64::new(q_norm, 0.0));

        let mut q_vectors = Vec::new();
        q_vectors.push(q.clone());

        let mut alpha = Vec::new();
        let mut beta = Vec::new();

        let mut q_prev = Array1::<Complex64>::zeros(n);

        for j in 0..m {
            let q_vec = Vector::from_array1(&q_vectors[j].view(), &MemoryPool::new())?;
            let mut av_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&q_vec, &mut av_vec)?;
            let mut av = av_vec.to_array1()?;

            let alpha_j = q_vectors[j].dot(&av);
            alpha.push(alpha_j);

            // Three-term recurrence: orthogonalize against the two most
            // recent basis vectors.
            av = av - alpha_j * &q_vectors[j];
            if j > 0 {
                av = av - Complex64::new(beta[j - 1], 0.0) * &q_prev;
            }

            let beta_j = av.norm_l2();

            if beta_j.abs() < tolerance {
                break;
            }

            beta.push(beta_j);
            q_prev = q_vectors[j].clone();

            if j + 1 < m {
                q = av / beta_j;
                q_vectors.push(q.clone());
            }
        }

        // Assemble the tridiagonal projection T.
        let dim = alpha.len();
        let mut tridiag = Array2::zeros((dim, dim));

        for i in 0..dim {
            tridiag[[i, i]] = alpha[i];
            if i > 0 {
                tridiag[[i - 1, i]] = Complex64::new(beta[i - 1], 0.0);
                tridiag[[i, i - 1]] = Complex64::new(beta[i - 1], 0.0);
            }
        }

        // Placeholder: take the diagonal of T as the eigenvalue estimates.
        let mut eigenvalues = Array1::zeros(num_eigenvalues.min(dim));
        for i in 0..eigenvalues.len() {
            eigenvalues[i] = tridiag[[i, i]];
        }

        // Return the Lanczos basis vectors as approximate eigenvectors.
        let mut eigenvectors = Array2::zeros((n, eigenvalues.len()));
        for (i, mut col) in eigenvectors
            .columns_mut()
            .into_iter()
            .enumerate()
            .take(eigenvalues.len())
        {
            if i < q_vectors.len() {
                col.assign(&q_vectors[i]);
            }
        }

        let values = Vector::from_array1(&eigenvalues.view(), &MemoryPool::new())?;
        let vectors = Matrix::from_array2(&eigenvectors.view(), &MemoryPool::new())?;

        Ok(EigResult { values, vectors })
    }

    /// Arnoldi iteration for non-Hermitian matrices. As with `lanczos`, the
    /// eigenvalue extraction is a placeholder (diagonal of the Hessenberg
    /// matrix), not a full eigensolve of the projection.
    pub fn arnoldi(
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        max_iterations: usize,
        tolerance: f64,
    ) -> Result<EigResult> {
        let n = matrix.csr_matrix.nrows();
        let m = num_eigenvalues.min(max_iterations);

        let mut q = Array1::from_vec(
            (0..n)
                .map(|_| Complex64::new(rand::random::<f64>() - 0.5, rand::random::<f64>() - 0.5))
                .collect(),
        );
        let q_norm = q.norm_l2();
        q = q.mapv(|z| z / Complex64::new(q_norm, 0.0));

        let mut q_vectors = Vec::new();
        q_vectors.push(q.clone());

        let mut h = Array2::zeros((m + 1, m));

        for j in 0..m {
            let q_vec = Vector::from_array1(&q_vectors[j].view(), &MemoryPool::new())?;
            let mut v_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&q_vec, &mut v_vec)?;
            let mut v = v_vec.to_array1()?;

            // Full Gram-Schmidt orthogonalization against all previous basis
            // vectors.
            for i in 0..=j {
                h[[i, j]] = q_vectors[i].dot(&v);
                v = v - h[[i, j]] * &q_vectors[i];
            }

            h[[j + 1, j]] = Complex64::new(v.norm_l2(), 0.0);

            if h[[j + 1, j]].norm() < tolerance {
                break;
            }

            if j + 1 < m {
                q = v / h[[j + 1, j]];
                q_vectors.push(q.clone());
            }
        }

        let dim = q_vectors.len();
        let mut eigenvalues = Array1::zeros(num_eigenvalues.min(dim));
        for i in 0..eigenvalues.len() {
            eigenvalues[i] = h[[i, i]];
        }

        let mut eigenvectors = Array2::zeros((n, eigenvalues.len()));
        for (i, mut col) in eigenvectors
            .columns_mut()
            .into_iter()
            .enumerate()
            .take(eigenvalues.len())
        {
            if i < q_vectors.len() {
                col.assign(&q_vectors[i]);
            }
        }

        let values = Vector::from_array1(&eigenvalues.view(), &MemoryPool::new())?;
        let vectors = Matrix::from_array2(&eigenvectors.view(), &MemoryPool::new())?;

        Ok(EigResult { values, vectors })
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedLinearAlgebra;

#[cfg(feature = "advanced_math")]
impl AdvancedLinearAlgebra {
    pub fn qr_decomposition(matrix: &Matrix) -> Result<QRResult> {
        use ndarray_linalg::QR;

        let qr_result = matrix
            .data
            .qr()
            .map_err(|_| SimulatorError::ComputationError("QR decomposition failed".to_string()))?;

        let pool = MemoryPool::new();
        let q = Matrix::from_array2(&qr_result.0.view(), &pool)?;
        let r = Matrix::from_array2(&qr_result.1.view(), &pool)?;

        Ok(QRResult { q, r })
    }

    pub fn cholesky_decomposition(matrix: &Matrix) -> Result<Matrix> {
        use ndarray_linalg::Cholesky;

        let chol_result = matrix
            .data
            .cholesky(ndarray_linalg::UPLO::Lower)
            .map_err(|_| {
                SimulatorError::ComputationError("Cholesky decomposition failed".to_string())
            })?;

        Matrix::from_array2(&chol_result.view(), &MemoryPool::new())
    }

    /// Unitary time evolution exp(-i*t*H), computed with a truncated Taylor
    /// series (at most 20 terms, stopping early once the term norm drops
    /// below 1e-12).
    pub fn matrix_exponential(matrix: &Matrix, t: f64) -> Result<Matrix> {
        let scaled_matrix = &matrix.data * Complex64::new(0.0, -t);

        let mut result = Array2::eye(scaled_matrix.nrows());
        let mut term = Array2::eye(scaled_matrix.nrows());

        for k in 1..20 {
            // term_k = term_{k-1} * (-i t H) / k, so `result` accumulates the
            // Taylor series of the exponential.
            term = term.dot(&scaled_matrix) / Complex64::new(k as f64, 0.0);
            result = result + &term;

            if term.norm_l2() < 1e-12 {
                break;
            }
        }

        Matrix::from_array2(&result.view(), &MemoryPool::new())
    }

    /// Moore-Penrose pseudoinverse via the SVD: A+ = V S+ U^H, with singular
    /// values below `tolerance` treated as zero. Conjugate transposes are
    /// used here; the original used plain transposes, which is only correct
    /// for real-valued data. Assumes a square input (the full SVD factors
    /// are combined without truncation).
    pub fn pseudoinverse(matrix: &Matrix, tolerance: f64) -> Result<Matrix> {
        let svd_result = LAPACK::svd(matrix)?;

        let u = svd_result.u.data;
        let s = svd_result.s.to_array1()?;
        let vt = svd_result.vt.data;

        // Invert the singular values above the cutoff.
        let mut s_pinv = Array1::zeros(s.len());
        for (i, &sigma) in s.iter().enumerate() {
            if sigma.norm() > tolerance {
                s_pinv[i] = Complex64::new(1.0, 0.0) / sigma;
            }
        }

        let s_pinv_diag = Array2::from_diag(&s_pinv);
        let v = vt.mapv(|z| z.conj()).reversed_axes();
        let u_h = u.mapv(|z| z.conj()).reversed_axes();
        let result = v.dot(&s_pinv_diag).dot(&u_h);

        Matrix::from_array2(&result.view(), &MemoryPool::new())
    }

    /// 2-norm condition number: ratio of the largest to the smallest nonzero
    /// singular value.
    pub fn condition_number(matrix: &Matrix) -> Result<f64> {
        let svd_result = LAPACK::svd(matrix)?;
        let s = svd_result.s.to_array1()?;

        let mut min_singular = f64::INFINITY;
        let mut max_singular: f64 = 0.0;

        for &sigma in s.iter() {
            let sigma_norm = sigma.norm();
            if sigma_norm > 1e-15 {
                min_singular = min_singular.min(sigma_norm);
                max_singular = max_singular.max(sigma_norm);
            }
        }

        Ok(max_singular / min_singular)
    }
}
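
// Hedged example: exp(-i*pi*I) = -I, so the truncated Taylor series above
// should return -1 on the diagonal and 0 off the diagonal, well within its
// truncation error for |t| = pi.
#[cfg(all(test, feature = "advanced_math"))]
mod matrix_exponential_example {
    use super::*;

    #[test]
    fn exponential_of_identity_at_pi() {
        let pool = MemoryPool::new();
        let h = Matrix::from_array2(&Array2::eye(2).view(), &pool).unwrap();

        let u = AdvancedLinearAlgebra::matrix_exponential(&h, PI).unwrap();
        let diag = u.view()[[0, 0]];
        assert!((diag - Complex64::new(-1.0, 0.0)).norm() < 1e-6);

        let off_diag = u.view()[[0, 1]];
        assert!(off_diag.norm() < 1e-9);
    }
}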

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct QRResult {
    pub q: Matrix,
    pub r: Matrix,
}

/// Micro-benchmarks for the SciRS2 integration layer. Returns a map from
/// benchmark name to elapsed wall-clock time in milliseconds; entries are
/// only present for code paths enabled by the active feature flags.
pub fn benchmark_scirs2_integration() -> Result<HashMap<String, f64>> {
    let mut results = HashMap::new();

    #[cfg(feature = "advanced_math")]
    {
        let start = std::time::Instant::now();
        let engine = FftEngine::new();
        let test_vector = Vector::from_array1(
            &Array1::from_vec((0..1024).map(|i| Complex64::new(i as f64, 0.0)).collect()).view(),
            &MemoryPool::new(),
        )?;

        for _ in 0..100 {
            let _ = engine.forward(&test_vector)?;
        }

        let fft_time = start.elapsed().as_millis() as f64;
        results.insert("fft_1024_100_iterations".to_string(), fft_time);
    }

    #[cfg(feature = "advanced_math")]
    {
        use nalgebra_sparse::CsrMatrix;

        let start = std::time::Instant::now();

        // Build a 100x100 CSR test matrix with 10 entries per row. CSR takes
        // row *offsets* (length num_rows + 1), not per-entry row indices, and
        // column indices must be sorted within each row; the original passed
        // per-entry row indices here, which `try_from_csr_data` rejects.
        let mut row_offsets = vec![0usize; 101];
        let mut col_indices = vec![0usize; 1000];
        let mut values = vec![Complex64::new(0.0, 0.0); 1000];

        for i in 0..100 {
            row_offsets[i] = i * 10;
            let mut cols: Vec<usize> = (0..10).map(|j| (i + j) % 100).collect();
            cols.sort_unstable();
            for (j, &col) in cols.iter().enumerate() {
                let idx = i * 10 + j;
                col_indices[idx] = col;
                values[idx] = Complex64::new(1.0, 0.0);
            }
        }
        row_offsets[100] = 1000;

        let csr = CsrMatrix::try_from_csr_data(100, 100, row_offsets, col_indices, values)
            .map_err(|_| {
                SimulatorError::ComputationError("Failed to create test matrix".to_string())
            })?;

        let sparse_matrix = SparseMatrix { csr_matrix: csr };
        let b = Vector::from_array1(&Array1::ones(100).view(), &MemoryPool::new())?;

        let _ = SparseSolvers::conjugate_gradient(&sparse_matrix, &b, None, 1e-6, 100)?;

        let sparse_solver_time = start.elapsed().as_millis() as f64;
        results.insert("cg_solver_100x100".to_string(), sparse_solver_time);
    }

    #[cfg(feature = "advanced_math")]
    {
        let start = std::time::Instant::now();

        let test_matrix = Matrix::from_array2(&Array2::eye(50).view(), &MemoryPool::new())?;
        for _ in 0..10 {
            let _ = AdvancedLinearAlgebra::qr_decomposition(&test_matrix)?;
        }

        let qr_time = start.elapsed().as_millis() as f64;
        results.insert("qr_decomposition_50x50_10_iterations".to_string(), qr_time);
    }

    Ok(results)
}
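
// Hedged usage sketch: run the micro-benchmarks and print whatever timings the
// enabled features produced. Without `advanced_math` the map is simply empty.
#[cfg(test)]
mod benchmark_example {
    use super::*;

    #[test]
    fn benchmarks_run_without_error() {
        let results = benchmark_scirs2_integration().unwrap();
        for (name, millis) in &results {
            println!("{name}: {millis} ms");
        }
    }
}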