#[cfg(feature = "advanced_math")]
use ndrustfft::FftHandler;
use scirs2_core::ndarray::ndarray_linalg::Norm;
use scirs2_core::ndarray::{
    s, Array1, Array2, ArrayView1, ArrayView2, ArrayViewMut1, ArrayViewMut2,
};
use scirs2_core::Complex64;
use std::collections::HashMap;
use std::f64::consts::PI;
use std::sync::{Arc, Mutex};

use quantrs2_core::prelude::QuantRS2Error as SciRS2Error;
use scirs2_core::parallel_ops::*;
use scirs2_core::simd_ops::SimdUnifiedOps;

use crate::error::{Result, SimulatorError};
use scirs2_core::random::prelude::*;

/// Dense complex matrix wrapper used by the SciRS2 backend.
#[derive(Debug, Clone)]
pub struct SciRS2Matrix {
    data: Array2<Complex64>,
    simd_aligned: bool,
}

impl SciRS2Matrix {
    /// Allocates a zero-initialized matrix (the allocator is currently only
    /// threaded through for future use).
    pub fn zeros(shape: (usize, usize), _allocator: &SciRS2MemoryAllocator) -> Result<Self> {
        Ok(Self {
            data: Array2::zeros(shape),
            simd_aligned: true,
        })
    }

    pub const fn from_array2(array: Array2<Complex64>) -> Self {
        Self {
            data: array,
            simd_aligned: false,
        }
    }

    pub fn shape(&self) -> (usize, usize) {
        self.data.dim()
    }

    pub fn rows(&self) -> usize {
        self.data.nrows()
    }

    pub fn cols(&self) -> usize {
        self.data.ncols()
    }

    pub fn data_view(&self) -> ArrayView2<'_, Complex64> {
        self.data.view()
    }

    pub fn data_view_mut(&mut self) -> ArrayViewMut2<'_, Complex64> {
        self.data.view_mut()
    }
}

/// Dense complex vector wrapper used by the SciRS2 backend.
#[derive(Debug, Clone)]
pub struct SciRS2Vector {
    data: Array1<Complex64>,
    simd_aligned: bool,
}

impl SciRS2Vector {
    /// Allocates a zero-initialized vector (the allocator is currently unused).
    pub fn zeros(len: usize, _allocator: &SciRS2MemoryAllocator) -> Result<Self> {
        Ok(Self {
            data: Array1::zeros(len),
            simd_aligned: true,
        })
    }

    pub const fn from_array1(array: Array1<Complex64>) -> Self {
        Self {
            data: array,
            simd_aligned: false,
        }
    }

    pub fn len(&self) -> usize {
        self.data.len()
    }

    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    pub fn data_view(&self) -> ArrayView1<'_, Complex64> {
        self.data.view()
    }

    pub fn data_view_mut(&mut self) -> ArrayViewMut1<'_, Complex64> {
        self.data.view_mut()
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Ok(self.data.clone())
    }
}

/// Configuration knobs for SIMD dispatch.
#[derive(Debug, Clone)]
pub struct SciRS2SimdConfig {
    pub force_instruction_set: Option<String>,
    pub override_simd_lanes: Option<usize>,
    pub enable_aggressive_simd: bool,
    pub numa_aware_allocation: bool,
}

impl Default for SciRS2SimdConfig {
    fn default() -> Self {
        Self {
            force_instruction_set: None,
            override_simd_lanes: None,
            enable_aggressive_simd: true,
            numa_aware_allocation: true,
        }
    }
}

/// Detected SIMD capabilities of the host CPU.
#[derive(Debug, Clone)]
pub struct SciRS2SimdContext {
    pub simd_lanes: usize,
    pub supports_complex_simd: bool,
    pub instruction_set: String,
    pub max_vector_width: usize,
}

impl SciRS2SimdContext {
    /// Runtime feature detection: AVX-512 > AVX2 > SSE4.1 on x86_64, NEON on
    /// aarch64, scalar fallback everywhere else.
    pub fn detect_capabilities() -> Self {
        #[cfg(target_arch = "x86_64")]
        {
            if is_x86_feature_detected!("avx512f") {
                Self {
                    simd_lanes: 16,
                    supports_complex_simd: true,
                    instruction_set: "AVX-512".to_string(),
                    max_vector_width: 64,
                }
            } else if is_x86_feature_detected!("avx2") {
                Self {
                    simd_lanes: 8,
                    supports_complex_simd: true,
                    instruction_set: "AVX2".to_string(),
                    max_vector_width: 32,
                }
            } else if is_x86_feature_detected!("sse4.1") {
                Self {
                    simd_lanes: 4,
                    supports_complex_simd: true,
                    instruction_set: "SSE4.1".to_string(),
                    max_vector_width: 16,
                }
            } else {
                Self::fallback()
            }
        }
        #[cfg(target_arch = "aarch64")]
        {
            Self {
                simd_lanes: 4,
                supports_complex_simd: true,
                instruction_set: "NEON".to_string(),
                max_vector_width: 16,
            }
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            Self::fallback()
        }
    }

    /// Detected capabilities, with any explicit overrides from the config
    /// applied on top.
    pub fn from_config(config: &SciRS2SimdConfig) -> Result<Self> {
        let mut context = Self::detect_capabilities();

        if let Some(ref instruction_set) = config.force_instruction_set {
            context.instruction_set = instruction_set.clone();
        }

        if let Some(simd_lanes) = config.override_simd_lanes {
            context.simd_lanes = simd_lanes;
        }

        Ok(context)
    }

    fn fallback() -> Self {
        Self {
            simd_lanes: 1,
            supports_complex_simd: false,
            instruction_set: "Scalar".to_string(),
            max_vector_width: 8,
        }
    }
}

impl Default for SciRS2SimdContext {
    fn default() -> Self {
        Self::detect_capabilities()
    }
}
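
// Usage sketch (added example, not original code): capability detection always
// yields at least one lane, and `from_config` lets callers override the
// detected lane count. The override value of 2 is an arbitrary illustration.
#[cfg(test)]
mod simd_context_example {
    use super::*;

    #[test]
    fn config_overrides_detected_lanes() {
        let detected = SciRS2SimdContext::detect_capabilities();
        assert!(detected.simd_lanes >= 1);

        let config = SciRS2SimdConfig {
            override_simd_lanes: Some(2),
            ..Default::default()
        };
        let ctx = SciRS2SimdContext::from_config(&config).expect("config is valid");
        assert_eq!(ctx.simd_lanes, 2);
    }
}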

/// Bookkeeping-only allocator stub. All fields are plain counters, so the
/// type is `Send`/`Sync` automatically; the previous `unsafe impl`s were
/// redundant and have been dropped.
#[derive(Debug)]
pub struct SciRS2MemoryAllocator {
    pub total_allocated: usize,
    pub alignment: usize,
    allocation_count: usize,
}

impl Default for SciRS2MemoryAllocator {
    fn default() -> Self {
        Self {
            total_allocated: 0,
            alignment: 64,
            allocation_count: 0,
        }
    }
}

impl SciRS2MemoryAllocator {
    pub fn new() -> Self {
        Self::default()
    }
}

/// FFT dispatcher with cached plans. Note that `plans` is not yet consulted
/// by the `forward`/`inverse` paths below.
#[derive(Debug)]
pub struct SciRS2VectorizedFFT {
    plans: HashMap<usize, FFTPlan>,
    optimization_level: OptimizationLevel,
}

#[derive(Debug, Clone)]
pub struct FFTPlan {
    pub size: usize,
    pub twiddle_factors: Vec<Complex64>,
    pub vectorization_strategy: VectorizationStrategy,
}

#[derive(Debug, Clone, Copy)]
pub enum VectorizationStrategy {
    SimdComplexSeparate,
    SimdComplexInterleaved,
    Adaptive,
}

#[derive(Debug, Clone, Copy)]
pub enum OptimizationLevel {
    Basic,
    Aggressive,
    Maximum,
}

impl Default for SciRS2VectorizedFFT {
    fn default() -> Self {
        Self {
            plans: HashMap::new(),
            optimization_level: OptimizationLevel::Aggressive,
        }
    }
}
impl SciRS2VectorizedFFT {
    /// Forward DFT. Uses `ndrustfft` when the `advanced_math` feature is
    /// enabled; otherwise falls back to a naive O(n^2) DFT.
    pub fn forward(&self, input: &SciRS2Vector) -> Result<SciRS2Vector> {
        let data = input.data_view().to_owned();

        #[cfg(feature = "advanced_math")]
        {
            let handler = FftHandler::<f64>::new(data.len());
            let mut output = data.clone();
            ndrustfft::ndfft(&data, &mut output, &handler, 0);
            Ok(SciRS2Vector::from_array1(output))
        }

        #[cfg(not(feature = "advanced_math"))]
        {
            // Naive DFT: X[k] = sum_j x[j] * exp(-2*pi*i*j*k/n).
            let n = data.len();
            let mut output = Array1::zeros(n);
            for k in 0..n {
                let mut sum = Complex64::new(0.0, 0.0);
                for j in 0..n {
                    let angle = -2.0 * PI * (k * j) as f64 / n as f64;
                    let twiddle = Complex64::new(angle.cos(), angle.sin());
                    sum += data[j] * twiddle;
                }
                output[k] = sum;
            }
            Ok(SciRS2Vector::from_array1(output))
        }
    }

    /// Inverse DFT, including the 1/n normalization in the fallback path
    /// (ndrustfft's `ndifft` normalizes internally).
    pub fn inverse(&self, input: &SciRS2Vector) -> Result<SciRS2Vector> {
        let data = input.data_view().to_owned();

        #[cfg(feature = "advanced_math")]
        {
            let handler = FftHandler::<f64>::new(data.len());
            let mut output = data.clone();
            ndrustfft::ndifft(&data, &mut output, &handler, 0);
            Ok(SciRS2Vector::from_array1(output))
        }

        #[cfg(not(feature = "advanced_math"))]
        {
            let n = data.len();
            let mut output = Array1::zeros(n);
            let scale = 1.0 / n as f64;
            for k in 0..n {
                let mut sum = Complex64::new(0.0, 0.0);
                for j in 0..n {
                    let angle = 2.0 * PI * (k * j) as f64 / n as f64;
                    let twiddle = Complex64::new(angle.cos(), angle.sin());
                    sum += data[j] * twiddle;
                }
                output[k] = sum * scale;
            }
            Ok(SciRS2Vector::from_array1(output))
        }
    }
}
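
// Round-trip sketch (added example, not original code): forward followed by
// inverse should reproduce the input up to floating-point error in both the
// ndrustfft path and the naive fallback, since `inverse` applies the 1/n scale.
#[cfg(test)]
mod vectorized_fft_example {
    use super::*;

    #[test]
    fn forward_then_inverse_is_identity() {
        let fft = SciRS2VectorizedFFT::default();
        let input = SciRS2Vector::from_array1(Array1::from_vec(
            (0..8).map(|i| Complex64::new(i as f64, 0.0)).collect(),
        ));
        let spectrum = fft.forward(&input).expect("forward FFT");
        let recovered = fft.inverse(&spectrum).expect("inverse FFT");
        for (x, y) in input.data_view().iter().zip(recovered.data_view().iter()) {
            assert!((x - y).norm() < 1e-9);
        }
    }
}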

#[derive(Debug)]
pub struct SciRS2ParallelContext {
    pub num_threads: usize,
    pub thread_pool: ThreadPool,
    pub numa_aware: bool,
}

impl Default for SciRS2ParallelContext {
    fn default() -> Self {
        let num_threads = current_num_threads();
        let thread_pool = ThreadPoolBuilder::new()
            .num_threads(num_threads)
            .build()
            .unwrap_or_else(|_| ThreadPoolBuilder::new().build().unwrap());

        Self {
            num_threads,
            thread_pool,
            numa_aware: true,
        }
    }
}
/// Operation counters and timing accumulators for the backend.
#[derive(Debug, Default, Clone)]
pub struct BackendStats {
    pub simd_vector_ops: usize,
    pub simd_matrix_ops: usize,
    pub complex_simd_ops: usize,
    pub simd_time_ns: u64,
    pub parallel_time_ns: u64,
    pub memory_usage_bytes: usize,
    pub peak_simd_throughput: f64,
    pub simd_efficiency: f64,
    pub vectorized_fft_ops: usize,
    pub sparse_simd_ops: usize,
    pub matrix_ops: usize,
    pub lapack_time_ms: f64,
    pub cache_hit_rate: f64,
}

#[derive(Debug)]
pub struct SciRS2Backend {
    pub available: bool,
    pub stats: Arc<Mutex<BackendStats>>,
    pub simd_context: SciRS2SimdContext,
    pub memory_allocator: SciRS2MemoryAllocator,
    pub fft_engine: SciRS2VectorizedFFT,
    pub parallel_context: SciRS2ParallelContext,
}

impl SciRS2Backend {
    pub fn new() -> Self {
        let simd_context = SciRS2SimdContext::detect_capabilities();
        let memory_allocator = SciRS2MemoryAllocator::default();
        let fft_engine = SciRS2VectorizedFFT::default();
        let parallel_context = SciRS2ParallelContext::default();

        Self {
            available: simd_context.supports_complex_simd,
            stats: Arc::new(Mutex::new(BackendStats::default())),
            simd_context,
            memory_allocator,
            fft_engine,
            parallel_context,
        }
    }

    pub fn with_config(simd_config: SciRS2SimdConfig) -> Result<Self> {
        let mut backend = Self::new();
        backend.simd_context = SciRS2SimdContext::from_config(&simd_config)?;
        Ok(backend)
    }

    pub const fn is_available(&self) -> bool {
        self.available && self.simd_context.supports_complex_simd
    }

    pub const fn get_simd_info(&self) -> &SciRS2SimdContext {
        &self.simd_context
    }

    pub fn get_stats(&self) -> BackendStats {
        self.stats.lock().unwrap().clone()
    }

    pub fn reset_stats(&self) {
        *self.stats.lock().unwrap() = BackendStats::default();
    }

    /// Dense complex matrix product `A * B`, recorded in the backend stats.
    pub fn matrix_multiply(&self, a: &SciRS2Matrix, b: &SciRS2Matrix) -> Result<SciRS2Matrix> {
        let start_time = std::time::Instant::now();

        if a.cols() != b.rows() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Cannot multiply {}x{} matrix with {}x{} matrix",
                a.rows(),
                a.cols(),
                b.rows(),
                b.cols()
            )));
        }

        let result_shape = (a.rows(), b.cols());
        let mut result = SciRS2Matrix::zeros(result_shape, &self.memory_allocator)?;

        self.simd_gemm_complex(&a.data_view(), &b.data_view(), &mut result.data_view_mut())?;

        {
            let mut stats = self.stats.lock().unwrap();
            stats.simd_matrix_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }

    /// Dense complex matrix-vector product `A * x`, recorded in the stats.
    pub fn matrix_vector_multiply(
        &self,
        a: &SciRS2Matrix,
        x: &SciRS2Vector,
    ) -> Result<SciRS2Vector> {
        let start_time = std::time::Instant::now();

        if a.cols() != x.len() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Cannot multiply {}x{} matrix with vector of length {}",
                a.rows(),
                a.cols(),
                x.len()
            )));
        }

        let mut result = SciRS2Vector::zeros(a.rows(), &self.memory_allocator)?;

        self.simd_gemv_complex(&a.data_view(), &x.data_view(), &mut result.data_view_mut())?;

        {
            let mut stats = self.stats.lock().unwrap();
            stats.simd_vector_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }

    /// Complex GEMM on split real/imaginary buffers. The original code
    /// dispatched on the SIMD lane count, but both branches contained the
    /// identical scalar FMA loop, so they are collapsed into one here; the
    /// structure-of-arrays layout is kept as the slot for a real vectorized
    /// kernel.
    fn simd_gemm_complex(
        &self,
        a: &ArrayView2<Complex64>,
        b: &ArrayView2<Complex64>,
        c: &mut ArrayViewMut2<Complex64>,
    ) -> Result<()> {
        let (m, k) = a.dim();
        let (k2, n) = b.dim();

        assert_eq!(k, k2, "Inner dimensions must match");
        assert_eq!(c.dim(), (m, n), "Output dimensions must match");

        let a_real: Vec<f64> = a.iter().map(|z| z.re).collect();
        let a_imag: Vec<f64> = a.iter().map(|z| z.im).collect();
        let b_real: Vec<f64> = b.iter().map(|z| z.re).collect();
        let b_imag: Vec<f64> = b.iter().map(|z| z.im).collect();

        for i in 0..m {
            for j in 0..n {
                let mut real_sum = 0.0;
                let mut imag_sum = 0.0;

                let a_row_start = i * k;

                for l in 0..k {
                    let b_idx = l * n + j;
                    let ar = a_real[a_row_start + l];
                    let ai = a_imag[a_row_start + l];
                    let br = b_real[b_idx];
                    let bi = b_imag[b_idx];

                    // (ar + i*ai)(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br)
                    real_sum += ar.mul_add(br, -(ai * bi));
                    imag_sum += ar.mul_add(bi, ai * br);
                }

                c[[i, j]] = Complex64::new(real_sum, imag_sum);
            }
        }

        Ok(())
    }

    /// Complex GEMV on split real/imaginary buffers. The main loop is blocked
    /// by the detected SIMD lane count; each block currently runs the same
    /// scalar FMA body, serving as the slot for a future vectorized kernel.
    fn simd_gemv_complex(
        &self,
        a: &ArrayView2<Complex64>,
        x: &ArrayView1<Complex64>,
        y: &mut ArrayViewMut1<Complex64>,
    ) -> Result<()> {
        let (m, n) = a.dim();
        assert_eq!(x.len(), n, "Vector length must match matrix columns");
        assert_eq!(y.len(), m, "Output vector length must match matrix rows");

        let a_real: Vec<f64> = a.iter().map(|z| z.re).collect();
        let a_imag: Vec<f64> = a.iter().map(|z| z.im).collect();
        let x_real: Vec<f64> = x.iter().map(|z| z.re).collect();
        let x_imag: Vec<f64> = x.iter().map(|z| z.im).collect();

        for i in 0..m {
            let mut real_sum = 0.0;
            let mut imag_sum = 0.0;

            let row_start = i * n;

            if n >= self.simd_context.simd_lanes {
                let chunks = n / self.simd_context.simd_lanes;

                // Lane-aligned blocks.
                for chunk in 0..chunks {
                    let start_idx = chunk * self.simd_context.simd_lanes;
                    let end_idx = start_idx + self.simd_context.simd_lanes;

                    for j in start_idx..end_idx {
                        let a_idx = row_start + j;
                        let ar = a_real[a_idx];
                        let ai = a_imag[a_idx];
                        let xr = x_real[j];
                        let xi = x_imag[j];

                        real_sum += ar.mul_add(xr, -(ai * xi));
                        imag_sum += ar.mul_add(xi, ai * xr);
                    }
                }

                // Remainder that does not fill a whole lane block.
                for j in (chunks * self.simd_context.simd_lanes)..n {
                    let a_idx = row_start + j;
                    let ar = a_real[a_idx];
                    let ai = a_imag[a_idx];
                    let xr = x_real[j];
                    let xi = x_imag[j];

                    real_sum += ar.mul_add(xr, -(ai * xi));
                    imag_sum += ar.mul_add(xi, ai * xr);
                }
            } else {
                // Row shorter than one SIMD block: plain scalar loop.
                for j in 0..n {
                    let a_idx = row_start + j;
                    let ar = a_real[a_idx];
                    let ai = a_imag[a_idx];
                    let xr = x_real[j];
                    let xi = x_imag[j];

                    real_sum += ar.mul_add(xr, -(ai * xi));
                    imag_sum += ar.mul_add(xi, ai * xr);
                }
            }

            y[i] = Complex64::new(real_sum, imag_sum);
        }

        Ok(())
    }

    /// Singular value decomposition via the LAPACK wrapper (feature-gated).
    #[cfg(feature = "advanced_math")]
    pub fn svd(&mut self, matrix: &Matrix) -> Result<SvdResult> {
        let start_time = std::time::Instant::now();

        let result = LAPACK::svd(matrix)?;

        let mut stats = self.stats.lock().unwrap();
        stats.simd_matrix_ops += 1;
        stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;

        Ok(result)
    }

    /// Eigendecomposition via the LAPACK wrapper (feature-gated).
    #[cfg(feature = "advanced_math")]
    pub fn eigendecomposition(&mut self, matrix: &Matrix) -> Result<EigResult> {
        let start_time = std::time::Instant::now();

        let result = LAPACK::eig(matrix)?;

        let mut stats = self.stats.lock().unwrap();
        stats.simd_matrix_ops += 1;
        stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;

        Ok(result)
    }
}

impl Default for SciRS2Backend {
    fn default() -> Self {
        Self::new()
    }
}
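
// Usage sketch (added example, not original code): multiply by the identity
// and confirm the operation is recorded in the stats. Matrix contents are
// arbitrary illustrative values.
#[cfg(test)]
mod backend_usage_example {
    use super::*;

    #[test]
    fn multiply_by_identity_preserves_matrix() {
        let backend = SciRS2Backend::new();
        let eye = SciRS2Matrix::from_array2(Array2::eye(2));
        let b = SciRS2Matrix::from_array2(
            Array2::from_shape_vec(
                (2, 2),
                vec![
                    Complex64::new(1.0, 2.0),
                    Complex64::new(3.0, -1.0),
                    Complex64::new(0.0, 1.0),
                    Complex64::new(2.0, 0.0),
                ],
            )
            .expect("2x2 shape matches 4 elements"),
        );

        let product = backend.matrix_multiply(&eye, &b).expect("dimensions match");
        for (x, y) in product.data_view().iter().zip(b.data_view().iter()) {
            assert!((x - y).norm() < 1e-12);
        }
        assert_eq!(backend.get_stats().simd_matrix_ops, 1);
    }
}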

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct MemoryPool {
    _placeholder: (),
}

#[cfg(feature = "advanced_math")]
impl Default for MemoryPool {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "advanced_math")]
impl MemoryPool {
    pub const fn new() -> Self {
        Self { _placeholder: () }
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct MemoryPool;

#[cfg(not(feature = "advanced_math"))]
impl Default for MemoryPool {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(not(feature = "advanced_math"))]
impl MemoryPool {
    pub const fn new() -> Self {
        Self
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct FftEngine;

#[cfg(feature = "advanced_math")]
impl Default for FftEngine {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "advanced_math")]
impl FftEngine {
    pub const fn new() -> Self {
        Self
    }

    pub fn forward(&self, input: &Vector) -> Result<Vector> {
        use ndrustfft::{ndfft, FftHandler};

        let array = input.to_array1()?;
        let handler = FftHandler::new(array.len());
        let mut fft_result = array.clone();

        ndfft(&array, &mut fft_result, &handler, 0);

        Vector::from_array1(&fft_result.view(), &MemoryPool::new())
    }

    pub fn inverse(&self, input: &Vector) -> Result<Vector> {
        use ndrustfft::{ndifft, FftHandler};

        let array = input.to_array1()?;
        let handler = FftHandler::new(array.len());
        let mut ifft_result = array.clone();

        ndifft(&array, &mut ifft_result, &handler, 0);

        Vector::from_array1(&ifft_result.view(), &MemoryPool::new())
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct FftEngine;

#[cfg(not(feature = "advanced_math"))]
impl Default for FftEngine {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(not(feature = "advanced_math"))]
impl FftEngine {
    pub const fn new() -> Self {
        Self
    }

    pub fn forward(&self, _input: &Vector) -> Result<Vector> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn inverse(&self, _input: &Vector) -> Result<Vector> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}
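
// Round-trip sketch for the feature-gated engine (added example): mirrors the
// SciRS2VectorizedFFT test above but goes through `Vector` and `MemoryPool`.
#[cfg(all(test, feature = "advanced_math"))]
mod fft_engine_example {
    use super::*;

    #[test]
    fn engine_round_trip() {
        let pool = MemoryPool::new();
        let input = Vector::from_array1(
            &Array1::from_vec((0..16).map(|i| Complex64::new(i as f64, 0.0)).collect()).view(),
            &pool,
        )
        .expect("vector construction");

        let engine = FftEngine::new();
        let spectrum = engine.forward(&input).expect("forward");
        let recovered = engine.inverse(&spectrum).expect("inverse");

        let original = input.to_array1().expect("dense copy");
        let round_trip = recovered.to_array1().expect("dense copy");
        for (x, y) in original.iter().zip(round_trip.iter()) {
            assert!((x - y).norm() < 1e-9);
        }
    }
}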

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct Matrix {
    data: Array2<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl Matrix {
    pub fn from_array2(array: &ArrayView2<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: array.to_owned(),
        })
    }

    pub fn zeros(shape: (usize, usize), _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: Array2::zeros(shape),
        })
    }

    pub fn to_array2(&self, result: &mut Array2<Complex64>) -> Result<()> {
        if result.shape() != self.data.shape() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Expected shape {:?}, but got {:?}",
                self.data.shape(),
                result.shape()
            )));
        }
        result.assign(&self.data);
        Ok(())
    }

    pub fn shape(&self) -> (usize, usize) {
        (self.data.nrows(), self.data.ncols())
    }

    pub fn view(&self) -> ArrayView2<'_, Complex64> {
        self.data.view()
    }

    pub fn view_mut(&mut self) -> ArrayViewMut2<'_, Complex64> {
        self.data.view_mut()
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct Matrix;

#[cfg(not(feature = "advanced_math"))]
impl Matrix {
    pub fn from_array2(_array: &ArrayView2<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn zeros(_shape: (usize, usize), _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array2(&self, _result: &mut Array2<Complex64>) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct Vector {
    data: Array1<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl Vector {
    pub fn from_array1(array: &ArrayView1<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: array.to_owned(),
        })
    }

    pub fn zeros(len: usize, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: Array1::zeros(len),
        })
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Ok(self.data.clone())
    }

    pub fn to_array1_mut(&self, result: &mut Array1<Complex64>) -> Result<()> {
        if result.len() != self.data.len() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Expected length {}, but got {}",
                self.data.len(),
                result.len()
            )));
        }
        result.assign(&self.data);
        Ok(())
    }

    pub fn len(&self) -> usize {
        self.data.len()
    }

    pub fn view(&self) -> ArrayView1<'_, Complex64> {
        self.data.view()
    }

    pub fn view_mut(&mut self) -> ArrayViewMut1<'_, Complex64> {
        self.data.view_mut()
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct Vector;

#[cfg(not(feature = "advanced_math"))]
impl Vector {
    pub fn from_array1(_array: &ArrayView1<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn zeros(_len: usize, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array1_mut(&self, _result: &mut Array1<Complex64>) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct SparseMatrix {
    csr_matrix: nalgebra_sparse::CsrMatrix<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl SparseMatrix {
    pub fn from_csr(
        values: &[Complex64],
        col_indices: &[usize],
        row_ptr: &[usize],
        num_rows: usize,
        num_cols: usize,
        _pool: &MemoryPool,
    ) -> Result<Self> {
        use nalgebra_sparse::CsrMatrix;

        let csr_matrix = CsrMatrix::try_from_csr_data(
            num_rows,
            num_cols,
            row_ptr.to_vec(),
            col_indices.to_vec(),
            values.to_vec(),
        )
        .map_err(|e| {
            SimulatorError::ComputationError(format!("Failed to create CSR matrix: {e}"))
        })?;

        Ok(Self { csr_matrix })
    }

    /// Sparse matrix-vector product computed row by row over the CSR storage.
    pub fn matvec(&self, vector: &Vector, result: &mut Vector) -> Result<()> {
        use nalgebra::{Complex, DVector};

        let input_vec = vector.to_array1()?;
        let nalgebra_vec = DVector::from_iterator(
            input_vec.len(),
            input_vec.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut output = DVector::zeros(self.csr_matrix.nrows());

        for (row_idx, row) in self.csr_matrix.row_iter().enumerate() {
            let mut sum = Complex::new(0.0, 0.0);
            for (col_idx, value) in row.col_indices().iter().zip(row.values()) {
                sum += value * nalgebra_vec[*col_idx];
            }
            output[row_idx] = sum;
        }

        let output_array: Array1<Complex64> =
            Array1::from_iter(output.iter().map(|c| Complex64::new(c.re, c.im)));

        result.data.assign(&output_array);
        Ok(())
    }

    /// Approximate "solve": a single diagonal (Jacobi-style) sweep,
    /// `x_i = b_i / a_ii`, which ignores all off-diagonal coupling. The
    /// original repeated this identical sweep 100 times with no additional
    /// effect, so one pass is kept; use `SparseSolvers` for real iterative
    /// methods.
    pub fn solve(&self, rhs: &Vector) -> Result<Vector> {
        use nalgebra::Complex;
        use nalgebra_sparse::SparseEntry;

        let rhs_array = rhs.to_array1()?;
        let mut solution = rhs_array.clone();

        for i in 0..solution.len().min(self.csr_matrix.nrows()) {
            let diag = self
                .csr_matrix
                .get_entry(i, i)
                .map_or(Complex::new(1.0, 0.0), |entry| match entry {
                    SparseEntry::NonZero(v) => *v,
                    SparseEntry::Zero => Complex::new(0.0, 0.0),
                });

            if diag.norm() > 1e-14 {
                solution[i] = rhs_array[i] / diag;
            }
        }

        Vector::from_array1(&solution.view(), &MemoryPool::new())
    }

    pub fn shape(&self) -> (usize, usize) {
        (self.csr_matrix.nrows(), self.csr_matrix.ncols())
    }

    pub fn nnz(&self) -> usize {
        self.csr_matrix.nnz()
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct SparseMatrix;

#[cfg(not(feature = "advanced_math"))]
impl SparseMatrix {
    pub fn from_csr(
        _values: &[Complex64],
        _col_indices: &[usize],
        _row_ptr: &[usize],
        _num_rows: usize,
        _num_cols: usize,
        _pool: &MemoryPool,
    ) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn matvec(&self, _vector: &Vector, _result: &mut Vector) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn solve(&self, _rhs: &Vector) -> Result<Vector> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}
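
// Matvec sketch (added example, not original code): a 2x2 identity in CSR
// form maps a vector to itself. Gated on `advanced_math` because
// `SparseMatrix` is a stub otherwise.
#[cfg(all(test, feature = "advanced_math"))]
mod sparse_matvec_example {
    use super::*;

    #[test]
    fn identity_matvec_is_identity() {
        let pool = MemoryPool::new();
        let values = [Complex64::new(1.0, 0.0), Complex64::new(1.0, 0.0)];
        let col_indices = [0usize, 1];
        let row_ptr = [0usize, 1, 2];
        let matrix = SparseMatrix::from_csr(&values, &col_indices, &row_ptr, 2, 2, &pool)
            .expect("valid CSR");

        let x = Vector::from_array1(
            &Array1::from_vec(vec![Complex64::new(3.0, 1.0), Complex64::new(-2.0, 0.5)]).view(),
            &pool,
        )
        .expect("vector");
        let mut y = Vector::zeros(2, &pool).expect("vector");
        matrix.matvec(&x, &mut y).expect("dimensions match");

        let x_dense = x.to_array1().expect("dense");
        let y_dense = y.to_array1().expect("dense");
        for (a, b) in x_dense.iter().zip(y_dense.iter()) {
            assert!((a - b).norm() < 1e-12);
        }
    }
}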

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct BLAS;

#[cfg(feature = "advanced_math")]
impl BLAS {
    /// `C <- alpha * A * B + beta * C` (dense, complex).
    pub fn gemm(
        alpha: Complex64,
        a: &Matrix,
        b: &Matrix,
        beta: Complex64,
        c: &mut Matrix,
    ) -> Result<()> {
        let a_scaled = &a.data * alpha;
        let c_scaled = &c.data * beta;
        let result = a_scaled.dot(&b.data) + c_scaled;
        c.data.assign(&result);
        Ok(())
    }

    /// `y <- alpha * A * x + beta * y` (dense, complex).
    pub fn gemv(
        alpha: Complex64,
        a: &Matrix,
        x: &Vector,
        beta: Complex64,
        y: &mut Vector,
    ) -> Result<()> {
        let y_scaled = &y.data * beta;
        let result = &a.data.dot(&x.data) * alpha + y_scaled;
        y.data.assign(&result);
        Ok(())
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct BLAS;

#[cfg(not(feature = "advanced_math"))]
impl BLAS {
    pub fn gemm(
        _alpha: Complex64,
        _a: &Matrix,
        _b: &Matrix,
        _beta: Complex64,
        _c: &mut Matrix,
    ) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn gemv(
        _alpha: Complex64,
        _a: &Matrix,
        _x: &Vector,
        _beta: Complex64,
        _y: &mut Vector,
    ) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}
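
// GEMM sketch (added example, not original code): with alpha = 1 and beta = 0
// the call reduces to C = A * B; the identity times itself is the identity.
#[cfg(all(test, feature = "advanced_math"))]
mod blas_gemm_example {
    use super::*;

    #[test]
    fn gemm_identity_times_identity() {
        let pool = MemoryPool::new();
        let a = Matrix::from_array2(&Array2::eye(2).view(), &pool).expect("matrix");
        let b = Matrix::from_array2(&Array2::eye(2).view(), &pool).expect("matrix");
        let mut c = Matrix::zeros((2, 2), &pool).expect("matrix");

        BLAS::gemm(
            Complex64::new(1.0, 0.0),
            &a,
            &b,
            Complex64::new(0.0, 0.0),
            &mut c,
        )
        .expect("conformable shapes");

        for i in 0..2 {
            for j in 0..2 {
                let expected = if i == j { 1.0 } else { 0.0 };
                assert!((c.view()[[i, j]] - Complex64::new(expected, 0.0)).norm() < 1e-12);
            }
        }
    }
}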

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct LAPACK;

#[cfg(feature = "advanced_math")]
impl LAPACK {
    pub fn svd(matrix: &Matrix) -> Result<SvdResult> {
        use scirs2_core::ndarray::ndarray_linalg::SVD;

        let svd_result = matrix
            .data
            .svd(true, true)
            .map_err(|_| SimulatorError::ComputationError("SVD computation failed".to_string()))?;

        let pool = MemoryPool::new();

        let u_data = svd_result.0.ok_or_else(|| {
            SimulatorError::ComputationError("SVD U matrix not computed".to_string())
        })?;
        let s_data = svd_result.1;
        let vt_data = svd_result.2.ok_or_else(|| {
            SimulatorError::ComputationError("SVD Vt matrix not computed".to_string())
        })?;

        let u = Matrix::from_array2(&u_data.view(), &pool)?;
        // Singular values come back real; lift them into the complex type
        // used throughout this module.
        let s_complex: Array1<Complex64> = s_data.mapv(|x| Complex64::new(x, 0.0));
        let s = Vector::from_array1(&s_complex.view(), &pool)?;
        let vt = Matrix::from_array2(&vt_data.view(), &pool)?;

        Ok(SvdResult { u, s, vt })
    }

    pub fn eig(matrix: &Matrix) -> Result<EigResult> {
        use scirs2_core::ndarray::ndarray_linalg::Eig;

        let eig_result = matrix.data.eig().map_err(|_| {
            SimulatorError::ComputationError("Eigenvalue decomposition failed".to_string())
        })?;

        let pool = MemoryPool::new();
        let values = Vector::from_array1(&eig_result.0.view(), &pool)?;
        let vectors = Matrix::from_array2(&eig_result.1.view(), &pool)?;

        Ok(EigResult { values, vectors })
    }

    /// Doolittle LU with partial pivoting. Returns `(L, U, perm)` where row
    /// `i` of `L * U` equals row `perm[i]` of the input matrix.
    pub fn lu(matrix: &Matrix) -> Result<(Matrix, Matrix, Vec<usize>)> {
        let n = matrix.data.nrows();
        let pool = MemoryPool::new();

        let mut l_data = Array2::eye(n);
        let mut u_data = matrix.data.clone();
        let mut perm_vec: Vec<usize> = (0..n).collect();

        for k in 0..n {
            // Find the pivot: the largest remaining entry in column k.
            let mut max_row = k;
            let mut max_val = u_data[[k, k]].norm();
            for i in k + 1..n {
                let val = u_data[[i, k]].norm();
                if val > max_val {
                    max_val = val;
                    max_row = i;
                }
            }

            if max_row != k {
                for j in 0..n {
                    let temp = u_data[[k, j]];
                    u_data[[k, j]] = u_data[[max_row, j]];
                    u_data[[max_row, j]] = temp;
                }
                // Keep L consistent with the row swap: multipliers already
                // computed in columns < k move with their rows (the original
                // omitted this, which broke the P*A = L*U invariant).
                for j in 0..k {
                    let temp = l_data[[k, j]];
                    l_data[[k, j]] = l_data[[max_row, j]];
                    l_data[[max_row, j]] = temp;
                }
                perm_vec.swap(k, max_row);
            }

            // Eliminate entries below the pivot.
            if u_data[[k, k]].norm() > 1e-10 {
                for i in k + 1..n {
                    let factor = u_data[[i, k]] / u_data[[k, k]];
                    l_data[[i, k]] = factor;
                    for j in k..n {
                        let u_kj = u_data[[k, j]];
                        u_data[[i, j]] -= factor * u_kj;
                    }
                }
            }
        }

        let l_matrix = Matrix::from_array2(&l_data.view(), &pool)?;
        let u_matrix = Matrix::from_array2(&u_data.view(), &pool)?;

        Ok((l_matrix, u_matrix, perm_vec))
    }

    pub fn qr(matrix: &Matrix) -> Result<(Matrix, Matrix)> {
        use scirs2_core::ndarray::ndarray_linalg::QR;

        let (q, r) = matrix
            .data
            .qr()
            .map_err(|_| SimulatorError::ComputationError("QR decomposition failed".to_string()))?;

        let pool = MemoryPool::new();
        let q_matrix = Matrix::from_array2(&q.view(), &pool)?;
        let r_matrix = Matrix::from_array2(&r.view(), &pool)?;

        Ok((q_matrix, r_matrix))
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct LAPACK;

#[cfg(not(feature = "advanced_math"))]
impl LAPACK {
    pub fn svd(_matrix: &Matrix) -> Result<SvdResult> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn eig(_matrix: &Matrix) -> Result<EigResult> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}
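
// LU sketch (added example, not original code): for the 2x2 row-swap matrix,
// pivoting records the permutation, and row i of L*U equals row perm[i] of A.
// This assumes the convention documented on `LAPACK::lu` above.
#[cfg(all(test, feature = "advanced_math"))]
mod lapack_lu_example {
    use super::*;

    #[test]
    fn lu_recovers_permuted_matrix() {
        let pool = MemoryPool::new();
        let a_data = Array2::from_shape_vec(
            (2, 2),
            vec![
                Complex64::new(0.0, 0.0),
                Complex64::new(1.0, 0.0),
                Complex64::new(1.0, 0.0),
                Complex64::new(0.0, 0.0),
            ],
        )
        .expect("2x2 shape");
        let a = Matrix::from_array2(&a_data.view(), &pool).expect("matrix");

        let (l, u, perm) = LAPACK::lu(&a).expect("LU succeeds");
        let lu = l.view().dot(&u.view());

        for i in 0..2 {
            for j in 0..2 {
                assert!((lu[[i, j]] - a_data[[perm[i], j]]).norm() < 1e-10);
            }
        }
    }
}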

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct SvdResult {
    pub u: Matrix,
    pub s: Vector,
    pub vt: Matrix,
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct EigResult {
    pub values: Vector,
    pub vectors: Matrix,
}

#[cfg(feature = "advanced_math")]
impl EigResult {
    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        self.values.to_array1()
    }

    pub const fn eigenvalues(&self) -> &Vector {
        &self.values
    }

    pub const fn eigenvectors(&self) -> &Matrix {
        &self.vectors
    }
}

#[cfg(feature = "advanced_math")]
impl SvdResult {
    pub fn to_array2(&self) -> Result<Array2<Complex64>> {
        self.u.data.to_owned().into_dimensionality().map_err(|_| {
            SimulatorError::ComputationError("Failed to convert SVD result to array2".to_string())
        })
    }

    pub const fn u_matrix(&self) -> &Matrix {
        &self.u
    }

    pub const fn singular_values(&self) -> &Vector {
        &self.s
    }

    pub const fn vt_matrix(&self) -> &Matrix {
        &self.vt
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct SvdResult;

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct EigResult;

#[cfg(not(feature = "advanced_math"))]
impl EigResult {
    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(not(feature = "advanced_math"))]
impl SvdResult {
    pub fn to_array2(&self) -> Result<Array2<Complex64>> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedFFT;

#[cfg(feature = "advanced_math")]
impl AdvancedFFT {
    /// 2D FFT computed as a row pass followed by a column pass.
    pub fn fft_nd(input: &Array2<Complex64>) -> Result<Array2<Complex64>> {
        use ndrustfft::{ndfft, FftHandler};

        let (rows, cols) = input.dim();
        let mut result = input.clone();

        for i in 0..rows {
            let row = input.row(i).to_owned();
            let mut row_out = row.clone();
            let handler = FftHandler::new(cols);
            ndfft(&row, &mut row_out, &handler, 0);
            result.row_mut(i).assign(&row_out);
        }

        for j in 0..cols {
            let col = result.column(j).to_owned();
            let mut col_out = col.clone();
            let handler = FftHandler::new(rows);
            ndfft(&col, &mut col_out, &handler, 0);
            result.column_mut(j).assign(&col_out);
        }

        Ok(result)
    }

    /// Short-time FFT with a Hann window. `window_size` must exceed `overlap`.
    pub fn windowed_fft(
        input: &Vector,
        window_size: usize,
        overlap: usize,
    ) -> Result<Array2<Complex64>> {
        let array = input.to_array1()?;
        let step_size = window_size - overlap;
        let num_windows = (array.len() - overlap) / step_size;

        let mut result = Array2::zeros((num_windows, window_size));

        for (i, mut row) in result.outer_iter_mut().enumerate() {
            let start = i * step_size;
            let end = (start + window_size).min(array.len());

            if end - start == window_size {
                let window = array.slice(s![start..end]);

                // Hann window: w[j] = 0.5 * (1 - cos(2*pi*j / (N-1))).
                let windowed: Array1<Complex64> = window
                    .iter()
                    .enumerate()
                    .map(|(j, &val)| {
                        let hann =
                            0.5 * (1.0 - (2.0 * PI * j as f64 / (window_size - 1) as f64).cos());
                        val * Complex64::new(hann, 0.0)
                    })
                    .collect();

                let handler = FftHandler::new(window_size);
                let mut fft_result = windowed.clone();
                ndrustfft::ndfft(&windowed, &mut fft_result, &handler, 0);

                row.assign(&fft_result);
            }
        }

        Ok(result)
    }

    /// Linear convolution via zero-padded FFTs of length `next_power_of_two`.
    pub fn convolution(a: &Vector, b: &Vector) -> Result<Vector> {
        let a_array = a.to_array1()?;
        let b_array = b.to_array1()?;

        let n = a_array.len() + b_array.len() - 1;
        let fft_size = n.next_power_of_two();

        let mut a_padded = Array1::zeros(fft_size);
        let mut b_padded = Array1::zeros(fft_size);
        a_padded.slice_mut(s![..a_array.len()]).assign(&a_array);
        b_padded.slice_mut(s![..b_array.len()]).assign(&b_array);

        let handler = FftHandler::new(fft_size);
        let mut a_fft = a_padded.clone();
        let mut b_fft = b_padded.clone();
        ndrustfft::ndfft(&a_padded, &mut a_fft, &handler, 0);
        ndrustfft::ndfft(&b_padded, &mut b_fft, &handler, 0);

        // Pointwise product in the frequency domain.
        let mut product = Array1::zeros(fft_size);
        for i in 0..fft_size {
            product[i] = a_fft[i] * b_fft[i];
        }

        let mut result = product.clone();
        ndrustfft::ndifft(&product, &mut result, &handler, 0);

        // Keep only the `n` valid convolution samples.
        let truncated = result.slice(s![..n]).to_owned();
        Vector::from_array1(&truncated.view(), &MemoryPool::new())
    }
}
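
// Convolution sketch (added example, not original code): [1, 1] * [1, 1]
// should come out as [1, 2, 1] up to floating-point round-off.
#[cfg(all(test, feature = "advanced_math"))]
mod convolution_example {
    use super::*;

    #[test]
    fn boxcar_self_convolution() {
        let pool = MemoryPool::new();
        let ones = Vector::from_array1(
            &Array1::from_vec(vec![Complex64::new(1.0, 0.0); 2]).view(),
            &pool,
        )
        .expect("vector");

        let conv = AdvancedFFT::convolution(&ones, &ones).expect("convolution");
        let dense = conv.to_array1().expect("dense");
        let expected = [1.0, 2.0, 1.0];
        assert_eq!(dense.len(), expected.len());
        for (c, e) in dense.iter().zip(expected.iter()) {
            assert!((c - Complex64::new(*e, 0.0)).norm() < 1e-9);
        }
    }
}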

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct SparseSolvers;

#[cfg(feature = "advanced_math")]
impl SparseSolvers {
    /// Conjugate gradient for Hermitian positive-definite systems. Inner
    /// products use `dotc` (conjugating the left argument); the original
    /// plain `dot` summed unconjugated squares, which is wrong for complex
    /// vectors.
    pub fn conjugate_gradient(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
    ) -> Result<Vector> {
        use nalgebra::{Complex, DVector};

        let b_array = b.to_array1()?;
        let b_vec = DVector::from_iterator(
            b_array.len(),
            b_array.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut x = if let Some(x0_vec) = x0 {
            let x0_array = x0_vec.to_array1()?;
            DVector::from_iterator(
                x0_array.len(),
                x0_array.iter().map(|&c| Complex::new(c.re, c.im)),
            )
        } else {
            DVector::zeros(b_vec.len())
        };

        // Initial residual r = b - A x.
        let pool = MemoryPool::new();
        let x_vector = Vector::from_array1(
            &Array1::from_vec(x.iter().map(|c| Complex64::new(c.re, c.im)).collect()).view(),
            &pool,
        )?;
        let mut ax_vector = Vector::zeros(x.len(), &pool)?;
        matrix.matvec(&x_vector, &mut ax_vector)?;

        let ax_array = ax_vector.to_array1()?;
        let ax = DVector::from_iterator(
            ax_array.len(),
            ax_array.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut r = &b_vec - &ax;
        let mut p = r.clone();
        let mut rsold = r.dotc(&r).re;

        for _ in 0..max_iterations {
            let p_vec = Vector::from_array1(
                &Array1::from_vec(p.iter().map(|c| Complex64::new(c.re, c.im)).collect()).view(),
                &MemoryPool::new(),
            )?;
            let mut ap_vec =
                Vector::from_array1(&Array1::zeros(p.len()).view(), &MemoryPool::new())?;
            matrix.matvec(&p_vec, &mut ap_vec)?;
            let ap_array = ap_vec.to_array1()?;
            let ap = DVector::from_iterator(
                ap_array.len(),
                ap_array.iter().map(|&c| Complex::new(c.re, c.im)),
            );

            // alpha = <r, r> / <p, A p>.
            let alpha = rsold / p.dotc(&ap).re;
            let alpha_complex = Complex::new(alpha, 0.0);
            x += &p * alpha_complex;
            r -= &ap * alpha_complex;

            let rsnew = r.dotc(&r).re;
            if rsnew.sqrt() < tolerance {
                break;
            }

            let beta = rsnew / rsold;
            let beta_complex = Complex::new(beta, 0.0);
            p = &r + &p * beta_complex;
            rsold = rsnew;
        }

        let result_array =
            Array1::from_vec(x.iter().map(|c| Complex64::new(c.re, c.im)).collect());
        Vector::from_array1(&result_array.view(), &MemoryPool::new())
    }

    /// Restarted GMRES. Builds a Krylov basis by Arnoldi iteration; the small
    /// projected system is then solved by plain back substitution on the
    /// upper triangle of H rather than the usual Givens least-squares update,
    /// so the update is an approximation of textbook GMRES.
    pub fn gmres(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
        restart: usize,
    ) -> Result<Vector> {
        let b_array = b.to_array1()?;
        let n = b_array.len();

        let mut x = if let Some(x0_vec) = x0 {
            x0_vec.to_array1()?.to_owned()
        } else {
            Array1::zeros(n)
        };

        for _restart_iter in 0..(max_iterations / restart) {
            // Residual r = b - A x.
            let mut ax = Array1::zeros(n);
            let x_vec = Vector::from_array1(&x.view(), &MemoryPool::new())?;
            let mut ax_vec = Vector::from_array1(&ax.view(), &MemoryPool::new())?;
            matrix.matvec(&x_vec, &mut ax_vec)?;
            ax = ax_vec.to_array1()?;

            let mut r = &b_array - &ax;
            let beta = r.norm_l2();

            if beta < tolerance {
                break;
            }

            r = r.mapv(|x| x / Complex64::new(beta, 0.0));

            let mut v = vec![r.clone()];
            let mut h = Array2::zeros((restart + 1, restart));

            // Arnoldi: orthogonalize A*v_j against the previous basis vectors.
            for j in 0..restart.min(max_iterations) {
                let v_vec = Vector::from_array1(&v[j].view(), &MemoryPool::new())?;
                let mut av = Array1::zeros(n);
                let mut av_vec = Vector::from_array1(&av.view(), &MemoryPool::new())?;
                matrix.matvec(&v_vec, &mut av_vec)?;
                av = av_vec.to_array1()?;

                for i in 0..=j {
                    h[[i, j]] = v[i].dot(&av);
                    av = av - h[[i, j]] * &v[i];
                }

                h[[j + 1, j]] = Complex64::new(av.norm_l2(), 0.0);

                if h[[j + 1, j]].norm() < tolerance {
                    break;
                }

                av /= h[[j + 1, j]];
                v.push(av);
            }

            // Solve the projected upper-triangular system H y = beta * e1.
            let krylov_dim = v.len() - 1;
            if krylov_dim > 0 {
                let mut e1 = Array1::zeros(krylov_dim + 1);
                e1[0] = Complex64::new(beta, 0.0);

                let mut y = Array1::zeros(krylov_dim);
                for i in (0..krylov_dim).rev() {
                    let mut sum = Complex64::new(0.0, 0.0);
                    for j in (i + 1)..krylov_dim {
                        sum += h[[i, j]] * y[j];
                    }
                    y[i] = (e1[i] - sum) / h[[i, i]];
                }

                // Update the iterate from the Krylov basis.
                for i in 0..krylov_dim {
                    x = x + y[i] * &v[i];
                }
            }
        }

        Vector::from_array1(&x.view(), &MemoryPool::new())
    }

    /// BiCGSTAB for general (non-Hermitian) systems. Inner products keep the
    /// original unconjugated `dot` convention.
    pub fn bicgstab(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
    ) -> Result<Vector> {
        let b_array = b.to_array1()?;
        let n = b_array.len();

        let mut x = if let Some(x0_vec) = x0 {
            x0_vec.to_array1()?.to_owned()
        } else {
            Array1::zeros(n)
        };

        // Initial residual r = b - A x, with r0 as the shadow residual.
        let mut ax = Array1::zeros(n);
        let x_vec = Vector::from_array1(&x.view(), &MemoryPool::new())?;
        let mut ax_vec = Vector::from_array1(&ax.view(), &MemoryPool::new())?;
        matrix.matvec(&x_vec, &mut ax_vec)?;
        ax = ax_vec.to_array1()?;

        let mut r = &b_array - &ax;
        let r0 = r.clone();

        let mut rho = Complex64::new(1.0, 0.0);
        let mut alpha = Complex64::new(1.0, 0.0);
        let mut omega = Complex64::new(1.0, 0.0);

        let mut p = Array1::zeros(n);
        let mut v = Array1::zeros(n);

        for _ in 0..max_iterations {
            let rho_new = r0.dot(&r);
            let beta = (rho_new / rho) * (alpha / omega);

            p = &r + beta * (&p - omega * &v);

            let p_vec = Vector::from_array1(&p.view(), &MemoryPool::new())?;
            let mut v_vec = Vector::from_array1(&v.view(), &MemoryPool::new())?;
            matrix.matvec(&p_vec, &mut v_vec)?;
            v = v_vec.to_array1()?;

            alpha = rho_new / r0.dot(&v);
            let s = &r - alpha * &v;

            if s.norm_l2() < tolerance {
                x = x + alpha * &p;
                break;
            }

            let s_vec = Vector::from_array1(&s.view(), &MemoryPool::new())?;
            let mut t_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&s_vec, &mut t_vec)?;
            let t = t_vec.to_array1()?;

            omega = t.dot(&s) / t.dot(&t);
            x = x + alpha * &p + omega * &s;
            r = s - omega * &t;

            if r.norm_l2() < tolerance {
                break;
            }

            rho = rho_new;
        }

        Vector::from_array1(&x.view(), &MemoryPool::new())
    }
}
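
// CG sketch (added example, not original code): solving I x = b converges in
// a single iteration, returning x = b. Uses the same CSR identity as the
// matvec example above.
#[cfg(all(test, feature = "advanced_math"))]
mod cg_example {
    use super::*;

    #[test]
    fn cg_on_identity_returns_rhs() {
        let pool = MemoryPool::new();
        let values = [Complex64::new(1.0, 0.0), Complex64::new(1.0, 0.0)];
        let col_indices = [0usize, 1];
        let row_ptr = [0usize, 1, 2];
        let matrix = SparseMatrix::from_csr(&values, &col_indices, &row_ptr, 2, 2, &pool)
            .expect("valid CSR");

        let b = Vector::from_array1(
            &Array1::from_vec(vec![Complex64::new(2.0, 0.0), Complex64::new(-1.0, 0.0)]).view(),
            &pool,
        )
        .expect("vector");

        let x = SparseSolvers::conjugate_gradient(&matrix, &b, None, 1e-10, 10).expect("solve");
        let x_dense = x.to_array1().expect("dense");
        assert!((x_dense[0] - Complex64::new(2.0, 0.0)).norm() < 1e-8);
        assert!((x_dense[1] - Complex64::new(-1.0, 0.0)).norm() < 1e-8);
    }
}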

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedEigensolvers;

#[cfg(feature = "advanced_math")]
impl AdvancedEigensolvers {
    /// Lanczos iteration for Hermitian matrices. Builds a tridiagonal
    /// projection of the operator; eigenvalues are then approximated by the
    /// diagonal of that projection (no small dense eigensolve is performed),
    /// and the returned "eigenvectors" are the Lanczos basis vectors.
    pub fn lanczos(
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        max_iterations: usize,
        tolerance: f64,
    ) -> Result<EigResult> {
        let n = matrix.csr_matrix.nrows();
        let m = num_eigenvalues.min(max_iterations);

        // Random normalized starting vector.
        let mut rng = thread_rng();
        let mut q = Array1::from_vec(
            (0..n)
                .map(|_| Complex64::new(rng.gen::<f64>() - 0.5, rng.gen::<f64>() - 0.5))
                .collect(),
        );
        let norm = q.norm_l2();
        q = q.mapv(|x| x / Complex64::new(norm, 0.0));

        let mut q_vectors = vec![q.clone()];

        let mut alpha = Vec::new();
        let mut beta = Vec::new();

        let mut q_prev = Array1::<Complex64>::zeros(n);

        for j in 0..m {
            let q_vec = Vector::from_array1(&q_vectors[j].view(), &MemoryPool::new())?;
            let mut av_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&q_vec, &mut av_vec)?;
            let mut av = av_vec.to_array1()?;

            let alpha_j = q_vectors[j].dot(&av);
            alpha.push(alpha_j);

            // Three-term recurrence: orthogonalize against q_j and q_{j-1}.
            av = av - alpha_j * &q_vectors[j];
            if j > 0 {
                av = av - Complex64::new(beta[j - 1], 0.0) * &q_prev;
            }

            let beta_j = av.norm_l2();

            if beta_j < tolerance {
                break;
            }

            beta.push(beta_j);
            q_prev = q_vectors[j].clone();

            if j + 1 < m {
                q = av / beta_j;
                q_vectors.push(q.clone());
            }
        }

        // Assemble the projected tridiagonal matrix.
        let dim = alpha.len();
        let mut tridiag = Array2::zeros((dim, dim));

        for i in 0..dim {
            tridiag[[i, i]] = alpha[i];
            if i > 0 {
                tridiag[[i - 1, i]] = Complex64::new(beta[i - 1], 0.0);
                tridiag[[i, i - 1]] = Complex64::new(beta[i - 1], 0.0);
            }
        }

        // Diagonal of the projection as the eigenvalue estimate.
        let mut eigenvalues = Array1::zeros(num_eigenvalues.min(dim));
        for i in 0..eigenvalues.len() {
            eigenvalues[i] = tridiag[[i, i]];
        }

        let mut eigenvectors = Array2::zeros((n, eigenvalues.len()));
        for (i, mut col) in eigenvectors
            .columns_mut()
            .into_iter()
            .enumerate()
            .take(eigenvalues.len())
        {
            if i < q_vectors.len() {
                col.assign(&q_vectors[i]);
            }
        }

        let values = Vector::from_array1(&eigenvalues.view(), &MemoryPool::new())?;
        let vectors = Matrix::from_array2(&eigenvectors.view(), &MemoryPool::new())?;

        Ok(EigResult { values, vectors })
    }

    /// Arnoldi iteration for general matrices; same caveats as `lanczos`:
    /// eigenvalues are read off the Hessenberg diagonal, and the returned
    /// vectors are the Arnoldi basis.
    pub fn arnoldi(
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        max_iterations: usize,
        tolerance: f64,
    ) -> Result<EigResult> {
        let n = matrix.csr_matrix.nrows();
        let m = num_eigenvalues.min(max_iterations);

        let mut rng = thread_rng();
        let mut q = Array1::from_vec(
            (0..n)
                .map(|_| Complex64::new(rng.gen::<f64>() - 0.5, rng.gen::<f64>() - 0.5))
                .collect(),
        );
        let norm = q.norm_l2();
        q = q.mapv(|x| x / Complex64::new(norm, 0.0));

        let mut q_vectors = vec![q.clone()];

        let mut h = Array2::zeros((m + 1, m));

        for j in 0..m {
            let q_vec = Vector::from_array1(&q_vectors[j].view(), &MemoryPool::new())?;
            let mut v_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&q_vec, &mut v_vec)?;
            let mut v = v_vec.to_array1()?;

            // Full Gram-Schmidt against all previous basis vectors.
            for i in 0..=j {
                h[[i, j]] = q_vectors[i].dot(&v);
                v = v - h[[i, j]] * &q_vectors[i];
            }

            h[[j + 1, j]] = Complex64::new(v.norm_l2(), 0.0);

            if h[[j + 1, j]].norm() < tolerance {
                break;
            }

            if j + 1 < m {
                q = v / h[[j + 1, j]];
                q_vectors.push(q.clone());
            }
        }

        let dim = q_vectors.len();
        let mut eigenvalues = Array1::zeros(num_eigenvalues.min(dim));
        for i in 0..eigenvalues.len() {
            eigenvalues[i] = h[[i, i]];
        }

        let mut eigenvectors = Array2::zeros((n, eigenvalues.len()));
        for (i, mut col) in eigenvectors
            .columns_mut()
            .into_iter()
            .enumerate()
            .take(eigenvalues.len())
        {
            if i < q_vectors.len() {
                col.assign(&q_vectors[i]);
            }
        }

        let values = Vector::from_array1(&eigenvalues.view(), &MemoryPool::new())?;
        let vectors = Matrix::from_array2(&eigenvectors.view(), &MemoryPool::new())?;

        Ok(EigResult { values, vectors })
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedLinearAlgebra;

#[cfg(feature = "advanced_math")]
impl AdvancedLinearAlgebra {
    pub fn qr_decomposition(matrix: &Matrix) -> Result<QRResult> {
        use scirs2_core::ndarray::ndarray_linalg::QR;

        let qr_result = matrix
            .data
            .qr()
            .map_err(|_| SimulatorError::ComputationError("QR decomposition failed".to_string()))?;

        let pool = MemoryPool::new();
        let q = Matrix::from_array2(&qr_result.0.view(), &pool)?;
        let r = Matrix::from_array2(&qr_result.1.view(), &pool)?;

        Ok(QRResult { q, r })
    }

    pub fn cholesky_decomposition(matrix: &Matrix) -> Result<Matrix> {
        use scirs2_core::ndarray::ndarray_linalg::{Cholesky, UPLO};

        let chol_result = matrix.data.cholesky(UPLO::Lower).map_err(|_| {
            SimulatorError::ComputationError("Cholesky decomposition failed".to_string())
        })?;

        Matrix::from_array2(&chol_result.view(), &MemoryPool::new())
    }

    /// Unitary time evolution `exp(-i t H)` via a truncated Taylor series
    /// (up to 20 terms, with early exit once terms drop below 1e-12).
    pub fn matrix_exponential(matrix: &Matrix, t: f64) -> Result<Matrix> {
        let scaled_matrix = &matrix.data * Complex64::new(0.0, -t);

        let mut result = Array2::eye(scaled_matrix.nrows());
        let mut term = Array2::eye(scaled_matrix.nrows());

        for k in 1..20 {
            // term_k = term_{k-1} * (-i t H) / k
            term = term.dot(&scaled_matrix) / Complex64::new(k as f64, 0.0);
            result += &term;

            if term.norm_l2() < 1e-12 {
                break;
            }
        }

        Matrix::from_array2(&result.view(), &MemoryPool::new())
    }

    /// Moore-Penrose pseudoinverse via SVD: `A^+ = V S^+ U^H`. Because the
    /// matrices are complex, the conjugate transpose is required; the
    /// original used the plain transpose, which is wrong for complex data.
    pub fn pseudoinverse(matrix: &Matrix, tolerance: f64) -> Result<Matrix> {
        let svd_result = LAPACK::svd(matrix)?;

        let u = svd_result.u.data;
        let s = svd_result.s.to_array1()?;
        let vt = svd_result.vt.data;

        // Invert singular values above the tolerance; leave the rest at zero.
        let mut s_pinv = Array1::zeros(s.len());
        for (i, &sigma) in s.iter().enumerate() {
            if sigma.norm() > tolerance {
                s_pinv[i] = Complex64::new(1.0, 0.0) / sigma;
            }
        }

        let s_pinv_diag = Array2::from_diag(&s_pinv);
        // V = (Vt)^H and U^H: transpose plus element-wise conjugation.
        let v = vt.t().mapv(|z| z.conj());
        let u_h = u.t().mapv(|z| z.conj());
        let result = v.dot(&s_pinv_diag).dot(&u_h);

        Matrix::from_array2(&result.view(), &MemoryPool::new())
    }

    /// Condition number as the ratio of the largest to smallest singular
    /// value above a 1e-15 cutoff.
    pub fn condition_number(matrix: &Matrix) -> Result<f64> {
        let svd_result = LAPACK::svd(matrix)?;
        let s = svd_result.s.to_array1()?;

        let mut min_singular = f64::INFINITY;
        let mut max_singular: f64 = 0.0;

        for &sigma in &s {
            let sigma_norm = sigma.norm();
            if sigma_norm > 1e-15 {
                min_singular = min_singular.min(sigma_norm);
                max_singular = max_singular.max(sigma_norm);
            }
        }

        Ok(max_singular / min_singular)
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct QRResult {
    pub q: Matrix,
    pub r: Matrix,
}
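
// Matrix-exponential sketch (added example, not original code): for the 1x1
// matrix [pi] and t = 1, exp(-i * pi) = -1; twenty Taylor terms are ample at
// this norm.
#[cfg(all(test, feature = "advanced_math"))]
mod matrix_exponential_example {
    use super::*;

    #[test]
    fn exp_of_pi_is_minus_one() {
        let pool = MemoryPool::new();
        let h = Array2::from_shape_vec((1, 1), vec![Complex64::new(PI, 0.0)]).expect("1x1 shape");
        let matrix = Matrix::from_array2(&h.view(), &pool).expect("matrix");

        let result = AdvancedLinearAlgebra::matrix_exponential(&matrix, 1.0).expect("series");
        let value = result.view()[[0, 0]];
        assert!((value - Complex64::new(-1.0, 0.0)).norm() < 1e-6);
    }
}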

/// Runs small micro-benchmarks over the SciRS2 integration and returns the
/// elapsed times in milliseconds, keyed by benchmark name. With the
/// `advanced_math` feature disabled the map is empty.
pub fn benchmark_scirs2_integration() -> Result<HashMap<String, f64>> {
    let mut results = HashMap::new();

    #[cfg(feature = "advanced_math")]
    {
        let start = std::time::Instant::now();
        let engine = FftEngine::new();
        let test_vector = Vector::from_array1(
            &Array1::from_vec((0..1024).map(|i| Complex64::new(i as f64, 0.0)).collect()).view(),
            &MemoryPool::new(),
        )?;

        for _ in 0..100 {
            let _ = engine.forward(&test_vector)?;
        }

        let fft_time = start.elapsed().as_millis() as f64;
        results.insert("fft_1024_100_iterations".to_string(), fft_time);
    }

    #[cfg(feature = "advanced_math")]
    {
        use nalgebra_sparse::CsrMatrix;

        let start = std::time::Instant::now();

        // Build a 100x100 CSR matrix with 10 entries per row. CSR requires a
        // row-offset array of length num_rows + 1 (not one offset per entry,
        // as the original code supplied) and column indices sorted within
        // each row, so the band is clamped at the right edge instead of
        // wrapping around. The diagonal gets extra weight to keep the CG
        // iterations numerically tame.
        let num_rows = 100;
        let nnz_per_row = 10;
        let row_offsets: Vec<usize> = (0..=num_rows).map(|i| i * nnz_per_row).collect();
        let mut col_indices = Vec::with_capacity(num_rows * nnz_per_row);
        let mut values = Vec::with_capacity(num_rows * nnz_per_row);
        for i in 0..num_rows {
            let band_start = i.min(num_rows - nnz_per_row);
            for j in 0..nnz_per_row {
                col_indices.push(band_start + j);
                values.push(if band_start + j == i {
                    Complex64::new(10.0, 0.0)
                } else {
                    Complex64::new(1.0, 0.0)
                });
            }
        }

        let csr =
            CsrMatrix::try_from_csr_data(num_rows, num_rows, row_offsets, col_indices, values)
                .map_err(|_| {
                    SimulatorError::ComputationError("Failed to create test matrix".to_string())
                })?;

        let sparse_matrix = SparseMatrix { csr_matrix: csr };
        let b = Vector::from_array1(&Array1::ones(100).view(), &MemoryPool::new())?;

        let _ = SparseSolvers::conjugate_gradient(&sparse_matrix, &b, None, 1e-6, 100)?;

        let sparse_solver_time = start.elapsed().as_millis() as f64;
        results.insert("cg_solver_100x100".to_string(), sparse_solver_time);
    }

    #[cfg(feature = "advanced_math")]
    {
        let start = std::time::Instant::now();

        let test_matrix = Matrix::from_array2(&Array2::eye(50).view(), &MemoryPool::new())?;
        for _ in 0..10 {
            let _ = AdvancedLinearAlgebra::qr_decomposition(&test_matrix)?;
        }

        let qr_time = start.elapsed().as_millis() as f64;
        results.insert("qr_decomposition_50x50_10_iterations".to_string(), qr_time);
    }

    Ok(results)
}