#[cfg(feature = "advanced_math")]
use ndrustfft::FftHandler;
use scirs2_core::ndarray::ndarray_linalg::Norm;
use scirs2_core::ndarray::{
    s, Array1, Array2, ArrayView1, ArrayView2, ArrayViewMut1, ArrayViewMut2,
};
use scirs2_core::Complex64;
use std::collections::HashMap;
use std::f64::consts::PI;
use std::sync::{Arc, Mutex};

use quantrs2_core::prelude::QuantRS2Error as SciRS2Error;
use scirs2_core::parallel_ops::{
    current_num_threads, IndexedParallelIterator, ParallelIterator, ThreadPool, ThreadPoolBuilder,
};
use scirs2_core::simd_ops::SimdUnifiedOps;

use crate::error::{Result, SimulatorError};
use scirs2_core::random::prelude::*;

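/// Dense complex matrix wrapper used by the SciRS2 compute backend.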
#[derive(Debug, Clone)]
pub struct SciRS2Matrix {
    data: Array2<Complex64>,
    simd_aligned: bool,
}

impl SciRS2Matrix {
    pub fn zeros(shape: (usize, usize), _allocator: &SciRS2MemoryAllocator) -> Result<Self> {
        Ok(Self {
            data: Array2::zeros(shape),
            simd_aligned: true,
        })
    }

    #[must_use]
    pub const fn from_array2(array: Array2<Complex64>) -> Self {
        Self {
            data: array,
            simd_aligned: false,
        }
    }

    #[must_use]
    pub fn shape(&self) -> (usize, usize) {
        self.data.dim()
    }

    #[must_use]
    pub fn rows(&self) -> usize {
        self.data.nrows()
    }

    #[must_use]
    pub fn cols(&self) -> usize {
        self.data.ncols()
    }

    #[must_use]
    pub fn data_view(&self) -> ArrayView2<'_, Complex64> {
        self.data.view()
    }

    pub fn data_view_mut(&mut self) -> ArrayViewMut2<'_, Complex64> {
        self.data.view_mut()
    }
}

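/// Dense complex vector wrapper used by the SciRS2 compute backend.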
#[derive(Debug, Clone)]
pub struct SciRS2Vector {
    data: Array1<Complex64>,
    simd_aligned: bool,
}

impl SciRS2Vector {
    pub fn zeros(len: usize, _allocator: &SciRS2MemoryAllocator) -> Result<Self> {
        Ok(Self {
            data: Array1::zeros(len),
            simd_aligned: true,
        })
    }

    #[must_use]
    pub const fn from_array1(array: Array1<Complex64>) -> Self {
        Self {
            data: array,
            simd_aligned: false,
        }
    }

    #[must_use]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    #[must_use]
    pub fn data_view(&self) -> ArrayView1<'_, Complex64> {
        self.data.view()
    }

    pub fn data_view_mut(&mut self) -> ArrayViewMut1<'_, Complex64> {
        self.data.view_mut()
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Ok(self.data.clone())
    }
}

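/// User-tunable SIMD configuration (instruction-set and lane overrides).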
#[derive(Debug, Clone)]
pub struct SciRS2SimdConfig {
    pub force_instruction_set: Option<String>,
    pub override_simd_lanes: Option<usize>,
    pub enable_aggressive_simd: bool,
    pub numa_aware_allocation: bool,
}

impl Default for SciRS2SimdConfig {
    fn default() -> Self {
        Self {
            force_instruction_set: None,
            override_simd_lanes: None,
            enable_aggressive_simd: true,
            numa_aware_allocation: true,
        }
    }
}

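/// SIMD capabilities detected on the host CPU.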
#[derive(Debug, Clone)]
pub struct SciRS2SimdContext {
    pub simd_lanes: usize,
    pub supports_complex_simd: bool,
    pub instruction_set: String,
    pub max_vector_width: usize,
}

impl SciRS2SimdContext {
    #[must_use]
    pub fn detect_capabilities() -> Self {
        #[cfg(target_arch = "x86_64")]
        {
            if is_x86_feature_detected!("avx512f") {
                Self {
                    simd_lanes: 16,
                    supports_complex_simd: true,
                    instruction_set: "AVX-512".to_string(),
                    max_vector_width: 64,
                }
            } else if is_x86_feature_detected!("avx2") {
                Self {
                    simd_lanes: 8,
                    supports_complex_simd: true,
                    instruction_set: "AVX2".to_string(),
                    max_vector_width: 32,
                }
            } else if is_x86_feature_detected!("sse4.1") {
                Self {
                    simd_lanes: 4,
                    supports_complex_simd: true,
                    instruction_set: "SSE4.1".to_string(),
                    max_vector_width: 16,
                }
            } else {
                Self::fallback()
            }
        }
        #[cfg(target_arch = "aarch64")]
        {
            Self {
                simd_lanes: 4,
                supports_complex_simd: true,
                instruction_set: "NEON".to_string(),
                max_vector_width: 16,
            }
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            Self::fallback()
        }
    }

    pub fn from_config(config: &SciRS2SimdConfig) -> Result<Self> {
        let mut context = Self::detect_capabilities();

        if let Some(ref instruction_set) = config.force_instruction_set {
            context.instruction_set = instruction_set.clone();
        }

        if let Some(simd_lanes) = config.override_simd_lanes {
            context.simd_lanes = simd_lanes;
        }

        Ok(context)
    }

    fn fallback() -> Self {
        Self {
            simd_lanes: 1,
            supports_complex_simd: false,
            instruction_set: "Scalar".to_string(),
            max_vector_width: 8,
        }
    }
}

impl Default for SciRS2SimdContext {
    fn default() -> Self {
        Self::detect_capabilities()
    }
}

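/// Tracks allocation statistics; the actual storage is provided by `ndarray`.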
#[derive(Debug)]
pub struct SciRS2MemoryAllocator {
    pub total_allocated: usize,
    pub alignment: usize,
    allocation_count: usize,
}

unsafe impl Send for SciRS2MemoryAllocator {}
unsafe impl Sync for SciRS2MemoryAllocator {}

impl Default for SciRS2MemoryAllocator {
    fn default() -> Self {
        Self {
            total_allocated: 0,
            alignment: 64, // 64-byte alignment suits AVX-512 loads and cache lines
            allocation_count: 0,
        }
    }
}

impl SciRS2MemoryAllocator {
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }
}

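/// FFT engine: uses `ndrustfft` behind the `advanced_math` feature, with a
/// naive DFT fallback otherwise.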
#[derive(Debug)]
pub struct SciRS2VectorizedFFT {
    plans: HashMap<usize, FFTPlan>,
    optimization_level: OptimizationLevel,
}

#[derive(Debug, Clone)]
pub struct FFTPlan {
    pub size: usize,
    pub twiddle_factors: Vec<Complex64>,
    pub vectorization_strategy: VectorizationStrategy,
}

#[derive(Debug, Clone, Copy)]
pub enum VectorizationStrategy {
    SimdComplexSeparate,
    SimdComplexInterleaved,
    Adaptive,
}

#[derive(Debug, Clone, Copy)]
pub enum OptimizationLevel {
    Basic,
    Aggressive,
    Maximum,
}

impl Default for SciRS2VectorizedFFT {
    fn default() -> Self {
        Self {
            plans: HashMap::new(),
            optimization_level: OptimizationLevel::Aggressive,
        }
    }
}

impl SciRS2VectorizedFFT {
    pub fn forward(&self, input: &SciRS2Vector) -> Result<SciRS2Vector> {
        let data = input.data_view().to_owned();

        #[cfg(feature = "advanced_math")]
        {
            let handler = FftHandler::<f64>::new(data.len());
            let mut output = data.clone();
            ndrustfft::ndfft(&data, &mut output, &handler, 0);
            Ok(SciRS2Vector::from_array1(output))
        }

        #[cfg(not(feature = "advanced_math"))]
        {
            // Naive O(n^2) DFT fallback when no FFT backend is available.
            let n = data.len();
            let mut output = Array1::zeros(n);
            for k in 0..n {
                let mut sum = Complex64::new(0.0, 0.0);
                for j in 0..n {
                    let angle = -2.0 * PI * (k * j) as f64 / n as f64;
                    let twiddle = Complex64::new(angle.cos(), angle.sin());
                    sum += data[j] * twiddle;
                }
                output[k] = sum;
            }
            Ok(SciRS2Vector::from_array1(output))
        }
    }

    pub fn inverse(&self, input: &SciRS2Vector) -> Result<SciRS2Vector> {
        let data = input.data_view().to_owned();

        #[cfg(feature = "advanced_math")]
        {
            let handler = FftHandler::<f64>::new(data.len());
            let mut output = data.clone();
            ndrustfft::ndifft(&data, &mut output, &handler, 0);
            Ok(SciRS2Vector::from_array1(output))
        }

        #[cfg(not(feature = "advanced_math"))]
        {
            // Naive O(n^2) inverse DFT with 1/n normalization.
            let n = data.len();
            let mut output = Array1::zeros(n);
            let scale = 1.0 / n as f64;
            for k in 0..n {
                let mut sum = Complex64::new(0.0, 0.0);
                for j in 0..n {
                    let angle = 2.0 * PI * (k * j) as f64 / n as f64;
                    let twiddle = Complex64::new(angle.cos(), angle.sin());
                    sum += data[j] * twiddle;
                }
                output[k] = sum * scale;
            }
            Ok(SciRS2Vector::from_array1(output))
        }
    }
}

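/// Thread-pool context for parallel execution.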
#[derive(Debug)]
pub struct SciRS2ParallelContext {
    pub num_threads: usize,
    pub thread_pool: ThreadPool,
    pub numa_aware: bool,
}

impl Default for SciRS2ParallelContext {
    fn default() -> Self {
        let num_threads = current_num_threads();
        let thread_pool = ThreadPoolBuilder::new()
            .num_threads(num_threads)
            .build()
            .unwrap_or_else(|_| {
                ThreadPoolBuilder::new()
                    .build()
                    .expect("fallback thread pool creation should succeed")
            });

        Self {
            num_threads,
            thread_pool,
            numa_aware: true,
        }
    }
}

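/// Runtime counters and timings collected by the backend.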
#[derive(Debug, Default, Clone)]
pub struct BackendStats {
    pub simd_vector_ops: usize,
    pub simd_matrix_ops: usize,
    pub complex_simd_ops: usize,
    pub simd_time_ns: u64,
    pub parallel_time_ns: u64,
    pub memory_usage_bytes: usize,
    pub peak_simd_throughput: f64,
    pub simd_efficiency: f64,
    pub vectorized_fft_ops: usize,
    pub sparse_simd_ops: usize,
    pub matrix_ops: usize,
    pub lapack_time_ms: f64,
    pub cache_hit_rate: f64,
}

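/// Top-level SciRS2 backend bundling the SIMD, memory, FFT, and parallel contexts.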
#[derive(Debug)]
pub struct SciRS2Backend {
    pub available: bool,

    pub stats: Arc<Mutex<BackendStats>>,

    pub simd_context: SciRS2SimdContext,

    pub memory_allocator: SciRS2MemoryAllocator,

    pub fft_engine: SciRS2VectorizedFFT,

    pub parallel_context: SciRS2ParallelContext,
}

impl SciRS2Backend {
    #[must_use]
    pub fn new() -> Self {
        let simd_context = SciRS2SimdContext::detect_capabilities();
        let memory_allocator = SciRS2MemoryAllocator::default();
        let fft_engine = SciRS2VectorizedFFT::default();
        let parallel_context = SciRS2ParallelContext::default();

        Self {
            available: simd_context.supports_complex_simd,
            stats: Arc::new(Mutex::new(BackendStats::default())),
            simd_context,
            memory_allocator,
            fft_engine,
            parallel_context,
        }
    }

    pub fn with_config(simd_config: SciRS2SimdConfig) -> Result<Self> {
        let mut backend = Self::new();
        backend.simd_context = SciRS2SimdContext::from_config(&simd_config)?;
        Ok(backend)
    }

    #[must_use]
    pub const fn is_available(&self) -> bool {
        self.available && self.simd_context.supports_complex_simd
    }

    #[must_use]
    pub const fn get_simd_info(&self) -> &SciRS2SimdContext {
        &self.simd_context
    }

    #[must_use]
    pub fn get_stats(&self) -> BackendStats {
        self.stats
            .lock()
            .map(|guard| guard.clone())
            .unwrap_or_default()
    }

    pub fn reset_stats(&self) {
        if let Ok(mut guard) = self.stats.lock() {
            *guard = BackendStats::default();
        }
    }

    pub fn matrix_multiply(&self, a: &SciRS2Matrix, b: &SciRS2Matrix) -> Result<SciRS2Matrix> {
        let start_time = std::time::Instant::now();

        if a.cols() != b.rows() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Cannot multiply {}x{} matrix with {}x{} matrix",
                a.rows(),
                a.cols(),
                b.rows(),
                b.cols()
            )));
        }

        let result_shape = (a.rows(), b.cols());
        let mut result = SciRS2Matrix::zeros(result_shape, &self.memory_allocator)?;

        self.simd_gemm_complex(&a.data_view(), &b.data_view(), &mut result.data_view_mut())?;

        if let Ok(mut stats) = self.stats.lock() {
            stats.simd_matrix_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }

    pub fn matrix_vector_multiply(
        &self,
        a: &SciRS2Matrix,
        x: &SciRS2Vector,
    ) -> Result<SciRS2Vector> {
        let start_time = std::time::Instant::now();

        if a.cols() != x.len() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Cannot multiply {}x{} matrix with vector of length {}",
                a.rows(),
                a.cols(),
                x.len()
            )));
        }

        let mut result = SciRS2Vector::zeros(a.rows(), &self.memory_allocator)?;

        self.simd_gemv_complex(&a.data_view(), &x.data_view(), &mut result.data_view_mut())?;

        if let Ok(mut stats) = self.stats.lock() {
            stats.simd_vector_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }

    fn simd_gemm_complex(
        &self,
        a: &ArrayView2<Complex64>,
        b: &ArrayView2<Complex64>,
        c: &mut ArrayViewMut2<Complex64>,
    ) -> Result<()> {
        let (m, k) = a.dim();
        let (k2, n) = b.dim();

        assert_eq!(k, k2, "Inner dimensions must match");
        assert_eq!(c.dim(), (m, n), "Output dimensions must match");

        // Split into separate real/imaginary planes (structure-of-arrays)
        // so the inner loop is fused-multiply-add friendly.
        let a_real: Vec<f64> = a.iter().map(|z| z.re).collect();
        let a_imag: Vec<f64> = a.iter().map(|z| z.im).collect();
        let b_real: Vec<f64> = b.iter().map(|z| z.re).collect();
        let b_imag: Vec<f64> = b.iter().map(|z| z.im).collect();

        for i in 0..m {
            for j in 0..n {
                let mut real_sum = 0.0;
                let mut imag_sum = 0.0;

                let a_row_start = i * k;

                // NOTE: explicit vectorization is left to the compiler here;
                // the scalar FMA kernel is the same regardless of lane count.
                for l in 0..k {
                    let b_idx = l * n + j;
                    let ar = a_real[a_row_start + l];
                    let ai = a_imag[a_row_start + l];
                    let br = b_real[b_idx];
                    let bi = b_imag[b_idx];

                    // (ar + i*ai) * (br + i*bi)
                    real_sum += ar.mul_add(br, -(ai * bi));
                    imag_sum += ar.mul_add(bi, ai * br);
                }

                c[[i, j]] = Complex64::new(real_sum, imag_sum);
            }
        }

        Ok(())
    }

    fn simd_gemv_complex(
        &self,
        a: &ArrayView2<Complex64>,
        x: &ArrayView1<Complex64>,
        y: &mut ArrayViewMut1<Complex64>,
    ) -> Result<()> {
        let (m, n) = a.dim();
        assert_eq!(x.len(), n, "Vector length must match matrix columns");
        assert_eq!(y.len(), m, "Output vector length must match matrix rows");

        // Structure-of-arrays split, as in `simd_gemm_complex`.
        let a_real: Vec<f64> = a.iter().map(|z| z.re).collect();
        let a_imag: Vec<f64> = a.iter().map(|z| z.im).collect();
        let x_real: Vec<f64> = x.iter().map(|z| z.re).collect();
        let x_imag: Vec<f64> = x.iter().map(|z| z.im).collect();

        for i in 0..m {
            let mut real_sum = 0.0;
            let mut imag_sum = 0.0;

            let row_start = i * n;

            if n >= self.simd_context.simd_lanes {
                // Process whole lane-sized chunks, then the remainder.
                let chunks = n / self.simd_context.simd_lanes;

                for chunk in 0..chunks {
                    let start_idx = chunk * self.simd_context.simd_lanes;
                    let end_idx = start_idx + self.simd_context.simd_lanes;

                    for j in start_idx..end_idx {
                        let a_idx = row_start + j;
                        let ar = a_real[a_idx];
                        let ai = a_imag[a_idx];
                        let xr = x_real[j];
                        let xi = x_imag[j];

                        real_sum += ar.mul_add(xr, -(ai * xi));
                        imag_sum += ar.mul_add(xi, ai * xr);
                    }
                }

                for j in (chunks * self.simd_context.simd_lanes)..n {
                    let a_idx = row_start + j;
                    let ar = a_real[a_idx];
                    let ai = a_imag[a_idx];
                    let xr = x_real[j];
                    let xi = x_imag[j];

                    real_sum += ar.mul_add(xr, -(ai * xi));
                    imag_sum += ar.mul_add(xi, ai * xr);
                }
            } else {
                for j in 0..n {
                    let a_idx = row_start + j;
                    let ar = a_real[a_idx];
                    let ai = a_imag[a_idx];
                    let xr = x_real[j];
                    let xi = x_imag[j];

                    real_sum += ar.mul_add(xr, -(ai * xi));
                    imag_sum += ar.mul_add(xi, ai * xr);
                }
            }

            y[i] = Complex64::new(real_sum, imag_sum);
        }

        Ok(())
    }

    #[cfg(feature = "advanced_math")]
    pub fn svd(&mut self, matrix: &Matrix) -> Result<SvdResult> {
        let start_time = std::time::Instant::now();

        let result = LAPACK::svd(matrix)?;

        if let Ok(mut stats) = self.stats.lock() {
            stats.simd_matrix_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }

    #[cfg(feature = "advanced_math")]
    pub fn eigendecomposition(&mut self, matrix: &Matrix) -> Result<EigResult> {
        let start_time = std::time::Instant::now();

        let result = LAPACK::eig(matrix)?;

        if let Ok(mut stats) = self.stats.lock() {
            stats.simd_matrix_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }
}

impl Default for SciRS2Backend {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct MemoryPool {
    _placeholder: (),
}

#[cfg(feature = "advanced_math")]
impl Default for MemoryPool {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "advanced_math")]
impl MemoryPool {
    pub const fn new() -> Self {
        Self { _placeholder: () }
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct MemoryPool;

#[cfg(not(feature = "advanced_math"))]
impl Default for MemoryPool {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(not(feature = "advanced_math"))]
impl MemoryPool {
    #[must_use]
    pub const fn new() -> Self {
        Self
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct FftEngine;

#[cfg(feature = "advanced_math")]
impl Default for FftEngine {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "advanced_math")]
impl FftEngine {
    pub const fn new() -> Self {
        Self
    }

    pub fn forward(&self, input: &Vector) -> Result<Vector> {
        use ndrustfft::{ndfft, FftHandler};

        let array = input.to_array1()?;
        let handler = FftHandler::new(array.len());
        let mut fft_result = array.clone();

        ndfft(&array, &mut fft_result, &handler, 0);

        Vector::from_array1(&fft_result.view(), &MemoryPool::new())
    }

    pub fn inverse(&self, input: &Vector) -> Result<Vector> {
        use ndrustfft::{ndifft, FftHandler};

        let array = input.to_array1()?;
        let handler = FftHandler::new(array.len());
        let mut ifft_result = array.clone();

        ndifft(&array, &mut ifft_result, &handler, 0);

        Vector::from_array1(&ifft_result.view(), &MemoryPool::new())
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct FftEngine;

#[cfg(not(feature = "advanced_math"))]
impl Default for FftEngine {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(not(feature = "advanced_math"))]
impl FftEngine {
    #[must_use]
    pub const fn new() -> Self {
        Self
    }

    pub fn forward(&self, _input: &Vector) -> Result<Vector> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn inverse(&self, _input: &Vector) -> Result<Vector> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct Matrix {
    data: Array2<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl Matrix {
    pub fn from_array2(array: &ArrayView2<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: array.to_owned(),
        })
    }

    pub fn zeros(shape: (usize, usize), _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: Array2::zeros(shape),
        })
    }

    pub fn to_array2(&self, result: &mut Array2<Complex64>) -> Result<()> {
        if result.shape() != self.data.shape() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Expected shape {:?}, but got {:?}",
                self.data.shape(),
                result.shape()
            )));
        }
        result.assign(&self.data);
        Ok(())
    }

    pub fn shape(&self) -> (usize, usize) {
        (self.data.nrows(), self.data.ncols())
    }

    pub fn view(&self) -> ArrayView2<'_, Complex64> {
        self.data.view()
    }

    pub fn view_mut(&mut self) -> ArrayViewMut2<'_, Complex64> {
        self.data.view_mut()
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct Matrix;

#[cfg(not(feature = "advanced_math"))]
impl Matrix {
    pub fn from_array2(_array: &ArrayView2<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn zeros(_shape: (usize, usize), _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array2(&self, _result: &mut Array2<Complex64>) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct Vector {
    data: Array1<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl Vector {
    pub fn from_array1(array: &ArrayView1<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: array.to_owned(),
        })
    }

    pub fn zeros(len: usize, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: Array1::zeros(len),
        })
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Ok(self.data.clone())
    }

    pub fn to_array1_mut(&self, result: &mut Array1<Complex64>) -> Result<()> {
        if result.len() != self.data.len() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Expected length {}, but got {}",
                self.data.len(),
                result.len()
            )));
        }
        result.assign(&self.data);
        Ok(())
    }

    pub fn len(&self) -> usize {
        self.data.len()
    }

    pub fn view(&self) -> ArrayView1<'_, Complex64> {
        self.data.view()
    }

    pub fn view_mut(&mut self) -> ArrayViewMut1<'_, Complex64> {
        self.data.view_mut()
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct Vector;

#[cfg(not(feature = "advanced_math"))]
impl Vector {
    pub fn from_array1(_array: &ArrayView1<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn zeros(_len: usize, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array1_mut(&self, _result: &mut Array1<Complex64>) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct SparseMatrix {
    csr_matrix: nalgebra_sparse::CsrMatrix<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl SparseMatrix {
    pub fn from_csr(
        values: &[Complex64],
        col_indices: &[usize],
        row_ptr: &[usize],
        num_rows: usize,
        num_cols: usize,
        _pool: &MemoryPool,
    ) -> Result<Self> {
        use nalgebra_sparse::CsrMatrix;

        let csr_matrix = CsrMatrix::try_from_csr_data(
            num_rows,
            num_cols,
            row_ptr.to_vec(),
            col_indices.to_vec(),
            values.to_vec(),
        )
        .map_err(|e| {
            SimulatorError::ComputationError(format!("Failed to create CSR matrix: {e}"))
        })?;

        Ok(Self { csr_matrix })
    }

    pub fn matvec(&self, vector: &Vector, result: &mut Vector) -> Result<()> {
        use nalgebra::{Complex, DVector};

        let input_vec = vector.to_array1()?;
        let nalgebra_vec = DVector::from_iterator(
            input_vec.len(),
            input_vec.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut output = DVector::zeros(self.csr_matrix.nrows());

        // Row-wise sparse dot products over the CSR structure.
        for (row_idx, row) in self.csr_matrix.row_iter().enumerate() {
            let mut sum = Complex::new(0.0, 0.0);
            for (col_idx, value) in row.col_indices().iter().zip(row.values()) {
                sum += value * nalgebra_vec[*col_idx];
            }
            output[row_idx] = sum;
        }

        let output_array: Array1<Complex64> =
            Array1::from_iter(output.iter().map(|c| Complex64::new(c.re, c.im)));

        result.data.assign(&output_array);
        Ok(())
    }

    /// Approximate solve via diagonal (Jacobi) scaling: `x_i = b_i / A_ii`.
    /// This is not a full sparse solve; use `SparseSolvers` for CG, GMRES,
    /// or BiCGSTAB.
    pub fn solve(&self, rhs: &Vector) -> Result<Vector> {
        use nalgebra::Complex;
        use nalgebra_sparse::SparseEntry;

        let rhs_array = rhs.to_array1()?;
        let mut solution = rhs_array.clone();

        for i in 0..solution.len().min(self.csr_matrix.nrows()) {
            let diag = self
                .csr_matrix
                .get_entry(i, i)
                .map_or(Complex::new(1.0, 0.0), |entry| match entry {
                    SparseEntry::NonZero(v) => *v,
                    SparseEntry::Zero => Complex::new(0.0, 0.0),
                });

            if diag.norm() > 1e-14 {
                solution[i] = rhs_array[i] / diag;
            }
        }

        Vector::from_array1(&solution.view(), &MemoryPool::new())
    }

    pub fn shape(&self) -> (usize, usize) {
        (self.csr_matrix.nrows(), self.csr_matrix.ncols())
    }

    pub fn nnz(&self) -> usize {
        self.csr_matrix.nnz()
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct SparseMatrix;

#[cfg(not(feature = "advanced_math"))]
impl SparseMatrix {
    pub fn from_csr(
        _values: &[Complex64],
        _col_indices: &[usize],
        _row_ptr: &[usize],
        _num_rows: usize,
        _num_cols: usize,
        _pool: &MemoryPool,
    ) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn matvec(&self, _vector: &Vector, _result: &mut Vector) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn solve(&self, _rhs: &Vector) -> Result<Vector> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct BLAS;

#[cfg(feature = "advanced_math")]
impl BLAS {
    /// Computes `c = alpha * a * b + beta * c`.
    pub fn gemm(
        alpha: Complex64,
        a: &Matrix,
        b: &Matrix,
        beta: Complex64,
        c: &mut Matrix,
    ) -> Result<()> {
        let a_scaled = &a.data * alpha;
        let c_scaled = &c.data * beta;
        let result = a_scaled.dot(&b.data) + c_scaled;
        c.data.assign(&result);
        Ok(())
    }

    /// Computes `y = alpha * a * x + beta * y`.
    pub fn gemv(
        alpha: Complex64,
        a: &Matrix,
        x: &Vector,
        beta: Complex64,
        y: &mut Vector,
    ) -> Result<()> {
        let y_scaled = &y.data * beta;
        let result = &a.data.dot(&x.data) * alpha + y_scaled;
        y.data.assign(&result);
        Ok(())
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct BLAS;

#[cfg(not(feature = "advanced_math"))]
impl BLAS {
    pub fn gemm(
        _alpha: Complex64,
        _a: &Matrix,
        _b: &Matrix,
        _beta: Complex64,
        _c: &mut Matrix,
    ) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn gemv(
        _alpha: Complex64,
        _a: &Matrix,
        _x: &Vector,
        _beta: Complex64,
        _y: &mut Vector,
    ) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct LAPACK;

#[cfg(feature = "advanced_math")]
impl LAPACK {
    pub fn svd(matrix: &Matrix) -> Result<SvdResult> {
        use scirs2_core::ndarray::ndarray_linalg::SVD;

        let svd_result = matrix
            .data
            .svd(true, true)
            .map_err(|_| SimulatorError::ComputationError("SVD computation failed".to_string()))?;

        let pool = MemoryPool::new();

        let u_data = svd_result.0.ok_or_else(|| {
            SimulatorError::ComputationError("SVD U matrix not computed".to_string())
        })?;
        let s_data = svd_result.1;
        let vt_data = svd_result.2.ok_or_else(|| {
            SimulatorError::ComputationError("SVD Vt matrix not computed".to_string())
        })?;

        let u = Matrix::from_array2(&u_data.view(), &pool)?;
        let s_complex: Array1<Complex64> = s_data.mapv(|x| Complex64::new(x, 0.0));
        let s = Vector::from_array1(&s_complex.view(), &pool)?;
        let vt = Matrix::from_array2(&vt_data.view(), &pool)?;

        Ok(SvdResult { u, s, vt })
    }

    pub fn eig(matrix: &Matrix) -> Result<EigResult> {
        use scirs2_core::ndarray::ndarray_linalg::Eig;

        let eig_result = matrix.data.eig().map_err(|_| {
            SimulatorError::ComputationError("Eigenvalue decomposition failed".to_string())
        })?;

        let pool = MemoryPool::new();
        let values = Vector::from_array1(&eig_result.0.view(), &pool)?;
        let vectors = Matrix::from_array2(&eig_result.1.view(), &pool)?;

        Ok(EigResult { values, vectors })
    }

    /// LU decomposition with partial pivoting: returns `(L, U, perm)` such
    /// that `P * A = L * U`, where `perm` records the row permutation.
    pub fn lu(matrix: &Matrix) -> Result<(Matrix, Matrix, Vec<usize>)> {
        let n = matrix.data.nrows();
        let pool = MemoryPool::new();

        let mut l_data = Array2::eye(n);
        let mut u_data = matrix.data.clone();
        let mut perm_vec: Vec<usize> = (0..n).collect();

        for k in 0..n {
            // Find the pivot row for column k.
            let mut max_row = k;
            let mut max_val = u_data[[k, k]].norm();
            for i in k + 1..n {
                let val = u_data[[i, k]].norm();
                if val > max_val {
                    max_val = val;
                    max_row = i;
                }
            }

            if max_row != k {
                // Swap rows in U, in the already-computed columns of L,
                // and in the permutation record.
                for j in 0..n {
                    u_data.swap([k, j], [max_row, j]);
                }
                for j in 0..k {
                    l_data.swap([k, j], [max_row, j]);
                }
                perm_vec.swap(k, max_row);
            }

            if u_data[[k, k]].norm() > 1e-10 {
                for i in k + 1..n {
                    let factor = u_data[[i, k]] / u_data[[k, k]];
                    l_data[[i, k]] = factor;
                    for j in k..n {
                        let u_kj = u_data[[k, j]];
                        u_data[[i, j]] -= factor * u_kj;
                    }
                }
            }
        }

        let l_matrix = Matrix::from_array2(&l_data.view(), &pool)?;
        let u_matrix = Matrix::from_array2(&u_data.view(), &pool)?;

        Ok((l_matrix, u_matrix, perm_vec))
    }

    pub fn qr(matrix: &Matrix) -> Result<(Matrix, Matrix)> {
        use scirs2_core::ndarray::ndarray_linalg::QR;

        let (q, r) = matrix
            .data
            .qr()
            .map_err(|_| SimulatorError::ComputationError("QR decomposition failed".to_string()))?;

        let pool = MemoryPool::new();
        let q_matrix = Matrix::from_array2(&q.view(), &pool)?;
        let r_matrix = Matrix::from_array2(&r.view(), &pool)?;

        Ok((q_matrix, r_matrix))
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct LAPACK;

#[cfg(not(feature = "advanced_math"))]
impl LAPACK {
    pub fn svd(_matrix: &Matrix) -> Result<SvdResult> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn eig(_matrix: &Matrix) -> Result<EigResult> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct SvdResult {
    pub u: Matrix,
    pub s: Vector,
    pub vt: Matrix,
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct EigResult {
    pub values: Vector,
    pub vectors: Matrix,
}

#[cfg(feature = "advanced_math")]
impl EigResult {
    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        self.values.to_array1()
    }

    pub const fn eigenvalues(&self) -> &Vector {
        &self.values
    }

    pub const fn eigenvectors(&self) -> &Matrix {
        &self.vectors
    }
}

#[cfg(feature = "advanced_math")]
impl SvdResult {
    /// Returns a copy of the `U` factor as a plain array.
    pub fn to_array2(&self) -> Result<Array2<Complex64>> {
        Ok(self.u.data.clone())
    }

    pub const fn u_matrix(&self) -> &Matrix {
        &self.u
    }

    pub const fn singular_values(&self) -> &Vector {
        &self.s
    }

    pub const fn vt_matrix(&self) -> &Matrix {
        &self.vt
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct SvdResult;

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct EigResult;

#[cfg(not(feature = "advanced_math"))]
impl EigResult {
    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(not(feature = "advanced_math"))]
impl SvdResult {
    pub fn to_array2(&self) -> Result<Array2<Complex64>> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedFFT;

#[cfg(feature = "advanced_math")]
impl AdvancedFFT {
    /// 2D FFT computed as a row-wise pass followed by a column-wise pass.
    pub fn fft_nd(input: &Array2<Complex64>) -> Result<Array2<Complex64>> {
        use ndrustfft::{ndfft, FftHandler};

        let (rows, cols) = input.dim();
        let mut result = input.clone();

        for i in 0..rows {
            let row = input.row(i).to_owned();
            let mut row_out = row.clone();
            let handler = FftHandler::new(cols);
            ndfft(&row, &mut row_out, &handler, 0);
            result.row_mut(i).assign(&row_out);
        }

        for j in 0..cols {
            let col = result.column(j).to_owned();
            let mut col_out = col.clone();
            let handler = FftHandler::new(rows);
            ndfft(&col, &mut col_out, &handler, 0);
            result.column_mut(j).assign(&col_out);
        }

        Ok(result)
    }

    /// Short-time FFT with a Hann window and fixed overlap.
    pub fn windowed_fft(
        input: &Vector,
        window_size: usize,
        overlap: usize,
    ) -> Result<Array2<Complex64>> {
        let array = input.to_array1()?;
        let step_size = window_size - overlap;
        let num_windows = (array.len() - overlap) / step_size;

        let mut result = Array2::zeros((num_windows, window_size));

        for (i, mut row) in result.outer_iter_mut().enumerate() {
            let start = i * step_size;
            let end = (start + window_size).min(array.len());

            if end - start == window_size {
                let window = array.slice(s![start..end]);

                // Apply a Hann window before transforming.
                let windowed: Array1<Complex64> = window
                    .iter()
                    .enumerate()
                    .map(|(j, &val)| {
                        let hann =
                            0.5 * (1.0 - (2.0 * PI * j as f64 / (window_size - 1) as f64).cos());
                        val * Complex64::new(hann, 0.0)
                    })
                    .collect();

                let handler = FftHandler::new(window_size);
                let mut fft_result = windowed.clone();
                ndrustfft::ndfft(&windowed, &mut fft_result, &handler, 0);

                row.assign(&fft_result);
            }
        }

        Ok(result)
    }

    /// Linear convolution via zero-padded FFTs: `ifft(fft(a) * fft(b))`.
    pub fn convolution(a: &Vector, b: &Vector) -> Result<Vector> {
        let a_array = a.to_array1()?;
        let b_array = b.to_array1()?;

        let n = a_array.len() + b_array.len() - 1;
        let fft_size = n.next_power_of_two();

        let mut a_padded = Array1::zeros(fft_size);
        let mut b_padded = Array1::zeros(fft_size);
        a_padded.slice_mut(s![..a_array.len()]).assign(&a_array);
        b_padded.slice_mut(s![..b_array.len()]).assign(&b_array);

        let handler = FftHandler::new(fft_size);
        let mut a_fft = a_padded.clone();
        let mut b_fft = b_padded.clone();
        ndrustfft::ndfft(&a_padded, &mut a_fft, &handler, 0);
        ndrustfft::ndfft(&b_padded, &mut b_fft, &handler, 0);

        // Pointwise product in the frequency domain.
        let mut product = Array1::zeros(fft_size);
        for i in 0..fft_size {
            product[i] = a_fft[i] * b_fft[i];
        }

        let mut result = product.clone();
        ndrustfft::ndifft(&product, &mut result, &handler, 0);

        // Truncate the padding back to the true convolution length.
        let truncated = result.slice(s![..n]).to_owned();
        Vector::from_array1(&truncated.view(), &MemoryPool::new())
    }
}

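/// Iterative Krylov solvers (CG, GMRES, BiCGSTAB) over `SparseMatrix`.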
#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct SparseSolvers;

#[cfg(feature = "advanced_math")]
impl SparseSolvers {
    /// Unpreconditioned conjugate gradient. Note: uses the unconjugated
    /// complex dot product, so it is only appropriate for systems that are
    /// (numerically) real symmetric positive definite.
    pub fn conjugate_gradient(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
    ) -> Result<Vector> {
        use nalgebra::{Complex, DVector};

        let b_array = b.to_array1()?;
        let b_vec = DVector::from_iterator(
            b_array.len(),
            b_array.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut x = if let Some(x0_vec) = x0 {
            let x0_array = x0_vec.to_array1()?;
            DVector::from_iterator(
                x0_array.len(),
                x0_array.iter().map(|&c| Complex::new(c.re, c.im)),
            )
        } else {
            DVector::zeros(b_vec.len())
        };

        // Initial residual: r = b - A x0.
        let pool = MemoryPool::new();
        let x_vector = Vector::from_array1(
            &Array1::from_vec(x.iter().map(|c| Complex64::new(c.re, c.im)).collect()).view(),
            &pool,
        )?;
        let mut ax_vector = Vector::zeros(x.len(), &pool)?;
        matrix.matvec(&x_vector, &mut ax_vector)?;

        let ax_array = ax_vector.to_array1()?;
        let ax = DVector::from_iterator(
            ax_array.len(),
            ax_array.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut r = &b_vec - &ax;
        let mut p = r.clone();
        let mut rsold = r.dot(&r).re;

        for _ in 0..max_iterations {
            let p_vec = Vector::from_array1(
                &Array1::from_vec(p.iter().map(|c| Complex64::new(c.re, c.im)).collect()).view(),
                &MemoryPool::new(),
            )?;
            let mut ap_vec =
                Vector::from_array1(&Array1::zeros(p.len()).view(), &MemoryPool::new())?;
            matrix.matvec(&p_vec, &mut ap_vec)?;
            let ap_array = ap_vec.to_array1()?;
            let ap = DVector::from_iterator(
                ap_array.len(),
                ap_array.iter().map(|&c| Complex::new(c.re, c.im)),
            );

            let alpha = rsold / p.dot(&ap).re;
            let alpha_complex = Complex::new(alpha, 0.0);
            x += &p * alpha_complex;
            r -= &ap * alpha_complex;

            let rsnew = r.dot(&r).re;
            if rsnew.sqrt() < tolerance {
                break;
            }

            let beta = rsnew / rsold;
            let beta_complex = Complex::new(beta, 0.0);
            p = &r + &p * beta_complex;
            rsold = rsnew;
        }

        let result_array =
            Array1::from_vec(x.iter().map(|c| Complex64::new(c.re, c.im)).collect());
        Vector::from_array1(&result_array.view(), &MemoryPool::new())
    }

    /// Restarted GMRES. The small least-squares problem is solved with a
    /// simplified back-substitution that treats the Hessenberg matrix as
    /// upper triangular (the subdiagonal is ignored), so this is an
    /// approximation of textbook GMRES.
    pub fn gmres(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
        restart: usize,
    ) -> Result<Vector> {
        let b_array = b.to_array1()?;
        let n = b_array.len();

        let mut x = if let Some(x0_vec) = x0 {
            x0_vec.to_array1()?.to_owned()
        } else {
            Array1::zeros(n)
        };

        for _restart_iter in 0..(max_iterations / restart).max(1) {
            // Residual: r = b - A x.
            let x_vec = Vector::from_array1(&x.view(), &MemoryPool::new())?;
            let mut ax_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&x_vec, &mut ax_vec)?;
            let ax = ax_vec.to_array1()?;

            let mut r = &b_array - &ax;
            let beta = r.norm_l2();

            if beta < tolerance {
                break;
            }

            r = r.mapv(|x| x / Complex64::new(beta, 0.0));

            // Build the Krylov basis with Arnoldi iterations.
            let mut v = Vec::new();
            v.push(r.clone());

            let mut h = Array2::zeros((restart + 1, restart));

            for j in 0..restart.min(max_iterations) {
                let v_vec = Vector::from_array1(&v[j].view(), &MemoryPool::new())?;
                let mut av_vec =
                    Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
                matrix.matvec(&v_vec, &mut av_vec)?;
                let mut av = av_vec.to_array1()?;

                for i in 0..=j {
                    h[[i, j]] = v[i].dot(&av);
                    av = av - h[[i, j]] * &v[i];
                }

                h[[j + 1, j]] = Complex64::new(av.norm_l2(), 0.0);

                if h[[j + 1, j]].norm() < tolerance {
                    break;
                }

                av /= h[[j + 1, j]];
                v.push(av);
            }

            // Solve H y ≈ beta * e1 by back-substitution and update x.
            let krylov_dim = v.len() - 1;
            if krylov_dim > 0 {
                let mut e1 = Array1::zeros(krylov_dim + 1);
                e1[0] = Complex64::new(beta, 0.0);

                let mut y = Array1::zeros(krylov_dim);
                for i in (0..krylov_dim).rev() {
                    let mut sum = Complex64::new(0.0, 0.0);
                    for j in (i + 1)..krylov_dim {
                        sum += h[[i, j]] * y[j];
                    }
                    y[i] = (e1[i] - sum) / h[[i, i]];
                }

                for i in 0..krylov_dim {
                    x = x + y[i] * &v[i];
                }
            }
        }

        Vector::from_array1(&x.view(), &MemoryPool::new())
    }

    /// BiCGSTAB for general (non-symmetric) sparse systems.
    pub fn bicgstab(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
    ) -> Result<Vector> {
        let b_array = b.to_array1()?;
        let n = b_array.len();

        let mut x = if let Some(x0_vec) = x0 {
            x0_vec.to_array1()?.to_owned()
        } else {
            Array1::zeros(n)
        };

        // Initial residual: r = b - A x0.
        let x_vec = Vector::from_array1(&x.view(), &MemoryPool::new())?;
        let mut ax_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
        matrix.matvec(&x_vec, &mut ax_vec)?;
        let ax = ax_vec.to_array1()?;

        let mut r = &b_array - &ax;
        let r0 = r.clone();

        let mut rho = Complex64::new(1.0, 0.0);
        let mut alpha = Complex64::new(1.0, 0.0);
        let mut omega = Complex64::new(1.0, 0.0);

        let mut p = Array1::zeros(n);
        let mut v = Array1::zeros(n);

        for _ in 0..max_iterations {
            let rho_new = r0.dot(&r);
            let beta = (rho_new / rho) * (alpha / omega);

            p = &r + beta * (&p - omega * &v);

            let p_vec = Vector::from_array1(&p.view(), &MemoryPool::new())?;
            let mut v_vec = Vector::from_array1(&v.view(), &MemoryPool::new())?;
            matrix.matvec(&p_vec, &mut v_vec)?;
            v = v_vec.to_array1()?;

            alpha = rho_new / r0.dot(&v);
            let s = &r - alpha * &v;

            if s.norm_l2() < tolerance {
                x = x + alpha * &p;
                break;
            }

            let s_vec = Vector::from_array1(&s.view(), &MemoryPool::new())?;
            let mut t_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&s_vec, &mut t_vec)?;
            let t = t_vec.to_array1()?;

            omega = t.dot(&s) / t.dot(&t);
            x = x + alpha * &p + omega * &s;
            r = s - omega * &t;

            if r.norm_l2() < tolerance {
                break;
            }

            rho = rho_new;
        }

        Vector::from_array1(&x.view(), &MemoryPool::new())
    }
}

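/// Krylov-subspace eigensolvers (Lanczos, Arnoldi) for sparse operators.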
#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedEigensolvers;

#[cfg(feature = "advanced_math")]
impl AdvancedEigensolvers {
    /// Lanczos iteration for Hermitian operators. Eigenvalues are
    /// approximated by the diagonal of the resulting tridiagonal matrix
    /// (no tridiagonal eigensolve is performed), and the returned
    /// "eigenvectors" are the raw Lanczos basis vectors.
    pub fn lanczos(
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        max_iterations: usize,
        tolerance: f64,
    ) -> Result<EigResult> {
        let n = matrix.csr_matrix.nrows();
        let m = num_eigenvalues.min(max_iterations);

        // Random normalized starting vector.
        let mut q = Array1::from_vec(
            (0..n)
                .map(|_| {
                    Complex64::new(
                        thread_rng().gen::<f64>() - 0.5,
                        thread_rng().gen::<f64>() - 0.5,
                    )
                })
                .collect(),
        );
        let norm = q.norm_l2();
        q.mapv_inplace(|x| x / Complex64::new(norm, 0.0));

        let mut q_vectors = Vec::new();
        q_vectors.push(q.clone());

        let mut alpha = Vec::new();
        let mut beta = Vec::new();

        let mut q_prev = Array1::<Complex64>::zeros(n);

        for j in 0..m {
            let q_vec = Vector::from_array1(&q_vectors[j].view(), &MemoryPool::new())?;
            let mut av_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&q_vec, &mut av_vec)?;
            let mut av = av_vec.to_array1()?;

            let alpha_j = q_vectors[j].dot(&av);
            alpha.push(alpha_j);

            // Three-term recurrence: orthogonalize against the two
            // previous Lanczos vectors.
            av = av - alpha_j * &q_vectors[j];
            if j > 0 {
                av = av - Complex64::new(beta[j - 1], 0.0) * &q_prev;
            }

            let beta_j = av.norm_l2();

            if beta_j.abs() < tolerance {
                break;
            }

            beta.push(beta_j);
            q_prev = q_vectors[j].clone();

            if j + 1 < m {
                q = av / beta_j;
                q_vectors.push(q.clone());
            }
        }

        // Assemble the tridiagonal matrix from the recurrence coefficients.
        let dim = alpha.len();
        let mut tridiag = Array2::zeros((dim, dim));

        for i in 0..dim {
            tridiag[[i, i]] = alpha[i];
            if i > 0 {
                tridiag[[i - 1, i]] = Complex64::new(beta[i - 1], 0.0);
                tridiag[[i, i - 1]] = Complex64::new(beta[i - 1], 0.0);
            }
        }

        let mut eigenvalues = Array1::zeros(num_eigenvalues.min(dim));
        for i in 0..eigenvalues.len() {
            eigenvalues[i] = tridiag[[i, i]];
        }

        let mut eigenvectors = Array2::zeros((n, eigenvalues.len()));
        for (i, mut col) in eigenvectors
            .columns_mut()
            .into_iter()
            .enumerate()
            .take(eigenvalues.len())
        {
            if i < q_vectors.len() {
                col.assign(&q_vectors[i]);
            }
        }

        let values = Vector::from_array1(&eigenvalues.view(), &MemoryPool::new())?;
        let vectors = Matrix::from_array2(&eigenvectors.view(), &MemoryPool::new())?;

        Ok(EigResult { values, vectors })
    }

    /// Arnoldi iteration for general operators. As with `lanczos`, the
    /// eigenvalues are approximated by the diagonal of the Hessenberg
    /// matrix and the basis vectors stand in for eigenvectors.
    pub fn arnoldi(
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        max_iterations: usize,
        tolerance: f64,
    ) -> Result<EigResult> {
        let n = matrix.csr_matrix.nrows();
        let m = num_eigenvalues.min(max_iterations);

        // Random normalized starting vector.
        let mut q = Array1::from_vec(
            (0..n)
                .map(|_| {
                    Complex64::new(
                        thread_rng().gen::<f64>() - 0.5,
                        thread_rng().gen::<f64>() - 0.5,
                    )
                })
                .collect(),
        );
        let norm = q.norm_l2();
        q.mapv_inplace(|x| x / Complex64::new(norm, 0.0));

        let mut q_vectors = Vec::new();
        q_vectors.push(q.clone());

        let mut h = Array2::zeros((m + 1, m));

        for j in 0..m {
            let q_vec = Vector::from_array1(&q_vectors[j].view(), &MemoryPool::new())?;
            let mut v_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&q_vec, &mut v_vec)?;
            let mut v = v_vec.to_array1()?;

            // Gram-Schmidt orthogonalization against the existing basis.
            for i in 0..=j {
                h[[i, j]] = q_vectors[i].dot(&v);
                v = v - h[[i, j]] * &q_vectors[i];
            }

            h[[j + 1, j]] = Complex64::new(v.norm_l2(), 0.0);

            if h[[j + 1, j]].norm() < tolerance {
                break;
            }

            if j + 1 < m {
                q = v / h[[j + 1, j]];
                q_vectors.push(q.clone());
            }
        }

        let dim = q_vectors.len();
        let mut eigenvalues = Array1::zeros(num_eigenvalues.min(dim));
        for i in 0..eigenvalues.len() {
            eigenvalues[i] = h[[i, i]];
        }

        let mut eigenvectors = Array2::zeros((n, eigenvalues.len()));
        for (i, mut col) in eigenvectors
            .columns_mut()
            .into_iter()
            .enumerate()
            .take(eigenvalues.len())
        {
            if i < q_vectors.len() {
                col.assign(&q_vectors[i]);
            }
        }

        let values = Vector::from_array1(&eigenvalues.view(), &MemoryPool::new())?;
        let vectors = Matrix::from_array2(&eigenvectors.view(), &MemoryPool::new())?;

        Ok(EigResult { values, vectors })
    }
}

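/// Dense linear-algebra helpers built on the LAPACK wrappers.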
#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedLinearAlgebra;

#[cfg(feature = "advanced_math")]
impl AdvancedLinearAlgebra {
    pub fn qr_decomposition(matrix: &Matrix) -> Result<QRResult> {
        use scirs2_core::ndarray::ndarray_linalg::QR;

        let qr_result = matrix
            .data
            .qr()
            .map_err(|_| SimulatorError::ComputationError("QR decomposition failed".to_string()))?;

        let pool = MemoryPool::new();
        let q = Matrix::from_array2(&qr_result.0.view(), &pool)?;
        let r = Matrix::from_array2(&qr_result.1.view(), &pool)?;

        Ok(QRResult { q, r })
    }

    pub fn cholesky_decomposition(matrix: &Matrix) -> Result<Matrix> {
        use scirs2_core::ndarray::ndarray_linalg::{Cholesky, UPLO};

        let chol_result = matrix.data.cholesky(UPLO::Lower).map_err(|_| {
            SimulatorError::ComputationError("Cholesky decomposition failed".to_string())
        })?;

        Matrix::from_array2(&chol_result.view(), &MemoryPool::new())
    }

    /// Approximates the unitary `exp(-i * t * H)` with a truncated Taylor
    /// series (up to 20 terms). Adequate for small `||t * H||`; a
    /// scaling-and-squaring scheme would be more robust for large norms.
    pub fn matrix_exponential(matrix: &Matrix, t: f64) -> Result<Matrix> {
        let scaled_matrix = &matrix.data * Complex64::new(0.0, -t);

        let mut result = Array2::eye(scaled_matrix.nrows());
        let mut term = Array2::eye(scaled_matrix.nrows());

        for k in 1..20 {
            term = term.dot(&scaled_matrix) / Complex64::new(k as f64, 0.0);
            result += &term;

            if term.norm_l2() < 1e-12 {
                break;
            }
        }

        Matrix::from_array2(&result.view(), &MemoryPool::new())
    }

    /// Moore-Penrose pseudoinverse via SVD: `A^+ = V * S^+ * U^H`.
    /// Singular values below `tolerance` are treated as zero. Assumes a
    /// square input so the dimensions of `S^+` line up.
    pub fn pseudoinverse(matrix: &Matrix, tolerance: f64) -> Result<Matrix> {
        let svd_result = LAPACK::svd(matrix)?;

        let u = svd_result.u.data;
        let s = svd_result.s.to_array1()?;
        let vt = svd_result.vt.data;

        // Invert the singular values above the cutoff.
        let mut s_pinv = Array1::zeros(s.len());
        for (i, &sigma) in s.iter().enumerate() {
            if sigma.norm() > tolerance {
                s_pinv[i] = Complex64::new(1.0, 0.0) / sigma;
            }
        }

        let s_pinv_diag = Array2::from_diag(&s_pinv);
        // Use conjugate transposes, as required for complex matrices.
        let v = vt.mapv(|z| z.conj()).reversed_axes();
        let u_h = u.mapv(|z| z.conj()).reversed_axes();
        let result = v.dot(&s_pinv_diag).dot(&u_h);

        Matrix::from_array2(&result.view(), &MemoryPool::new())
    }

    /// Spectral condition number: ratio of the largest to the smallest
    /// nonzero singular value.
    pub fn condition_number(matrix: &Matrix) -> Result<f64> {
        let svd_result = LAPACK::svd(matrix)?;
        let s = svd_result.s.to_array1()?;

        let mut min_singular = f64::INFINITY;
        let mut max_singular: f64 = 0.0;

        for &sigma in &s {
            let sigma_norm = sigma.norm();
            if sigma_norm > 1e-15 {
                min_singular = min_singular.min(sigma_norm);
                max_singular = max_singular.max(sigma_norm);
            }
        }

        if max_singular == 0.0 {
            // All singular values are numerically zero.
            return Ok(f64::INFINITY);
        }

        Ok(max_singular / min_singular)
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct QRResult {
    pub q: Matrix,
    pub r: Matrix,
}

pub fn benchmark_scirs2_integration() -> Result<HashMap<String, f64>> {
    let mut results = HashMap::new();

    #[cfg(feature = "advanced_math")]
    {
        let start = std::time::Instant::now();
        let engine = FftEngine::new();
        let test_vector = Vector::from_array1(
            &Array1::from_vec((0..1024).map(|i| Complex64::new(i as f64, 0.0)).collect()).view(),
            &MemoryPool::new(),
        )?;

        for _ in 0..100 {
            let _ = engine.forward(&test_vector)?;
        }

        let fft_time = start.elapsed().as_millis() as f64;
        results.insert("fft_1024_100_iterations".to_string(), fft_time);
    }

    #[cfg(feature = "advanced_math")]
    {
        use nalgebra_sparse::CsrMatrix;

        let start = std::time::Instant::now();

        // Build a banded 100x100 test matrix with 10 entries per row in
        // proper CSR form: `row_offsets` has num_rows + 1 entries and the
        // column indices within each row are sorted.
        let mut row_offsets = vec![0usize; 101];
        let mut col_indices = vec![0usize; 1000];
        let mut values = vec![Complex64::new(0.0, 0.0); 1000];

        for i in 0..100 {
            row_offsets[i] = i * 10;
            let mut cols: Vec<usize> = (0..10).map(|j| (i + j) % 100).collect();
            cols.sort_unstable();
            for (j, &col) in cols.iter().enumerate() {
                let idx = i * 10 + j;
                col_indices[idx] = col;
                values[idx] = Complex64::new(1.0, 0.0);
            }
        }
        row_offsets[100] = 1000;

        let csr = CsrMatrix::try_from_csr_data(100, 100, row_offsets, col_indices, values)
            .map_err(|_| {
                SimulatorError::ComputationError("Failed to create test matrix".to_string())
            })?;

        let sparse_matrix = SparseMatrix { csr_matrix: csr };
        let b = Vector::from_array1(&Array1::ones(100).view(), &MemoryPool::new())?;

        let _ = SparseSolvers::conjugate_gradient(&sparse_matrix, &b, None, 1e-6, 100)?;

        let sparse_solver_time = start.elapsed().as_millis() as f64;
        results.insert("cg_solver_100x100".to_string(), sparse_solver_time);
    }

    #[cfg(feature = "advanced_math")]
    {
        let start = std::time::Instant::now();

        let test_matrix = Matrix::from_array2(&Array2::eye(50).view(), &MemoryPool::new())?;
        for _ in 0..10 {
            let _ = AdvancedLinearAlgebra::qr_decomposition(&test_matrix)?;
        }

        let qr_time = start.elapsed().as_millis() as f64;
        results.insert("qr_decomposition_50x50_10_iterations".to_string(), qr_time);
    }

    Ok(results)
}
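
#[cfg(test)]
mod tests {
    use super::*;

    // Minimal smoke test, added as a usage sketch: multiplying by the
    // identity through the backend's GEMM path must return the input
    // matrix unchanged. Uses only APIs defined in this module.
    #[test]
    fn matrix_multiply_identity_roundtrip() {
        let backend = SciRS2Backend::new();

        let eye = Array2::from_shape_fn((4, 4), |(i, j)| {
            if i == j {
                Complex64::new(1.0, 0.0)
            } else {
                Complex64::new(0.0, 0.0)
            }
        });
        let m = Array2::from_shape_fn((4, 4), |(i, j)| Complex64::new((i * 4 + j) as f64, 0.0));

        let a = SciRS2Matrix::from_array2(eye);
        let b = SciRS2Matrix::from_array2(m.clone());
        let c = backend
            .matrix_multiply(&a, &b)
            .expect("4x4 multiply succeeds");

        assert_eq!(c.shape(), (4, 4));
        for i in 0..4 {
            for j in 0..4 {
                assert!((c.data_view()[[i, j]] - m[[i, j]]).norm() < 1e-12);
            }
        }
    }
}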