#[cfg(feature = "advanced_math")]
use ndrustfft::FftHandler;
use scirs2_core::ndarray::ndarray_linalg::Norm;
use scirs2_core::ndarray::{
    s, Array1, Array2, ArrayView1, ArrayView2, ArrayViewMut1, ArrayViewMut2,
};
use scirs2_core::Complex64;
use std::collections::HashMap;
use std::f64::consts::PI;
use std::sync::{Arc, Mutex};

use quantrs2_core::prelude::QuantRS2Error as SciRS2Error;
use scirs2_core::parallel_ops::{
    current_num_threads, IndexedParallelIterator, ParallelIterator, ThreadPool, ThreadPoolBuilder,
};
use scirs2_core::simd_ops::SimdUnifiedOps;

use crate::error::{Result, SimulatorError};
use scirs2_core::random::prelude::*;

#[derive(Debug, Clone)]
pub struct SciRS2Matrix {
    data: Array2<Complex64>,
    simd_aligned: bool,
}

impl SciRS2Matrix {
    pub fn zeros(shape: (usize, usize), _allocator: &SciRS2MemoryAllocator) -> Result<Self> {
        Ok(Self {
            data: Array2::zeros(shape),
            simd_aligned: true,
        })
    }

    #[must_use]
    pub const fn from_array2(array: Array2<Complex64>) -> Self {
        Self {
            data: array,
            simd_aligned: false,
        }
    }

    #[must_use]
    pub fn shape(&self) -> (usize, usize) {
        self.data.dim()
    }

    #[must_use]
    pub fn rows(&self) -> usize {
        self.data.nrows()
    }

    #[must_use]
    pub fn cols(&self) -> usize {
        self.data.ncols()
    }

    #[must_use]
    pub fn data_view(&self) -> ArrayView2<'_, Complex64> {
        self.data.view()
    }

    pub fn data_view_mut(&mut self) -> ArrayViewMut2<'_, Complex64> {
        self.data.view_mut()
    }
}

#[derive(Debug, Clone)]
pub struct SciRS2Vector {
    data: Array1<Complex64>,
    simd_aligned: bool,
}

impl SciRS2Vector {
    pub fn zeros(len: usize, _allocator: &SciRS2MemoryAllocator) -> Result<Self> {
        Ok(Self {
            data: Array1::zeros(len),
            simd_aligned: true,
        })
    }

    #[must_use]
    pub const fn from_array1(array: Array1<Complex64>) -> Self {
        Self {
            data: array,
            simd_aligned: false,
        }
    }

    #[must_use]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    #[must_use]
    pub fn data_view(&self) -> ArrayView1<'_, Complex64> {
        self.data.view()
    }

    pub fn data_view_mut(&mut self) -> ArrayViewMut1<'_, Complex64> {
        self.data.view_mut()
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Ok(self.data.clone())
    }
}
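
// Minimal usage sketch (added for illustration, not from the original source):
// build a SciRS2Matrix and SciRS2Vector through the allocator-aware
// constructors and check their reported dimensions.
#[cfg(test)]
mod scirs2_container_examples {
    use super::*;

    #[test]
    fn zero_initialized_shapes() {
        let allocator = SciRS2MemoryAllocator::new();
        let m = SciRS2Matrix::zeros((4, 2), &allocator).expect("allocation");
        let v = SciRS2Vector::zeros(8, &allocator).expect("allocation");
        assert_eq!(m.shape(), (4, 2));
        assert_eq!((m.rows(), m.cols()), (4, 2));
        assert_eq!(v.len(), 8);
        assert!(!v.is_empty());
    }
}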

#[derive(Debug, Clone)]
pub struct SciRS2SimdConfig {
    pub force_instruction_set: Option<String>,
    pub override_simd_lanes: Option<usize>,
    pub enable_aggressive_simd: bool,
    pub numa_aware_allocation: bool,
}

impl Default for SciRS2SimdConfig {
    fn default() -> Self {
        Self {
            force_instruction_set: None,
            override_simd_lanes: None,
            enable_aggressive_simd: true,
            numa_aware_allocation: true,
        }
    }
}

#[derive(Debug, Clone)]
pub struct SciRS2SimdContext {
    pub simd_lanes: usize,
    pub supports_complex_simd: bool,
    pub instruction_set: String,
    pub max_vector_width: usize,
}

impl SciRS2SimdContext {
    #[must_use]
    pub fn detect_capabilities() -> Self {
        #[cfg(target_arch = "x86_64")]
        {
            if is_x86_feature_detected!("avx512f") {
                Self {
                    simd_lanes: 16,
                    supports_complex_simd: true,
                    instruction_set: "AVX-512".to_string(),
                    max_vector_width: 64,
                }
            } else if is_x86_feature_detected!("avx2") {
                Self {
                    simd_lanes: 8,
                    supports_complex_simd: true,
                    instruction_set: "AVX2".to_string(),
                    max_vector_width: 32,
                }
            } else if is_x86_feature_detected!("sse4.1") {
                Self {
                    simd_lanes: 4,
                    supports_complex_simd: true,
                    instruction_set: "SSE4.1".to_string(),
                    max_vector_width: 16,
                }
            } else {
                Self::fallback()
            }
        }
        #[cfg(target_arch = "aarch64")]
        {
            Self {
                simd_lanes: 4,
                supports_complex_simd: true,
                instruction_set: "NEON".to_string(),
                max_vector_width: 16,
            }
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            Self::fallback()
        }
    }

    pub fn from_config(config: &SciRS2SimdConfig) -> Result<Self> {
        let mut context = Self::detect_capabilities();

        if let Some(ref instruction_set) = config.force_instruction_set {
            context.instruction_set = instruction_set.clone();
        }

        if let Some(simd_lanes) = config.override_simd_lanes {
            context.simd_lanes = simd_lanes;
        }

        Ok(context)
    }

    fn fallback() -> Self {
        Self {
            simd_lanes: 1,
            supports_complex_simd: false,
            instruction_set: "Scalar".to_string(),
            max_vector_width: 8,
        }
    }
}

impl Default for SciRS2SimdContext {
    fn default() -> Self {
        Self::detect_capabilities()
    }
}
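
// Illustrative only (not part of the original API surface): detect the host
// SIMD capabilities, then override the lane count via SciRS2SimdConfig.
#[cfg(test)]
mod simd_context_examples {
    use super::*;

    #[test]
    fn config_overrides_detected_lanes() {
        let detected = SciRS2SimdContext::detect_capabilities();
        assert!(detected.simd_lanes >= 1);

        let config = SciRS2SimdConfig {
            override_simd_lanes: Some(2),
            ..Default::default()
        };
        let context = SciRS2SimdContext::from_config(&config).expect("from_config");
        assert_eq!(context.simd_lanes, 2);
    }
}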

#[derive(Debug)]
pub struct SciRS2MemoryAllocator {
    pub total_allocated: usize,
    pub alignment: usize,
    allocation_count: usize,
}

unsafe impl Send for SciRS2MemoryAllocator {}
unsafe impl Sync for SciRS2MemoryAllocator {}

impl Default for SciRS2MemoryAllocator {
    fn default() -> Self {
        Self {
            total_allocated: 0,
            alignment: 64,
            allocation_count: 0,
        }
    }
}

impl SciRS2MemoryAllocator {
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }
}

#[derive(Debug)]
pub struct SciRS2VectorizedFFT {
    plans: HashMap<usize, FFTPlan>,
    optimization_level: OptimizationLevel,
}

#[derive(Debug, Clone)]
pub struct FFTPlan {
    pub size: usize,
    pub twiddle_factors: Vec<Complex64>,
    pub vectorization_strategy: VectorizationStrategy,
}

#[derive(Debug, Clone, Copy)]
pub enum VectorizationStrategy {
    SimdComplexSeparate,
    SimdComplexInterleaved,
    Adaptive,
}

#[derive(Debug, Clone, Copy)]
pub enum OptimizationLevel {
    Basic,
    Aggressive,
    Maximum,
}

impl Default for SciRS2VectorizedFFT {
    fn default() -> Self {
        Self {
            plans: HashMap::new(),
            optimization_level: OptimizationLevel::Aggressive,
        }
    }
}

impl SciRS2VectorizedFFT {
    pub fn forward(&self, input: &SciRS2Vector) -> Result<SciRS2Vector> {
        let data = input.data_view().to_owned();

        #[cfg(feature = "advanced_math")]
        {
            let handler = FftHandler::<f64>::new(data.len());
            let mut output = data.clone();
            ndrustfft::ndfft(&data, &mut output, &handler, 0);
            Ok(SciRS2Vector::from_array1(output))
        }

        #[cfg(not(feature = "advanced_math"))]
        {
            // Naive O(n^2) DFT fallback.
            let n = data.len();
            let mut output = Array1::zeros(n);
            for k in 0..n {
                let mut sum = Complex64::new(0.0, 0.0);
                for j in 0..n {
                    let angle = -2.0 * PI * (k * j) as f64 / n as f64;
                    let twiddle = Complex64::new(angle.cos(), angle.sin());
                    sum += data[j] * twiddle;
                }
                output[k] = sum;
            }
            Ok(SciRS2Vector::from_array1(output))
        }
    }

    pub fn inverse(&self, input: &SciRS2Vector) -> Result<SciRS2Vector> {
        let data = input.data_view().to_owned();

        #[cfg(feature = "advanced_math")]
        {
            let handler = FftHandler::<f64>::new(data.len());
            let mut output = data.clone();
            ndrustfft::ndifft(&data, &mut output, &handler, 0);
            Ok(SciRS2Vector::from_array1(output))
        }

        #[cfg(not(feature = "advanced_math"))]
        {
            // Naive O(n^2) inverse DFT fallback with 1/n normalization.
            let n = data.len();
            let mut output = Array1::zeros(n);
            let scale = 1.0 / n as f64;
            for k in 0..n {
                let mut sum = Complex64::new(0.0, 0.0);
                for j in 0..n {
                    let angle = 2.0 * PI * (k * j) as f64 / n as f64;
                    let twiddle = Complex64::new(angle.cos(), angle.sin());
                    sum += data[j] * twiddle;
                }
                output[k] = sum * scale;
            }
            Ok(SciRS2Vector::from_array1(output))
        }
    }
}
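
// Sanity-check sketch (added for illustration): a forward/inverse round trip
// should reproduce the input on both the ndrustfft path and the naive DFT
// fallback, since `inverse` applies the 1/n normalization in both cases.
#[cfg(test)]
mod vectorized_fft_examples {
    use super::*;

    #[test]
    fn forward_inverse_round_trip() {
        let engine = SciRS2VectorizedFFT::default();
        let input = SciRS2Vector::from_array1(Array1::from_vec(vec![
            Complex64::new(1.0, 0.0),
            Complex64::new(0.0, 2.0),
            Complex64::new(-1.0, 0.5),
            Complex64::new(3.0, -1.0),
        ]));

        let spectrum = engine.forward(&input).expect("forward FFT");
        let recovered = engine.inverse(&spectrum).expect("inverse FFT");

        for (a, b) in input.data_view().iter().zip(recovered.data_view().iter()) {
            assert!((*a - *b).norm() < 1e-9);
        }
    }
}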

#[derive(Debug)]
pub struct SciRS2ParallelContext {
    pub num_threads: usize,
    pub thread_pool: ThreadPool,
    pub numa_aware: bool,
}

impl Default for SciRS2ParallelContext {
    fn default() -> Self {
        let num_threads = current_num_threads();
        let thread_pool = ThreadPoolBuilder::new()
            .num_threads(num_threads)
            .build()
            .unwrap_or_else(|_| {
                ThreadPoolBuilder::new()
                    .build()
                    .expect("fallback thread pool creation should succeed")
            });

        Self {
            num_threads,
            thread_pool,
            numa_aware: true,
        }
    }
}
438
439#[derive(Debug, Default, Clone)]
441pub struct BackendStats {
442 pub simd_vector_ops: usize,
444 pub simd_matrix_ops: usize,
446 pub complex_simd_ops: usize,
448 pub simd_time_ns: u64,
450 pub parallel_time_ns: u64,
452 pub memory_usage_bytes: usize,
454 pub peak_simd_throughput: f64,
456 pub simd_efficiency: f64,
458 pub vectorized_fft_ops: usize,
460 pub sparse_simd_ops: usize,
462 pub matrix_ops: usize,
464 pub lapack_time_ms: f64,
466 pub cache_hit_rate: f64,
468}
469
470#[derive(Debug)]
472pub struct SciRS2Backend {
473 pub available: bool,
475
476 pub stats: Arc<Mutex<BackendStats>>,
478
479 pub simd_context: SciRS2SimdContext,
481
482 pub memory_allocator: SciRS2MemoryAllocator,
484
485 pub fft_engine: SciRS2VectorizedFFT,
487
488 pub parallel_context: SciRS2ParallelContext,
490}

impl SciRS2Backend {
    #[must_use]
    pub fn new() -> Self {
        let simd_context = SciRS2SimdContext::detect_capabilities();
        let memory_allocator = SciRS2MemoryAllocator::default();
        let fft_engine = SciRS2VectorizedFFT::default();
        let parallel_context = SciRS2ParallelContext::default();

        Self {
            available: simd_context.supports_complex_simd,
            stats: Arc::new(Mutex::new(BackendStats::default())),
            simd_context,
            memory_allocator,
            fft_engine,
            parallel_context,
        }
    }

    pub fn with_config(simd_config: SciRS2SimdConfig) -> Result<Self> {
        let mut backend = Self::new();
        backend.simd_context = SciRS2SimdContext::from_config(&simd_config)?;
        Ok(backend)
    }

    #[must_use]
    pub const fn is_available(&self) -> bool {
        self.available && self.simd_context.supports_complex_simd
    }

    #[must_use]
    pub const fn get_simd_info(&self) -> &SciRS2SimdContext {
        &self.simd_context
    }

    #[must_use]
    pub fn get_stats(&self) -> BackendStats {
        self.stats
            .lock()
            .map(|guard| guard.clone())
            .unwrap_or_default()
    }

    pub fn reset_stats(&self) {
        if let Ok(mut guard) = self.stats.lock() {
            *guard = BackendStats::default();
        }
    }

    pub fn matrix_multiply(&self, a: &SciRS2Matrix, b: &SciRS2Matrix) -> Result<SciRS2Matrix> {
        let start_time = std::time::Instant::now();

        if a.cols() != b.rows() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Cannot multiply {}x{} matrix with {}x{} matrix",
                a.rows(),
                a.cols(),
                b.rows(),
                b.cols()
            )));
        }

        let result_shape = (a.rows(), b.cols());
        let mut result = SciRS2Matrix::zeros(result_shape, &self.memory_allocator)?;

        self.simd_gemm_complex(&a.data_view(), &b.data_view(), &mut result.data_view_mut())?;

        if let Ok(mut stats) = self.stats.lock() {
            stats.simd_matrix_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }

    pub fn matrix_vector_multiply(
        &self,
        a: &SciRS2Matrix,
        x: &SciRS2Vector,
    ) -> Result<SciRS2Vector> {
        let start_time = std::time::Instant::now();

        if a.cols() != x.len() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Cannot multiply {}x{} matrix with vector of length {}",
                a.rows(),
                a.cols(),
                x.len()
            )));
        }

        let mut result = SciRS2Vector::zeros(a.rows(), &self.memory_allocator)?;

        self.simd_gemv_complex(&a.data_view(), &x.data_view(), &mut result.data_view_mut())?;

        if let Ok(mut stats) = self.stats.lock() {
            stats.simd_vector_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }

    fn simd_gemm_complex(
        &self,
        a: &ArrayView2<Complex64>,
        b: &ArrayView2<Complex64>,
        c: &mut ArrayViewMut2<Complex64>,
    ) -> Result<()> {
        let (m, k) = a.dim();
        let (k2, n) = b.dim();

        assert_eq!(k, k2, "Inner dimensions must match");
        assert_eq!(c.dim(), (m, n), "Output dimensions must match");

        // Split the operands into separate real/imaginary planes
        // (structure-of-arrays layout, the form a vectorized complex kernel
        // would consume).
        let a_real: Vec<f64> = a.iter().map(|z| z.re).collect();
        let a_imag: Vec<f64> = a.iter().map(|z| z.im).collect();
        let b_real: Vec<f64> = b.iter().map(|z| z.re).collect();
        let b_imag: Vec<f64> = b.iter().map(|z| z.im).collect();

        for i in 0..m {
            for j in 0..n {
                let mut real_sum = 0.0;
                let mut imag_sum = 0.0;

                let a_row_start = i * k;

                // Scalar complex multiply-accumulate kernel; the SIMD-eligible
                // and short-row cases reduce to the same loop.
                for l in 0..k {
                    let b_idx = l * n + j;
                    let ar = a_real[a_row_start + l];
                    let ai = a_imag[a_row_start + l];
                    let br = b_real[b_idx];
                    let bi = b_imag[b_idx];

                    // (ar + i*ai) * (br + i*bi)
                    real_sum += ar.mul_add(br, -(ai * bi));
                    imag_sum += ar.mul_add(bi, ai * br);
                }

                c[[i, j]] = Complex64::new(real_sum, imag_sum);
            }
        }

        Ok(())
    }

    fn simd_gemv_complex(
        &self,
        a: &ArrayView2<Complex64>,
        x: &ArrayView1<Complex64>,
        y: &mut ArrayViewMut1<Complex64>,
    ) -> Result<()> {
        let (m, n) = a.dim();
        assert_eq!(x.len(), n, "Vector length must match matrix columns");
        assert_eq!(y.len(), m, "Output vector length must match matrix rows");

        // Structure-of-arrays split, as in simd_gemm_complex.
        let a_real: Vec<f64> = a.iter().map(|z| z.re).collect();
        let a_imag: Vec<f64> = a.iter().map(|z| z.im).collect();
        let x_real: Vec<f64> = x.iter().map(|z| z.re).collect();
        let x_imag: Vec<f64> = x.iter().map(|z| z.im).collect();

        for i in 0..m {
            let mut real_sum = 0.0;
            let mut imag_sum = 0.0;

            let row_start = i * n;

            // The chunked (SIMD-width) and remainder paths perform the same
            // scalar complex multiply-accumulate, so one loop covers both.
            for j in 0..n {
                let a_idx = row_start + j;
                let ar = a_real[a_idx];
                let ai = a_imag[a_idx];
                let xr = x_real[j];
                let xi = x_imag[j];

                real_sum += ar.mul_add(xr, -(ai * xi));
                imag_sum += ar.mul_add(xi, ai * xr);
            }

            y[i] = Complex64::new(real_sum, imag_sum);
        }

        Ok(())
    }

    #[cfg(feature = "advanced_math")]
    pub fn svd(&mut self, matrix: &Matrix) -> Result<SvdResult> {
        let start_time = std::time::Instant::now();

        let result = LAPACK::svd(matrix)?;

        if let Ok(mut stats) = self.stats.lock() {
            stats.simd_matrix_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }

    #[cfg(feature = "advanced_math")]
    pub fn eigendecomposition(&mut self, matrix: &Matrix) -> Result<EigResult> {
        let start_time = std::time::Instant::now();

        let result = LAPACK::eig(matrix)?;

        if let Ok(mut stats) = self.stats.lock() {
            stats.simd_matrix_ops += 1;
            stats.simd_time_ns += start_time.elapsed().as_nanos() as u64;
        }

        Ok(result)
    }
}

impl Default for SciRS2Backend {
    fn default() -> Self {
        Self::new()
    }
}
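
// Usage sketch (illustrative, not from the original file): multiply a 2x2
// matrix by the identity through the backend, confirm the product is
// unchanged, and exercise the dimension check.
#[cfg(test)]
mod backend_examples {
    use super::*;

    #[test]
    fn identity_matrix_multiply() {
        let backend = SciRS2Backend::new();
        let a = SciRS2Matrix::from_array2(
            Array2::from_shape_vec(
                (2, 2),
                vec![
                    Complex64::new(1.0, 1.0),
                    Complex64::new(2.0, 0.0),
                    Complex64::new(0.0, -1.0),
                    Complex64::new(3.0, 0.5),
                ],
            )
            .expect("shape matches data"),
        );
        let identity = SciRS2Matrix::from_array2(Array2::eye(2));

        let product = backend.matrix_multiply(&a, &identity).expect("2x2 * 2x2");
        assert_eq!(product.shape(), (2, 2));
        for (lhs, rhs) in a.data_view().iter().zip(product.data_view().iter()) {
            assert!((*lhs - *rhs).norm() < 1e-12);
        }

        // Mismatched inner dimensions must be rejected.
        let bad = SciRS2Matrix::from_array2(Array2::eye(3));
        assert!(backend.matrix_multiply(&a, &bad).is_err());
    }
}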

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct MemoryPool {
    _placeholder: (),
}

#[cfg(feature = "advanced_math")]
impl Default for MemoryPool {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "advanced_math")]
impl MemoryPool {
    pub const fn new() -> Self {
        Self { _placeholder: () }
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct MemoryPool;

#[cfg(not(feature = "advanced_math"))]
impl Default for MemoryPool {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(not(feature = "advanced_math"))]
impl MemoryPool {
    #[must_use]
    pub const fn new() -> Self {
        Self
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct FftEngine;

#[cfg(feature = "advanced_math")]
impl Default for FftEngine {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "advanced_math")]
impl FftEngine {
    pub const fn new() -> Self {
        Self
    }

    pub fn forward(&self, input: &Vector) -> Result<Vector> {
        use ndrustfft::{ndfft, FftHandler};

        let array = input.to_array1()?;
        let handler = FftHandler::new(array.len());
        let mut fft_result = array.clone();

        ndfft(&array, &mut fft_result, &handler, 0);

        Vector::from_array1(&fft_result.view(), &MemoryPool::new())
    }

    pub fn inverse(&self, input: &Vector) -> Result<Vector> {
        use ndrustfft::{ndifft, FftHandler};

        let array = input.to_array1()?;
        let handler = FftHandler::new(array.len());
        let mut ifft_result = array.clone();

        ndifft(&array, &mut ifft_result, &handler, 0);

        Vector::from_array1(&ifft_result.view(), &MemoryPool::new())
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct FftEngine;

#[cfg(not(feature = "advanced_math"))]
impl Default for FftEngine {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(not(feature = "advanced_math"))]
impl FftEngine {
    #[must_use]
    pub const fn new() -> Self {
        Self
    }

    pub fn forward(&self, _input: &Vector) -> Result<Vector> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn inverse(&self, _input: &Vector) -> Result<Vector> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct Matrix {
    data: Array2<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl Matrix {
    pub fn from_array2(array: &ArrayView2<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: array.to_owned(),
        })
    }

    pub fn zeros(shape: (usize, usize), _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: Array2::zeros(shape),
        })
    }

    pub fn to_array2(&self, result: &mut Array2<Complex64>) -> Result<()> {
        if result.shape() != self.data.shape() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Expected shape {:?}, but got {:?}",
                self.data.shape(),
                result.shape()
            )));
        }
        result.assign(&self.data);
        Ok(())
    }

    pub fn shape(&self) -> (usize, usize) {
        (self.data.nrows(), self.data.ncols())
    }

    pub fn view(&self) -> ArrayView2<'_, Complex64> {
        self.data.view()
    }

    pub fn view_mut(&mut self) -> scirs2_core::ndarray::ArrayViewMut2<'_, Complex64> {
        self.data.view_mut()
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct Matrix;

#[cfg(not(feature = "advanced_math"))]
impl Matrix {
    pub fn from_array2(_array: &ArrayView2<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn zeros(_shape: (usize, usize), _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array2(&self, _result: &mut Array2<Complex64>) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct Vector {
    data: Array1<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl Vector {
    pub fn from_array1(array: &ArrayView1<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: array.to_owned(),
        })
    }

    pub fn zeros(len: usize, _pool: &MemoryPool) -> Result<Self> {
        Ok(Self {
            data: Array1::zeros(len),
        })
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Ok(self.data.clone())
    }

    pub fn to_array1_mut(&self, result: &mut Array1<Complex64>) -> Result<()> {
        if result.len() != self.data.len() {
            return Err(SimulatorError::DimensionMismatch(format!(
                "Expected length {}, but got {}",
                self.data.len(),
                result.len()
            )));
        }
        result.assign(&self.data);
        Ok(())
    }

    pub fn len(&self) -> usize {
        self.data.len()
    }

    pub fn view(&self) -> ArrayView1<'_, Complex64> {
        self.data.view()
    }

    pub fn view_mut(&mut self) -> scirs2_core::ndarray::ArrayViewMut1<'_, Complex64> {
        self.data.view_mut()
    }
}

#[cfg(not(feature = "advanced_math"))]
#[derive(Debug)]
pub struct Vector;

#[cfg(not(feature = "advanced_math"))]
impl Vector {
    pub fn from_array1(_array: &ArrayView1<Complex64>, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn zeros(_len: usize, _pool: &MemoryPool) -> Result<Self> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }

    pub fn to_array1_mut(&self, _result: &mut Array1<Complex64>) -> Result<()> {
        Err(SimulatorError::UnsupportedOperation(
            "SciRS2 integration requires 'advanced_math' feature".to_string(),
        ))
    }
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct SparseMatrix {
    csr_matrix: nalgebra_sparse::CsrMatrix<Complex64>,
}

#[cfg(feature = "advanced_math")]
impl SparseMatrix {
    pub fn from_csr(
        values: &[Complex64],
        col_indices: &[usize],
        row_ptr: &[usize],
        num_rows: usize,
        num_cols: usize,
        _pool: &MemoryPool,
    ) -> Result<Self> {
        use nalgebra_sparse::CsrMatrix;

        let csr_matrix = CsrMatrix::try_from_csr_data(
            num_rows,
            num_cols,
            row_ptr.to_vec(),
            col_indices.to_vec(),
            values.to_vec(),
        )
        .map_err(|e| {
            SimulatorError::ComputationError(format!("Failed to create CSR matrix: {e}"))
        })?;

        Ok(Self { csr_matrix })
    }

    pub fn matvec(&self, vector: &Vector, result: &mut Vector) -> Result<()> {
        use nalgebra::{Complex, DVector};

        let input_vec = vector.to_array1()?;
        let nalgebra_vec = DVector::from_iterator(
            input_vec.len(),
            input_vec.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut output = DVector::zeros(self.csr_matrix.nrows());

        // Row-wise sparse dot products over the CSR structure.
        for (row_idx, row) in self.csr_matrix.row_iter().enumerate() {
            let mut sum = Complex::new(0.0, 0.0);
            for (col_idx, value) in row.col_indices().iter().zip(row.values()) {
                sum += value * nalgebra_vec[*col_idx];
            }
            output[row_idx] = sum;
        }

        let output_array: Array1<Complex64> =
            Array1::from_iter(output.iter().map(|c| Complex64::new(c.re, c.im)));

        result.data.assign(&output_array);
        Ok(())
    }

    pub fn solve(&self, rhs: &Vector) -> Result<Vector> {
        use nalgebra::Complex;
        use nalgebra_sparse::SparseEntry;

        let rhs_array = rhs.to_array1()?;

        // Diagonal (Jacobi-style) approximation: divide each component by its
        // diagonal entry. This is a cheap, preconditioner-quality solve, not a
        // full sparse factorization; use the iterative solvers below when an
        // accurate solution is needed.
        let mut solution = rhs_array.clone();
        for i in 0..solution.len().min(self.csr_matrix.nrows()) {
            let diag = self
                .csr_matrix
                .get_entry(i, i)
                .map_or(Complex::new(1.0, 0.0), |entry| match entry {
                    SparseEntry::NonZero(v) => *v,
                    SparseEntry::Zero => Complex::new(0.0, 0.0),
                });

            if diag.norm() > 1e-14 {
                solution[i] = rhs_array[i] / diag;
            }
        }

        Vector::from_array1(&solution.view(), &MemoryPool::new())
    }

    pub fn shape(&self) -> (usize, usize) {
        (self.csr_matrix.nrows(), self.csr_matrix.ncols())
    }

    pub fn nnz(&self) -> usize {
        self.csr_matrix.nnz()
    }
}
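
// Sketch of SparseMatrix usage (added for illustration): a 2x2 diagonal CSR
// matrix applied to a vector of ones. Assumes the "advanced_math" feature.
#[cfg(all(test, feature = "advanced_math"))]
mod sparse_matrix_examples {
    use super::*;

    #[test]
    fn diagonal_matvec() {
        let pool = MemoryPool::new();
        // CSR layout: row_ptr has nrows + 1 entries; each row holds one value.
        let values = [Complex64::new(2.0, 0.0), Complex64::new(3.0, 0.0)];
        let col_indices = [0usize, 1];
        let row_ptr = [0usize, 1, 2];
        let matrix = SparseMatrix::from_csr(&values, &col_indices, &row_ptr, 2, 2, &pool)
            .expect("valid CSR data");
        assert_eq!(matrix.shape(), (2, 2));
        assert_eq!(matrix.nnz(), 2);

        let x = Vector::from_array1(&Array1::ones(2).view(), &pool).expect("vector");
        let mut y = Vector::zeros(2, &pool).expect("vector");
        matrix.matvec(&x, &mut y).expect("matvec");

        let y_arr = y.to_array1().expect("dense copy");
        assert!((y_arr[0] - Complex64::new(2.0, 0.0)).norm() < 1e-12);
        assert!((y_arr[1] - Complex64::new(3.0, 0.0)).norm() < 1e-12);
    }
}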
1175
1176#[cfg(not(feature = "advanced_math"))]
1177#[derive(Debug)]
1178pub struct SparseMatrix;
1179
1180#[cfg(not(feature = "advanced_math"))]
1181impl SparseMatrix {
1182 pub fn from_csr(
1183 _values: &[Complex64],
1184 _col_indices: &[usize],
1185 _row_ptr: &[usize],
1186 _num_rows: usize,
1187 _num_cols: usize,
1188 _pool: &MemoryPool,
1189 ) -> Result<Self> {
1190 Err(SimulatorError::UnsupportedOperation(
1191 "SciRS2 integration requires 'advanced_math' feature".to_string(),
1192 ))
1193 }
1194
1195 pub fn matvec(&self, _vector: &Vector, _result: &mut Vector) -> Result<()> {
1196 Err(SimulatorError::UnsupportedOperation(
1197 "SciRS2 integration requires 'advanced_math' feature".to_string(),
1198 ))
1199 }
1200
1201 pub fn solve(&self, _rhs: &Vector) -> Result<Vector> {
1202 Err(SimulatorError::UnsupportedOperation(
1203 "SciRS2 integration requires 'advanced_math' feature".to_string(),
1204 ))
1205 }
1206}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct BLAS;

#[cfg(feature = "advanced_math")]
impl BLAS {
    pub fn gemm(
        alpha: Complex64,
        a: &Matrix,
        b: &Matrix,
        beta: Complex64,
        c: &mut Matrix,
    ) -> Result<()> {
        // C = alpha * A * B + beta * C
        let a_scaled = &a.data * alpha;
        let c_scaled = &c.data * beta;
        let result = a_scaled.dot(&b.data) + c_scaled;
        c.data.assign(&result);
        Ok(())
    }

    pub fn gemv(
        alpha: Complex64,
        a: &Matrix,
        x: &Vector,
        beta: Complex64,
        y: &mut Vector,
    ) -> Result<()> {
        // y = alpha * A * x + beta * y
        let y_scaled = &y.data * beta;
        let result = &a.data.dot(&x.data) * alpha + y_scaled;
        y.data.assign(&result);
        Ok(())
    }
}
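
// Worked example (illustrative): BLAS::gemm computes C = alpha * A * B + beta * C.
// With A = B = I, alpha = 2, beta = 0, the result is 2 * I.
#[cfg(all(test, feature = "advanced_math"))]
mod blas_examples {
    use super::*;

    #[test]
    fn gemm_scales_identity() {
        let pool = MemoryPool::new();
        let a = Matrix::from_array2(&Array2::eye(2).view(), &pool).expect("matrix");
        let b = Matrix::from_array2(&Array2::eye(2).view(), &pool).expect("matrix");
        let mut c = Matrix::zeros((2, 2), &pool).expect("matrix");

        BLAS::gemm(
            Complex64::new(2.0, 0.0),
            &a,
            &b,
            Complex64::new(0.0, 0.0),
            &mut c,
        )
        .expect("gemm");

        assert!((c.view()[[0, 0]] - Complex64::new(2.0, 0.0)).norm() < 1e-12);
        assert!((c.view()[[1, 1]] - Complex64::new(2.0, 0.0)).norm() < 1e-12);
        assert!(c.view()[[0, 1]].norm() < 1e-12);
    }
}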
1244
1245#[cfg(not(feature = "advanced_math"))]
1246#[derive(Debug)]
1247pub struct BLAS;
1248
1249#[cfg(not(feature = "advanced_math"))]
1250impl BLAS {
1251 pub fn gemm(
1252 _alpha: Complex64,
1253 _a: &Matrix,
1254 _b: &Matrix,
1255 _beta: Complex64,
1256 _c: &mut Matrix,
1257 ) -> Result<()> {
1258 Err(SimulatorError::UnsupportedOperation(
1259 "SciRS2 integration requires 'advanced_math' feature".to_string(),
1260 ))
1261 }
1262
1263 pub fn gemv(
1264 _alpha: Complex64,
1265 _a: &Matrix,
1266 _x: &Vector,
1267 _beta: Complex64,
1268 _y: &mut Vector,
1269 ) -> Result<()> {
1270 Err(SimulatorError::UnsupportedOperation(
1271 "SciRS2 integration requires 'advanced_math' feature".to_string(),
1272 ))
1273 }
1274}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct LAPACK;

#[cfg(feature = "advanced_math")]
impl LAPACK {
    pub fn svd(matrix: &Matrix) -> Result<SvdResult> {
        use scirs2_core::ndarray::ndarray_linalg::SVD;

        let svd_result = matrix
            .data
            .svd(true, true)
            .map_err(|_| SimulatorError::ComputationError("SVD computation failed".to_string()))?;

        let pool = MemoryPool::new();

        let u_data = svd_result.0;
        let s_data = svd_result.1;
        let vt_data = svd_result.2;

        let u = Matrix::from_array2(&u_data.view(), &pool)?;
        let s_complex: Array1<Complex64> = s_data.mapv(|x| Complex64::new(x, 0.0));
        let s = Vector::from_array1(&s_complex.view(), &pool)?;
        let vt = Matrix::from_array2(&vt_data.view(), &pool)?;

        Ok(SvdResult { u, s, vt })
    }

    pub fn eig(matrix: &Matrix) -> Result<EigResult> {
        use scirs2_core::ndarray::ndarray_linalg::Eig;

        let (eigenvalues_array, eigenvectors_array) = matrix.data.eig().map_err(|_| {
            SimulatorError::ComputationError("Eigenvalue decomposition failed".to_string())
        })?;

        let pool = MemoryPool::new();
        let values = Vector::from_array1(&eigenvalues_array.view(), &pool)?;
        let vectors = Matrix::from_array2(&eigenvectors_array.view(), &pool)?;

        Ok(EigResult { values, vectors })
    }

    pub fn lu(matrix: &Matrix) -> Result<(Matrix, Matrix, Vec<usize>)> {
        let n = matrix.data.nrows();
        let pool = MemoryPool::new();

        let mut l_data = Array2::eye(n);
        let mut u_data = matrix.data.clone();
        let mut perm_vec: Vec<usize> = (0..n).collect();

        // Gaussian elimination with partial pivoting.
        for k in 0..n {
            // Find the pivot row for column k.
            let mut max_row = k;
            let mut max_val = u_data[[k, k]].norm();
            for i in k + 1..n {
                let val = u_data[[i, k]].norm();
                if val > max_val {
                    max_val = val;
                    max_row = i;
                }
            }

            // Swap rows if a better pivot was found.
            if max_row != k {
                for j in 0..n {
                    let temp = u_data[[k, j]];
                    u_data[[k, j]] = u_data[[max_row, j]];
                    u_data[[max_row, j]] = temp;
                }
                perm_vec.swap(k, max_row);
            }

            // Eliminate entries below the pivot.
            if u_data[[k, k]].norm() > 1e-10 {
                for i in k + 1..n {
                    let factor = u_data[[i, k]] / u_data[[k, k]];
                    l_data[[i, k]] = factor;
                    for j in k..n {
                        let u_kj = u_data[[k, j]];
                        u_data[[i, j]] -= factor * u_kj;
                    }
                }
            }
        }

        let l_matrix = Matrix::from_array2(&l_data.view(), &pool)?;
        let u_matrix = Matrix::from_array2(&u_data.view(), &pool)?;

        Ok((l_matrix, u_matrix, perm_vec))
    }

    pub fn qr(matrix: &Matrix) -> Result<(Matrix, Matrix)> {
        use scirs2_core::ndarray::ndarray_linalg::QR;

        let (q, r) = matrix
            .data
            .qr()
            .map_err(|_| SimulatorError::ComputationError("QR decomposition failed".to_string()))?;

        let pool = MemoryPool::new();
        let q_matrix = Matrix::from_array2(&q.view(), &pool)?;
        let r_matrix = Matrix::from_array2(&r.view(), &pool)?;

        Ok((q_matrix, r_matrix))
    }
}
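
// Small LU example (illustrative): factor a 2x2 matrix and verify that L * U
// reproduces the row-permuted input, using the perm vector returned above.
#[cfg(all(test, feature = "advanced_math"))]
mod lapack_examples {
    use super::*;

    #[test]
    fn lu_reconstructs_permuted_matrix() {
        let pool = MemoryPool::new();
        let a_data = Array2::from_shape_vec(
            (2, 2),
            vec![
                Complex64::new(1.0, 0.0),
                Complex64::new(2.0, 0.0),
                Complex64::new(4.0, 0.0),
                Complex64::new(3.0, 0.0),
            ],
        )
        .expect("shape matches data");
        let a = Matrix::from_array2(&a_data.view(), &pool).expect("matrix");

        let (l, u, perm) = LAPACK::lu(&a).expect("LU factorization");
        let lu = l.view().dot(&u.view());
        // Row i of L*U equals row perm[i] of the original matrix.
        for (i, &src_row) in perm.iter().enumerate() {
            for j in 0..2 {
                assert!((lu[[i, j]] - a_data[[src_row, j]]).norm() < 1e-10);
            }
        }
    }
}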
1391
1392#[cfg(not(feature = "advanced_math"))]
1393#[derive(Debug)]
1394pub struct LAPACK;
1395
1396#[cfg(not(feature = "advanced_math"))]
1397impl LAPACK {
1398 pub fn svd(_matrix: &Matrix) -> Result<SvdResult> {
1399 Err(SimulatorError::UnsupportedOperation(
1400 "SciRS2 integration requires 'advanced_math' feature".to_string(),
1401 ))
1402 }
1403
1404 pub fn eig(_matrix: &Matrix) -> Result<EigResult> {
1405 Err(SimulatorError::UnsupportedOperation(
1406 "SciRS2 integration requires 'advanced_math' feature".to_string(),
1407 ))
1408 }
1409}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct SvdResult {
    pub u: Matrix,
    pub s: Vector,
    pub vt: Matrix,
}

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct EigResult {
    pub values: Vector,
    pub vectors: Matrix,
}

#[cfg(feature = "advanced_math")]
impl EigResult {
    pub fn to_array1(&self) -> Result<Array1<Complex64>> {
        self.values.to_array1()
    }

    pub const fn eigenvalues(&self) -> &Vector {
        &self.values
    }

    pub const fn eigenvectors(&self) -> &Matrix {
        &self.vectors
    }
}

#[cfg(feature = "advanced_math")]
impl SvdResult {
    pub fn to_array2(&self) -> Result<Array2<Complex64>> {
        self.u.data.to_owned().into_dimensionality().map_err(|_| {
            SimulatorError::ComputationError("Failed to convert SVD result to array2".to_string())
        })
    }

    pub const fn u_matrix(&self) -> &Matrix {
        &self.u
    }

    pub const fn singular_values(&self) -> &Vector {
        &self.s
    }

    pub const fn vt_matrix(&self) -> &Matrix {
        &self.vt
    }
}
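
// Illustrative check: the SVD of the identity has all singular values equal
// to one. Relies on the LAPACK wrapper above being backed by a working
// linear-algebra implementation.
#[cfg(all(test, feature = "advanced_math"))]
mod svd_examples {
    use super::*;

    #[test]
    fn identity_singular_values() {
        let pool = MemoryPool::new();
        let a = Matrix::from_array2(&Array2::eye(3).view(), &pool).expect("matrix");
        let svd = LAPACK::svd(&a).expect("SVD");

        let s = svd.singular_values().to_array1().expect("dense copy");
        assert_eq!(s.len(), 3);
        for sigma in &s {
            assert!((sigma.re - 1.0).abs() < 1e-10);
            assert!(sigma.im.abs() < 1e-12);
        }
    }
}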
1468
1469#[cfg(not(feature = "advanced_math"))]
1470#[derive(Debug)]
1471pub struct SvdResult;
1472
1473#[cfg(not(feature = "advanced_math"))]
1474#[derive(Debug)]
1475pub struct EigResult;
1476
1477#[cfg(not(feature = "advanced_math"))]
1478impl EigResult {
1479 pub fn to_array1(&self) -> Result<Array1<Complex64>> {
1480 Err(SimulatorError::UnsupportedOperation(
1481 "SciRS2 integration requires 'advanced_math' feature".to_string(),
1482 ))
1483 }
1484}
1485
1486#[cfg(not(feature = "advanced_math"))]
1487impl SvdResult {
1488 pub fn to_array2(&self) -> Result<Array2<Complex64>> {
1489 Err(SimulatorError::UnsupportedOperation(
1490 "SciRS2 integration requires 'advanced_math' feature".to_string(),
1491 ))
1492 }
1493}

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedFFT;

#[cfg(feature = "advanced_math")]
impl AdvancedFFT {
    pub fn fft_nd(input: &Array2<Complex64>) -> Result<Array2<Complex64>> {
        use ndrustfft::{ndfft, FftHandler};

        let (rows, cols) = input.dim();
        let mut result = input.clone();

        // 2D FFT as two passes of 1D FFTs: first along rows, then columns.
        for i in 0..rows {
            let row = input.row(i).to_owned();
            let mut row_out = row.clone();
            let handler = FftHandler::new(cols);
            ndfft(&row, &mut row_out, &handler, 0);
            result.row_mut(i).assign(&row_out);
        }

        for j in 0..cols {
            let col = result.column(j).to_owned();
            let mut col_out = col.clone();
            let handler = FftHandler::new(rows);
            ndfft(&col, &mut col_out, &handler, 0);
            result.column_mut(j).assign(&col_out);
        }

        Ok(result)
    }

    pub fn windowed_fft(
        input: &Vector,
        window_size: usize,
        overlap: usize,
    ) -> Result<Array2<Complex64>> {
        if overlap >= window_size {
            return Err(SimulatorError::DimensionMismatch(format!(
                "overlap ({overlap}) must be smaller than window_size ({window_size})"
            )));
        }

        let array = input.to_array1()?;
        let step_size = window_size - overlap;
        let num_windows = array.len().saturating_sub(overlap) / step_size;

        let mut result = Array2::zeros((num_windows, window_size));

        for (i, mut row) in result.outer_iter_mut().enumerate() {
            let start = i * step_size;
            let end = (start + window_size).min(array.len());

            if end - start == window_size {
                let window = array.slice(s![start..end]);

                // Apply a Hann window before transforming each segment.
                let windowed: Array1<Complex64> = window
                    .iter()
                    .enumerate()
                    .map(|(j, &val)| {
                        let hann =
                            0.5 * (1.0 - (2.0 * PI * j as f64 / (window_size - 1) as f64).cos());
                        val * Complex64::new(hann, 0.0)
                    })
                    .collect();

                let handler = FftHandler::new(window_size);
                let mut fft_result = windowed.clone();
                ndrustfft::ndfft(&windowed, &mut fft_result, &handler, 0);

                row.assign(&fft_result);
            }
        }

        Ok(result)
    }

    pub fn convolution(a: &Vector, b: &Vector) -> Result<Vector> {
        let a_array = a.to_array1()?;
        let b_array = b.to_array1()?;

        // Linear convolution via the FFT: pad to a power of two, multiply the
        // spectra pointwise, and transform back.
        let n = a_array.len() + b_array.len() - 1;
        let fft_size = n.next_power_of_two();

        let mut a_padded = Array1::zeros(fft_size);
        let mut b_padded = Array1::zeros(fft_size);
        a_padded.slice_mut(s![..a_array.len()]).assign(&a_array);
        b_padded.slice_mut(s![..b_array.len()]).assign(&b_array);

        let handler = FftHandler::new(fft_size);
        let mut a_fft = a_padded.clone();
        let mut b_fft = b_padded.clone();
        ndrustfft::ndfft(&a_padded, &mut a_fft, &handler, 0);
        ndrustfft::ndfft(&b_padded, &mut b_fft, &handler, 0);

        let mut product = Array1::zeros(fft_size);
        for i in 0..fft_size {
            product[i] = a_fft[i] * b_fft[i];
        }

        let mut result = product.clone();
        ndrustfft::ndifft(&product, &mut result, &handler, 0);

        let truncated = result.slice(s![..n]).to_owned();
        Vector::from_array1(&truncated.view(), &MemoryPool::new())
    }
}
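
// Convolution sanity sketch (illustrative): convolving a signal with the unit
// impulse [1, 0] returns the signal itself, padded by one trailing sample.
#[cfg(all(test, feature = "advanced_math"))]
mod advanced_fft_examples {
    use super::*;

    #[test]
    fn convolution_with_unit_impulse() {
        let pool = MemoryPool::new();
        let signal = Vector::from_array1(
            &Array1::from_vec(vec![
                Complex64::new(1.0, 0.0),
                Complex64::new(2.0, 0.0),
                Complex64::new(3.0, 0.0),
            ])
            .view(),
            &pool,
        )
        .expect("vector");
        let impulse = Vector::from_array1(
            &Array1::from_vec(vec![Complex64::new(1.0, 0.0), Complex64::new(0.0, 0.0)]).view(),
            &pool,
        )
        .expect("vector");

        let out = AdvancedFFT::convolution(&signal, &impulse).expect("convolution");
        let out_arr = out.to_array1().expect("dense copy");
        assert_eq!(out_arr.len(), 4); // 3 + 2 - 1
        for (i, expected) in [1.0, 2.0, 3.0, 0.0].iter().enumerate() {
            assert!((out_arr[i].re - expected).abs() < 1e-9);
            assert!(out_arr[i].im.abs() < 1e-9);
        }
    }
}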

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct SparseSolvers;

#[cfg(feature = "advanced_math")]
impl SparseSolvers {
    pub fn conjugate_gradient(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
    ) -> Result<Vector> {
        use nalgebra::{Complex, DVector};

        let b_array = b.to_array1()?;
        let b_vec = DVector::from_iterator(
            b_array.len(),
            b_array.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut x = if let Some(x0_vec) = x0 {
            let x0_array = x0_vec.to_array1()?;
            DVector::from_iterator(
                x0_array.len(),
                x0_array.iter().map(|&c| Complex::new(c.re, c.im)),
            )
        } else {
            DVector::zeros(b_vec.len())
        };

        // Initial residual r = b - A x.
        let pool = MemoryPool::new();
        let x_vector = Vector::from_array1(
            &Array1::from_vec(x.iter().map(|c| Complex64::new(c.re, c.im)).collect()).view(),
            &pool,
        )?;
        let mut ax_vector = Vector::zeros(x.len(), &pool)?;
        matrix.matvec(&x_vector, &mut ax_vector)?;

        let ax_array = ax_vector.to_array1()?;
        let ax = DVector::from_iterator(
            ax_array.len(),
            ax_array.iter().map(|&c| Complex::new(c.re, c.im)),
        );

        let mut r = &b_vec - &ax;
        let mut p = r.clone();
        // Note: the inner products below are unconjugated (transpose rather
        // than Hermitian); they are exact for real-valued systems.
        let mut rsold = r.dot(&r).re;

        for _ in 0..max_iterations {
            let p_vec = Vector::from_array1(
                &Array1::from_vec(p.iter().map(|c| Complex64::new(c.re, c.im)).collect()).view(),
                &MemoryPool::new(),
            )?;
            let mut ap_vec =
                Vector::from_array1(&Array1::zeros(p.len()).view(), &MemoryPool::new())?;
            matrix.matvec(&p_vec, &mut ap_vec)?;
            let ap_array = ap_vec.to_array1()?;
            let ap = DVector::from_iterator(
                ap_array.len(),
                ap_array.iter().map(|&c| Complex::new(c.re, c.im)),
            );

            let alpha = rsold / p.dot(&ap).re;
            let alpha_complex = Complex::new(alpha, 0.0);
            x += &p * alpha_complex;
            r -= &ap * alpha_complex;

            let rsnew = r.dot(&r).re;
            if rsnew.sqrt() < tolerance {
                break;
            }

            let beta = rsnew / rsold;
            let beta_complex = Complex::new(beta, 0.0);
            p = &r + &p * beta_complex;
            rsold = rsnew;
        }

        let result_array =
            Array1::from_vec(x.iter().map(|c| Complex64::new(c.re, c.im)).collect());
        Vector::from_array1(&result_array.view(), &MemoryPool::new())
    }

    pub fn gmres(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
        restart: usize,
    ) -> Result<Vector> {
        let b_array = b.to_array1()?;
        let n = b_array.len();

        let mut x = if let Some(x0_vec) = x0 {
            x0_vec.to_array1()?.to_owned()
        } else {
            Array1::zeros(n)
        };

        for _restart_iter in 0..(max_iterations / restart) {
            // Residual r = b - A x for the current iterate.
            let mut ax = Array1::zeros(n);
            let x_vec = Vector::from_array1(&x.view(), &MemoryPool::new())?;
            let mut ax_vec = Vector::from_array1(&ax.view(), &MemoryPool::new())?;
            matrix.matvec(&x_vec, &mut ax_vec)?;
            ax = ax_vec.to_array1()?;

            let mut r = &b_array - &ax;
            let beta = r.norm_l2()?;

            if beta < tolerance {
                break;
            }

            r = r.mapv(|x| x / Complex64::new(beta, 0.0));

            // Arnoldi process building the Krylov basis v and the Hessenberg
            // matrix h.
            let mut v = Vec::new();
            v.push(r.clone());

            let mut h = Array2::zeros((restart + 1, restart));

            for j in 0..restart.min(max_iterations) {
                let v_vec = Vector::from_array1(&v[j].view(), &MemoryPool::new())?;
                let mut av = Array1::zeros(n);
                let mut av_vec = Vector::from_array1(&av.view(), &MemoryPool::new())?;
                matrix.matvec(&v_vec, &mut av_vec)?;
                av = av_vec.to_array1()?;

                for i in 0..=j {
                    h[[i, j]] = v[i].dot(&av);
                    av = av - h[[i, j]] * &v[i];
                }

                h[[j + 1, j]] = Complex64::new(av.norm_l2()?, 0.0);

                if h[[j + 1, j]].norm() < tolerance {
                    break;
                }

                av /= h[[j + 1, j]];
                v.push(av);
            }

            // Approximate least-squares step: back substitution on the upper
            // triangle of h (a simplification of the usual Givens-rotation
            // solve).
            let krylov_dim = v.len() - 1;
            if krylov_dim > 0 {
                let mut e1 = Array1::zeros(krylov_dim + 1);
                e1[0] = Complex64::new(beta, 0.0);

                let mut y = Array1::zeros(krylov_dim);
                for i in (0..krylov_dim).rev() {
                    let mut sum = Complex64::new(0.0, 0.0);
                    for j in (i + 1)..krylov_dim {
                        sum += h[[i, j]] * y[j];
                    }
                    y[i] = (e1[i] - sum) / h[[i, i]];
                }

                for i in 0..krylov_dim {
                    x = x + y[i] * &v[i];
                }
            }
        }

        Vector::from_array1(&x.view(), &MemoryPool::new())
    }

    pub fn bicgstab(
        matrix: &SparseMatrix,
        b: &Vector,
        x0: Option<&Vector>,
        tolerance: f64,
        max_iterations: usize,
    ) -> Result<Vector> {
        let b_array = b.to_array1()?;
        let n = b_array.len();

        let mut x = if let Some(x0_vec) = x0 {
            x0_vec.to_array1()?.to_owned()
        } else {
            Array1::zeros(n)
        };

        // Initial residual r = b - A x; r0 is the fixed shadow residual.
        let mut ax = Array1::zeros(n);
        let x_vec = Vector::from_array1(&x.view(), &MemoryPool::new())?;
        let mut ax_vec = Vector::from_array1(&ax.view(), &MemoryPool::new())?;
        matrix.matvec(&x_vec, &mut ax_vec)?;
        ax = ax_vec.to_array1()?;

        let mut r = &b_array - &ax;
        let r0 = r.clone();

        let mut rho = Complex64::new(1.0, 0.0);
        let mut alpha = Complex64::new(1.0, 0.0);
        let mut omega = Complex64::new(1.0, 0.0);

        let mut p = Array1::zeros(n);
        let mut v = Array1::zeros(n);

        for _ in 0..max_iterations {
            let rho_new = r0.dot(&r);
            let beta = (rho_new / rho) * (alpha / omega);

            p = &r + beta * (&p - omega * &v);

            let p_vec = Vector::from_array1(&p.view(), &MemoryPool::new())?;
            let mut v_vec = Vector::from_array1(&v.view(), &MemoryPool::new())?;
            matrix.matvec(&p_vec, &mut v_vec)?;
            v = v_vec.to_array1()?;

            alpha = rho_new / r0.dot(&v);
            let s = &r - alpha * &v;

            if s.norm_l2()? < tolerance {
                x = x + alpha * &p;
                break;
            }

            let s_vec = Vector::from_array1(&s.view(), &MemoryPool::new())?;
            let mut t_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&s_vec, &mut t_vec)?;
            let t = t_vec.to_array1()?;

            omega = t.dot(&s) / t.dot(&t);
            x = x + alpha * &p + omega * &s;
            r = s - omega * &t;

            if r.norm_l2()? < tolerance {
                break;
            }

            rho = rho_new;
        }

        Vector::from_array1(&x.view(), &MemoryPool::new())
    }
}
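
// Solver sketch (illustrative): on an identity system the conjugate-gradient
// iteration converges to x = b almost immediately.
#[cfg(all(test, feature = "advanced_math"))]
mod sparse_solver_examples {
    use super::*;

    #[test]
    fn cg_solves_identity_system() {
        let pool = MemoryPool::new();
        let n = 4;
        let values: Vec<Complex64> = vec![Complex64::new(1.0, 0.0); n];
        let col_indices: Vec<usize> = (0..n).collect();
        let row_ptr: Vec<usize> = (0..=n).collect();
        let identity = SparseMatrix::from_csr(&values, &col_indices, &row_ptr, n, n, &pool)
            .expect("valid CSR identity");

        let b = Vector::from_array1(&Array1::ones(n).view(), &pool).expect("vector");
        let x = SparseSolvers::conjugate_gradient(&identity, &b, None, 1e-10, 50)
            .expect("CG should converge");

        let x_arr = x.to_array1().expect("dense copy");
        for xi in &x_arr {
            assert!((xi - Complex64::new(1.0, 0.0)).norm() < 1e-8);
        }
    }
}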

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedEigensolvers;

#[cfg(feature = "advanced_math")]
impl AdvancedEigensolvers {
    pub fn lanczos(
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        max_iterations: usize,
        tolerance: f64,
    ) -> Result<EigResult> {
        let n = matrix.csr_matrix.nrows();
        let m = num_eigenvalues.min(max_iterations);

        // Random normalized starting vector.
        let mut q = Array1::from_vec(
            (0..n)
                .map(|_| {
                    Complex64::new(
                        thread_rng().gen::<f64>() - 0.5,
                        thread_rng().gen::<f64>() - 0.5,
                    )
                })
                .collect(),
        );
        let q_norm = q.norm_l2()?;
        q = q.mapv(|x| x / Complex64::new(q_norm, 0.0));

        let mut q_vectors = Vec::new();
        q_vectors.push(q.clone());

        let mut alpha = Vec::new();
        let mut beta = Vec::new();

        let mut q_prev = Array1::<Complex64>::zeros(n);

        // Lanczos three-term recurrence.
        for j in 0..m {
            let q_vec = Vector::from_array1(&q_vectors[j].view(), &MemoryPool::new())?;
            let mut av_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&q_vec, &mut av_vec)?;
            let mut av = av_vec.to_array1()?;

            let alpha_j = q_vectors[j].dot(&av);
            alpha.push(alpha_j);

            av = av - alpha_j * &q_vectors[j];
            if j > 0 {
                av = av - Complex64::new(beta[j - 1], 0.0) * &q_prev;
            }

            let beta_j = av.norm_l2()?;

            if beta_j.abs() < tolerance {
                break;
            }

            beta.push(beta_j);
            q_prev = q_vectors[j].clone();

            if j + 1 < m {
                q = av / beta_j;
                q_vectors.push(q.clone());
            }
        }

        // Assemble the tridiagonal matrix from the recurrence coefficients.
        let dim = alpha.len();
        let mut tridiag = Array2::zeros((dim, dim));

        for i in 0..dim {
            tridiag[[i, i]] = alpha[i];
            if i > 0 {
                tridiag[[i - 1, i]] = Complex64::new(beta[i - 1], 0.0);
                tridiag[[i, i - 1]] = Complex64::new(beta[i - 1], 0.0);
            }
        }

        // Cheap estimate: take the diagonal of the tridiagonal matrix as the
        // eigenvalue approximations (a full tridiagonal eigensolve would
        // refine these).
        let mut eigenvalues = Array1::zeros(num_eigenvalues.min(dim));
        for i in 0..eigenvalues.len() {
            eigenvalues[i] = tridiag[[i, i]];
        }

        // Use the Lanczos basis vectors as approximate eigenvectors.
        let mut eigenvectors = Array2::zeros((n, eigenvalues.len()));
        for (i, mut col) in eigenvectors
            .columns_mut()
            .into_iter()
            .enumerate()
            .take(eigenvalues.len())
        {
            if i < q_vectors.len() {
                col.assign(&q_vectors[i]);
            }
        }

        let values = Vector::from_array1(&eigenvalues.view(), &MemoryPool::new())?;
        let vectors = Matrix::from_array2(&eigenvectors.view(), &MemoryPool::new())?;

        Ok(EigResult { values, vectors })
    }

    pub fn arnoldi(
        matrix: &SparseMatrix,
        num_eigenvalues: usize,
        max_iterations: usize,
        tolerance: f64,
    ) -> Result<EigResult> {
        let n = matrix.csr_matrix.nrows();
        let m = num_eigenvalues.min(max_iterations);

        // Random normalized starting vector.
        let mut q = Array1::from_vec(
            (0..n)
                .map(|_| {
                    Complex64::new(
                        thread_rng().gen::<f64>() - 0.5,
                        thread_rng().gen::<f64>() - 0.5,
                    )
                })
                .collect(),
        );
        let q_norm = q.norm_l2()?;
        q = q.mapv(|x| x / Complex64::new(q_norm, 0.0));

        let mut q_vectors = Vec::new();
        q_vectors.push(q.clone());

        let mut h = Array2::zeros((m + 1, m));

        // Arnoldi iteration with modified Gram-Schmidt orthogonalization.
        for j in 0..m {
            let q_vec = Vector::from_array1(&q_vectors[j].view(), &MemoryPool::new())?;
            let mut v_vec = Vector::from_array1(&Array1::zeros(n).view(), &MemoryPool::new())?;
            matrix.matvec(&q_vec, &mut v_vec)?;
            let mut v = v_vec.to_array1()?;

            for i in 0..=j {
                h[[i, j]] = q_vectors[i].dot(&v);
                v = v - h[[i, j]] * &q_vectors[i];
            }

            h[[j + 1, j]] = Complex64::new(v.norm_l2()?, 0.0);

            if h[[j + 1, j]].norm() < tolerance {
                break;
            }

            if j + 1 < m {
                q = v / h[[j + 1, j]];
                q_vectors.push(q.clone());
            }
        }

        // Cheap estimate: take the diagonal of the Hessenberg matrix as the
        // eigenvalue approximations.
        let dim = q_vectors.len();
        let mut eigenvalues = Array1::zeros(num_eigenvalues.min(dim));
        for i in 0..eigenvalues.len() {
            eigenvalues[i] = h[[i, i]];
        }

        // Use the Arnoldi basis vectors as approximate eigenvectors.
        let mut eigenvectors = Array2::zeros((n, eigenvalues.len()));
        for (i, mut col) in eigenvectors
            .columns_mut()
            .into_iter()
            .enumerate()
            .take(eigenvalues.len())
        {
            if i < q_vectors.len() {
                col.assign(&q_vectors[i]);
            }
        }

        let values = Vector::from_array1(&eigenvalues.view(), &MemoryPool::new())?;
        let vectors = Matrix::from_array2(&eigenvectors.view(), &MemoryPool::new())?;

        Ok(EigResult { values, vectors })
    }
}
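
// Dimension sketch (illustrative): the Lanczos routine returns at most the
// requested number of approximate eigenvalues and matching eigenvector
// columns. The values are rough estimates (diagonal of the tridiagonal
// matrix), so only shapes are asserted here.
#[cfg(all(test, feature = "advanced_math"))]
mod eigensolver_examples {
    use super::*;

    #[test]
    fn lanczos_output_dimensions() {
        let pool = MemoryPool::new();
        let n = 4;
        let values: Vec<Complex64> = (1..=n).map(|i| Complex64::new(i as f64, 0.0)).collect();
        let col_indices: Vec<usize> = (0..n).collect();
        let row_ptr: Vec<usize> = (0..=n).collect();
        let diag = SparseMatrix::from_csr(&values, &col_indices, &row_ptr, n, n, &pool)
            .expect("valid CSR diagonal matrix");

        let result = AdvancedEigensolvers::lanczos(&diag, 2, 10, 1e-10).expect("lanczos");
        let eigenvalues = result.eigenvalues().to_array1().expect("dense copy");
        assert!(!eigenvalues.is_empty() && eigenvalues.len() <= 2);
        assert_eq!(result.eigenvectors().shape().0, n);
    }
}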

#[cfg(feature = "advanced_math")]
#[derive(Debug)]
pub struct AdvancedLinearAlgebra;

#[cfg(feature = "advanced_math")]
impl AdvancedLinearAlgebra {
    pub fn qr_decomposition(matrix: &Matrix) -> Result<QRResult> {
        use scirs2_core::ndarray::ndarray_linalg::QR;

        let qr_result = matrix
            .data
            .qr()
            .map_err(|_| SimulatorError::ComputationError("QR decomposition failed".to_string()))?;

        let pool = MemoryPool::new();
        let q = Matrix::from_array2(&qr_result.0.view(), &pool)?;
        let r = Matrix::from_array2(&qr_result.1.view(), &pool)?;

        Ok(QRResult { q, r })
    }

    pub fn cholesky_decomposition(matrix: &Matrix) -> Result<Matrix> {
        use scirs2_core::ndarray::ndarray_linalg::{Cholesky, UPLO};

        let chol_result = matrix.data.cholesky(UPLO::Lower).map_err(|_| {
            SimulatorError::ComputationError("Cholesky decomposition failed".to_string())
        })?;

        Matrix::from_array2(&chol_result.view(), &MemoryPool::new())
    }

    pub fn matrix_exponential(matrix: &Matrix, t: f64) -> Result<Matrix> {
        // Truncated Taylor series for exp(-i * t * H); adequate when ||tH|| is
        // small (no scaling-and-squaring is applied).
        let scaled_matrix = &matrix.data * Complex64::new(0.0, -t);

        let mut result = Array2::eye(scaled_matrix.nrows());
        let mut term = Array2::eye(scaled_matrix.nrows());

        for k in 1..20 {
            term = term.dot(&scaled_matrix) / Complex64::new(k as f64, 0.0);
            result += &term;

            if term.norm_l2().unwrap_or(f64::INFINITY) < 1e-12 {
                break;
            }
        }

        Matrix::from_array2(&result.view(), &MemoryPool::new())
    }

    pub fn pseudoinverse(matrix: &Matrix, tolerance: f64) -> Result<Matrix> {
        // Moore-Penrose pseudoinverse via the SVD: invert singular values
        // above the tolerance and recompose.
        let svd_result = LAPACK::svd(matrix)?;

        let u = svd_result.u.data;
        let s = svd_result.s.to_array1()?;
        let vt = svd_result.vt.data;

        let mut s_pinv = Array1::zeros(s.len());
        for (i, &sigma) in s.iter().enumerate() {
            if sigma.norm() > tolerance {
                s_pinv[i] = Complex64::new(1.0, 0.0) / sigma;
            }
        }

        let s_pinv_diag = Array2::from_diag(&s_pinv);
        let result = vt.t().dot(&s_pinv_diag).dot(&u.t());

        Matrix::from_array2(&result.view(), &MemoryPool::new())
    }

    pub fn condition_number(matrix: &Matrix) -> Result<f64> {
        // kappa = sigma_max / sigma_min over the nonzero singular values.
        let svd_result = LAPACK::svd(matrix)?;
        let s = svd_result.s.to_array1()?;

        let mut min_singular = f64::INFINITY;
        let mut max_singular: f64 = 0.0;

        for &sigma in &s {
            let sigma_norm = sigma.norm();
            if sigma_norm > 1e-15 {
                min_singular = min_singular.min(sigma_norm);
                max_singular = max_singular.max(sigma_norm);
            }
        }

        Ok(max_singular / min_singular)
    }
}
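
// Worked checks (illustrative): exp(-i * t * 0) is the identity, and the
// condition number of the identity is 1.
#[cfg(all(test, feature = "advanced_math"))]
mod advanced_linalg_examples {
    use super::*;

    #[test]
    fn exponential_of_zero_matrix_is_identity() {
        let pool = MemoryPool::new();
        let zero = Matrix::from_array2(&Array2::zeros((2, 2)).view(), &pool).expect("matrix");
        let exp = AdvancedLinearAlgebra::matrix_exponential(&zero, 1.0).expect("expm");
        assert!((exp.view()[[0, 0]] - Complex64::new(1.0, 0.0)).norm() < 1e-12);
        assert!((exp.view()[[1, 1]] - Complex64::new(1.0, 0.0)).norm() < 1e-12);
        assert!(exp.view()[[0, 1]].norm() < 1e-12);
    }

    #[test]
    fn identity_condition_number_is_one() {
        let pool = MemoryPool::new();
        let identity = Matrix::from_array2(&Array2::eye(3).view(), &pool).expect("matrix");
        let kappa = AdvancedLinearAlgebra::condition_number(&identity).expect("cond");
        assert!((kappa - 1.0).abs() < 1e-10);
    }
}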

#[cfg(feature = "advanced_math")]
#[derive(Debug, Clone)]
pub struct QRResult {
    pub q: Matrix,
    pub r: Matrix,
}

pub fn benchmark_scirs2_integration() -> Result<HashMap<String, f64>> {
    let mut results = HashMap::new();

    #[cfg(feature = "advanced_math")]
    {
        // FFT benchmark: 100 forward transforms of a length-1024 vector.
        let start = std::time::Instant::now();
        let engine = FftEngine::new();
        let test_vector = Vector::from_array1(
            &Array1::from_vec((0..1024).map(|i| Complex64::new(i as f64, 0.0)).collect()).view(),
            &MemoryPool::new(),
        )?;

        for _ in 0..100 {
            let _ = engine.forward(&test_vector)?;
        }

        let fft_time = start.elapsed().as_millis() as f64;
        results.insert("fft_1024_100_iterations".to_string(), fft_time);
    }

    #[cfg(feature = "advanced_math")]
    {
        use nalgebra_sparse::CsrMatrix;

        // Sparse-solver benchmark: conjugate gradient on a banded 100x100
        // system with 10 nonzeros per row.
        let start = std::time::Instant::now();

        // CSR row offsets need num_rows + 1 entries; the last entry is the
        // total number of nonzeros, and column indices must be sorted within
        // each row.
        let mut row_ptr = vec![0usize; 101];
        let mut col_indices = vec![0usize; 1000];
        let mut values = vec![Complex64::new(0.0, 0.0); 1000];

        for i in 0..100 {
            row_ptr[i] = i * 10;
            let mut cols: Vec<usize> = (0..10).map(|j| (i + j) % 100).collect();
            cols.sort_unstable();
            for (j, &col) in cols.iter().enumerate() {
                let idx = i * 10 + j;
                col_indices[idx] = col;
                values[idx] = Complex64::new(1.0, 0.0);
            }
        }
        row_ptr[100] = 1000;

        let csr = CsrMatrix::try_from_csr_data(100, 100, row_ptr, col_indices, values).map_err(
            |_| SimulatorError::ComputationError("Failed to create test matrix".to_string()),
        )?;

        let sparse_matrix = SparseMatrix { csr_matrix: csr };
        let b = Vector::from_array1(&Array1::ones(100).view(), &MemoryPool::new())?;

        let _ = SparseSolvers::conjugate_gradient(&sparse_matrix, &b, None, 1e-6, 100)?;

        let sparse_solver_time = start.elapsed().as_millis() as f64;
        results.insert("cg_solver_100x100".to_string(), sparse_solver_time);
    }

    #[cfg(feature = "advanced_math")]
    {
        // Dense linear-algebra benchmark: 10 QR decompositions of a 50x50
        // identity matrix.
        let start = std::time::Instant::now();

        let test_matrix = Matrix::from_array2(&Array2::eye(50).view(), &MemoryPool::new())?;
        for _ in 0..10 {
            let _ = AdvancedLinearAlgebra::qr_decomposition(&test_matrix)?;
        }

        let qr_time = start.elapsed().as_millis() as f64;
        results.insert("qr_decomposition_50x50_10_iterations".to_string(), qr_time);
    }

    Ok(results)
}