1use crate::error::{SparseError, SparseResult};
7use crate::linalg::interface::{AdjointOperator, LinearOperator};
8use crate::parallel_vector_ops::*;
9use scirs2_core::numeric::{Float, NumAssign};
10use std::fmt::Debug;
11use std::iter::Sum;
12
13use scirs2_core::simd_ops::SimdUnifiedOps;
15
/// Tuning knobs shared by the enhanced operator implementations in this module.
#[derive(Debug, Clone)]
pub struct EnhancedOperatorOptions {
    /// Enable the multi-threaded execution path for large vectors.
    pub use_parallel: bool,
    /// Minimum vector length before the parallel path is taken.
    pub parallel_threshold: usize,
    /// Enable SIMD-accelerated kernels.
    pub use_simd: bool,
    /// Minimum segment length before SIMD is used.
    pub simd_threshold: usize,
    /// Chunk size used when splitting work across threads.
    pub chunk_size: usize,
}
30
31impl Default for EnhancedOperatorOptions {
32 fn default() -> Self {
33 Self {
34 use_parallel: true,
35 parallel_threshold: 10000,
36 use_simd: true,
37 simd_threshold: 32,
38 chunk_size: 1024,
39 }
40 }
41}
42
/// Diagonal linear operator with optional parallel acceleration.
#[derive(Clone)]
pub struct EnhancedDiagonalOperator<F> {
    /// Diagonal entries; the length also fixes the (square) operator size.
    diagonal: Vec<F>,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
50
51impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> EnhancedDiagonalOperator<F> {
52 #[allow(dead_code)]
54 pub fn new(diagonal: Vec<F>) -> Self {
55 Self {
56 diagonal,
57 options: EnhancedOperatorOptions::default(),
58 }
59 }
60
61 #[allow(dead_code)]
63 pub fn with_options(diagonal: Vec<F>, options: EnhancedOperatorOptions) -> Self {
64 Self { diagonal, options }
65 }
66
67 pub fn diagonal(&self) -> &[F] {
69 &self.diagonal
70 }
71}
72
73impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> LinearOperator<F>
74 for EnhancedDiagonalOperator<F>
75{
76 fn shape(&self) -> (usize, usize) {
77 let n = self.diagonal.len();
78 (n, n)
79 }
80
81 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
82 if x.len() != self.diagonal.len() {
83 return Err(SparseError::DimensionMismatch {
84 expected: self.diagonal.len(),
85 found: x.len(),
86 });
87 }
88
89 let mut result = vec![F::zero(); x.len()];
90
91 if self.options.use_parallel && x.len() >= self.options.parallel_threshold {
95 use scirs2_core::parallel_ops::*;
97 let indices: Vec<usize> = (0..x.len()).collect();
98 let values = parallel_map(&indices, |&i| self.diagonal[i] * x[i]);
99 result = values;
100 } else {
101 #[allow(clippy::needless_range_loop)]
103 for i in 0..x.len() {
104 result[i] = self.diagonal[i] * x[i];
105 }
106 }
107
108 Ok(result)
109 }
110
111 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
112 self.matvec(x)
114 }
115
116 fn has_adjoint(&self) -> bool {
117 true
118 }
119}
120
/// Sum of two same-shaped operators: `y = A x + B x`.
pub struct EnhancedSumOperator<F> {
    a: Box<dyn LinearOperator<F>>,
    b: Box<dyn LinearOperator<F>>,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
128
129impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> EnhancedSumOperator<F> {
130 #[allow(dead_code)]
132 pub fn new(a: Box<dyn LinearOperator<F>>, b: Box<dyn LinearOperator<F>>) -> SparseResult<Self> {
133 if a.shape() != b.shape() {
134 return Err(SparseError::ShapeMismatch {
135 expected: a.shape(),
136 found: b.shape(),
137 });
138 }
139 Ok(Self {
140 a,
141 b,
142 options: EnhancedOperatorOptions::default(),
143 })
144 }
145
146 #[allow(dead_code)]
148 pub fn with_options(
149 a: Box<dyn LinearOperator<F>>,
150 b: Box<dyn LinearOperator<F>>,
151 #[allow(dead_code)] options: EnhancedOperatorOptions,
152 ) -> SparseResult<Self> {
153 if a.shape() != b.shape() {
154 return Err(SparseError::ShapeMismatch {
155 expected: a.shape(),
156 found: b.shape(),
157 });
158 }
159 Ok(Self { a, b, options })
160 }
161}
162
163impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> LinearOperator<F>
164 for EnhancedSumOperator<F>
165{
166 fn shape(&self) -> (usize, usize) {
167 self.a.shape()
168 }
169
170 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
171 let a_result = self.a.matvec(x)?;
172 let b_result = self.b.matvec(x)?;
173
174 let mut result = vec![F::zero(); a_result.len()];
175 let parallel_opts = Some(ParallelVectorOptions {
176 use_parallel: self.options.use_parallel,
177 parallel_threshold: self.options.parallel_threshold,
178 chunk_size: self.options.chunk_size,
179 use_simd: self.options.use_simd,
180 simd_threshold: self.options.simd_threshold,
181 });
182
183 parallel_vector_add(&a_result, &b_result, &mut result, parallel_opts);
185 Ok(result)
186 }
187
188 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
189 if !self.a.has_adjoint() || !self.b.has_adjoint() {
190 return Err(SparseError::OperationNotSupported(
191 "adjoint not supported for one or both operators".to_string(),
192 ));
193 }
194 let a_result = self.a.rmatvec(x)?;
195 let b_result = self.b.rmatvec(x)?;
196
197 let mut result = vec![F::zero(); a_result.len()];
198 let parallel_opts = Some(ParallelVectorOptions {
199 use_parallel: self.options.use_parallel,
200 parallel_threshold: self.options.parallel_threshold,
201 chunk_size: self.options.chunk_size,
202 use_simd: self.options.use_simd,
203 simd_threshold: self.options.simd_threshold,
204 });
205
206 parallel_vector_add(&a_result, &b_result, &mut result, parallel_opts);
207 Ok(result)
208 }
209
210 fn has_adjoint(&self) -> bool {
211 self.a.has_adjoint() && self.b.has_adjoint()
212 }
213}
214
/// Difference of two same-shaped operators: `y = A x - B x`.
pub struct EnhancedDifferenceOperator<F> {
    a: Box<dyn LinearOperator<F>>,
    b: Box<dyn LinearOperator<F>>,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
222
223impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
224 EnhancedDifferenceOperator<F>
225{
226 #[allow(dead_code)]
228 pub fn new(a: Box<dyn LinearOperator<F>>, b: Box<dyn LinearOperator<F>>) -> SparseResult<Self> {
229 if a.shape() != b.shape() {
230 return Err(SparseError::ShapeMismatch {
231 expected: a.shape(),
232 found: b.shape(),
233 });
234 }
235 Ok(Self {
236 a,
237 b,
238 options: EnhancedOperatorOptions::default(),
239 })
240 }
241
242 #[allow(dead_code)]
244 pub fn with_options(
245 a: Box<dyn LinearOperator<F>>,
246 b: Box<dyn LinearOperator<F>>,
247 #[allow(dead_code)] options: EnhancedOperatorOptions,
248 ) -> SparseResult<Self> {
249 if a.shape() != b.shape() {
250 return Err(SparseError::ShapeMismatch {
251 expected: a.shape(),
252 found: b.shape(),
253 });
254 }
255 Ok(Self { a, b, options })
256 }
257}
258
259impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> LinearOperator<F>
260 for EnhancedDifferenceOperator<F>
261{
262 fn shape(&self) -> (usize, usize) {
263 self.a.shape()
264 }
265
266 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
267 let a_result = self.a.matvec(x)?;
268 let b_result = self.b.matvec(x)?;
269
270 let mut result = vec![F::zero(); a_result.len()];
271 let parallel_opts = Some(ParallelVectorOptions {
272 use_parallel: self.options.use_parallel,
273 parallel_threshold: self.options.parallel_threshold,
274 chunk_size: self.options.chunk_size,
275 use_simd: self.options.use_simd,
276 simd_threshold: self.options.simd_threshold,
277 });
278
279 parallel_vector_sub(&a_result, &b_result, &mut result, parallel_opts);
281 Ok(result)
282 }
283
284 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
285 if !self.a.has_adjoint() || !self.b.has_adjoint() {
286 return Err(SparseError::OperationNotSupported(
287 "adjoint not supported for one or both operators".to_string(),
288 ));
289 }
290 let a_result = self.a.rmatvec(x)?;
291 let b_result = self.b.rmatvec(x)?;
292
293 let mut result = vec![F::zero(); a_result.len()];
294 let parallel_opts = Some(ParallelVectorOptions {
295 use_parallel: self.options.use_parallel,
296 parallel_threshold: self.options.parallel_threshold,
297 chunk_size: self.options.chunk_size,
298 use_simd: self.options.use_simd,
299 simd_threshold: self.options.simd_threshold,
300 });
301
302 parallel_vector_sub(&a_result, &b_result, &mut result, parallel_opts);
303 Ok(result)
304 }
305
306 fn has_adjoint(&self) -> bool {
307 self.a.has_adjoint() && self.b.has_adjoint()
308 }
309}
310
/// Scalar multiple of an operator: `y = alpha * (A x)`.
pub struct EnhancedScaledOperator<F> {
    /// The scalar factor.
    alpha: F,
    /// The wrapped operator.
    operator: Box<dyn LinearOperator<F>>,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
318
319impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> EnhancedScaledOperator<F> {
320 #[allow(dead_code)]
322 pub fn new(alpha: F, operator: Box<dyn LinearOperator<F>>) -> Self {
323 Self {
324 alpha,
325 operator,
326 options: EnhancedOperatorOptions::default(),
327 }
328 }
329
330 #[allow(dead_code)]
332 pub fn with_options(
333 alpha: F,
334 operator: Box<dyn LinearOperator<F>>,
335 #[allow(dead_code)] options: EnhancedOperatorOptions,
336 ) -> Self {
337 Self {
338 alpha,
339 operator,
340 options,
341 }
342 }
343}
344
345impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> LinearOperator<F>
346 for EnhancedScaledOperator<F>
347{
348 fn shape(&self) -> (usize, usize) {
349 self.operator.shape()
350 }
351
352 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
353 let result = self.operator.matvec(x)?;
354 let mut scaled_result = vec![F::zero(); result.len()];
355
356 let parallel_opts = Some(ParallelVectorOptions {
357 use_parallel: self.options.use_parallel,
358 parallel_threshold: self.options.parallel_threshold,
359 chunk_size: self.options.chunk_size,
360 use_simd: self.options.use_simd,
361 simd_threshold: self.options.simd_threshold,
362 });
363
364 parallel_vector_scale(self.alpha, &result, &mut scaled_result, parallel_opts);
366 Ok(scaled_result)
367 }
368
369 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
370 if !self.operator.has_adjoint() {
371 return Err(SparseError::OperationNotSupported(
372 "adjoint not supported for underlying operator".to_string(),
373 ));
374 }
375 let result = self.operator.rmatvec(x)?;
376 let mut scaled_result = vec![F::zero(); result.len()];
377
378 let parallel_opts = Some(ParallelVectorOptions {
379 use_parallel: self.options.use_parallel,
380 parallel_threshold: self.options.parallel_threshold,
381 chunk_size: self.options.chunk_size,
382 use_simd: self.options.use_simd,
383 simd_threshold: self.options.simd_threshold,
384 });
385
386 parallel_vector_scale(self.alpha, &result, &mut scaled_result, parallel_opts);
387 Ok(scaled_result)
388 }
389
390 fn has_adjoint(&self) -> bool {
391 self.operator.has_adjoint()
392 }
393}
394
/// 1-D convolution expressed as a linear operator.
pub struct ConvolutionOperator<F> {
    /// Convolution kernel taps.
    kernel: Vec<F>,
    /// Expected input vector length.
    input_size: usize,
    /// Output length derived from `input_size`, the kernel length, and `mode`.
    output_size: usize,
    /// Output-length policy (Full / Same / Valid).
    mode: ConvolutionMode,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
404
/// Output-length policy for 1-D convolution.
#[derive(Debug, Clone, Copy)]
pub enum ConvolutionMode {
    /// Output length `input + kernel - 1` (every partial overlap).
    Full,
    /// Output length equals the input length (centered).
    Same,
    /// Output length `input - kernel + 1` (complete overlaps only; 0 if the kernel is longer).
    Valid,
}
414
415impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> ConvolutionOperator<F> {
416 #[allow(dead_code)]
418 pub fn new(kernel: Vec<F>, input_size: usize, mode: ConvolutionMode) -> Self {
419 let output_size = match mode {
420 ConvolutionMode::Full => input_size + kernel.len() - 1,
421 ConvolutionMode::Same => input_size,
422 ConvolutionMode::Valid => {
423 if input_size >= kernel.len() {
424 input_size - kernel.len() + 1
425 } else {
426 0
427 }
428 }
429 };
430
431 Self {
432 kernel,
433 input_size,
434 output_size,
435 mode,
436 options: EnhancedOperatorOptions::default(),
437 }
438 }
439
440 fn optimize_convolution_parallel(&self, result: &mut [F], x: &[F]) -> SparseResult<()> {
442 use scirs2_core::parallel_ops::*;
443
444 let chunk_size = self.options.chunk_size.min(self.output_size);
446 let indices: Vec<usize> = (0..self.output_size).collect();
447 let chunks: Vec<&[usize]> = indices.chunks(chunk_size).collect();
448
449 let parallel_results: Vec<Vec<F>> = parallel_map(&chunks, |chunk| {
450 let mut chunk_result = vec![F::zero(); chunk.len()];
451
452 for (local_i, &global_i) in chunk.iter().enumerate() {
453 chunk_result[local_i] = self.compute_convolution_at_index(global_i, x);
454 }
455
456 chunk_result
457 });
458
459 let mut result_idx = 0;
461 for chunk_result in parallel_results {
462 for val in chunk_result {
463 if result_idx < result.len() {
464 result[result_idx] = val;
465 result_idx += 1;
466 }
467 }
468 }
469
470 Ok(())
471 }
472
473 fn optimize_convolution_simd(&self, result: &mut [F], x: &[F]) -> SparseResult<()> {
475 #[allow(clippy::needless_range_loop)]
479 for i in 0..self.output_size {
480 let convolution_result = self.compute_convolution_at_index_simd(i, x);
482 result[i] = convolution_result;
483 }
484
485 Ok(())
486 }
487
488 fn compute_convolution_at_index(&self, i: usize, x: &[F]) -> F {
490 let mut sum = F::zero();
491
492 match self.mode {
493 ConvolutionMode::Full => {
494 for (j, &kernel_val) in self.kernel.iter().enumerate() {
495 if i >= j && (i - j) < x.len() {
496 sum += kernel_val * x[i - j];
497 }
498 }
499 }
500 ConvolutionMode::Same => {
501 let pad = self.kernel.len() / 2;
502 for (j, &kernel_val) in self.kernel.iter().enumerate() {
503 let idx = i + j;
504 if idx >= pad && (idx - pad) < x.len() {
505 sum += kernel_val * x[idx - pad];
506 }
507 }
508 }
509 ConvolutionMode::Valid => {
510 for (j, &kernel_val) in self.kernel.iter().enumerate() {
511 let idx = i + j;
512 if idx < x.len() {
513 sum += kernel_val * x[idx];
514 }
515 }
516 }
517 }
518
519 sum
520 }
521
522 fn compute_convolution_at_index_simd(&self, i: usize, x: &[F]) -> F {
524 let mut x_segment = Vec::new();
526 let mut kernel_segment = Vec::new();
527
528 match self.mode {
529 ConvolutionMode::Full => {
530 for (j, &kernel_val) in self.kernel.iter().enumerate() {
531 if i >= j && (i - j) < x.len() {
532 kernel_segment.push(kernel_val);
533 x_segment.push(x[i - j]);
534 }
535 }
536 }
537 ConvolutionMode::Same => {
538 let pad = self.kernel.len() / 2;
539 for (j, &kernel_val) in self.kernel.iter().enumerate() {
540 let idx = i + j;
541 if idx >= pad && (idx - pad) < x.len() {
542 kernel_segment.push(kernel_val);
543 x_segment.push(x[idx - pad]);
544 }
545 }
546 }
547 ConvolutionMode::Valid => {
548 for (j, &kernel_val) in self.kernel.iter().enumerate() {
549 let idx = i + j;
550 if idx < x.len() {
551 kernel_segment.push(kernel_val);
552 x_segment.push(x[idx]);
553 }
554 }
555 }
556 }
557
558 if kernel_segment.len() >= self.options.simd_threshold {
560 use scirs2_core::ndarray::ArrayView1;
561 let kernel_view = ArrayView1::from(&kernel_segment);
562 let x_view = ArrayView1::from(&x_segment);
563 F::simd_dot(&kernel_view, &x_view)
564 } else {
565 kernel_segment
567 .iter()
568 .zip(x_segment.iter())
569 .map(|(&k, &x)| k * x)
570 .sum()
571 }
572 }
573
574 #[allow(dead_code)]
576 pub fn with_options(
577 kernel: Vec<F>,
578 input_size: usize,
579 mode: ConvolutionMode,
580 #[allow(dead_code)] options: EnhancedOperatorOptions,
581 ) -> Self {
582 let output_size = match mode {
583 ConvolutionMode::Full => input_size + kernel.len() - 1,
584 ConvolutionMode::Same => input_size,
585 ConvolutionMode::Valid => {
586 if input_size >= kernel.len() {
587 input_size - kernel.len() + 1
588 } else {
589 0
590 }
591 }
592 };
593
594 Self {
595 kernel,
596 input_size,
597 output_size,
598 mode,
599 options,
600 }
601 }
602}
603
604impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> LinearOperator<F>
605 for ConvolutionOperator<F>
606{
607 fn shape(&self) -> (usize, usize) {
608 (self.output_size, self.input_size)
609 }
610
611 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
612 if x.len() != self.input_size {
613 return Err(SparseError::DimensionMismatch {
614 expected: self.input_size,
615 found: x.len(),
616 });
617 }
618
619 let mut result = vec![F::zero(); self.output_size];
620
621 match self.mode {
623 ConvolutionMode::Full => {
624 for i in 0..self.output_size {
625 let mut sum = F::zero();
626 for (j, &kernel_val) in self.kernel.iter().enumerate() {
627 if i >= j && (i - j) < x.len() {
628 sum += kernel_val * x[i - j];
629 }
630 }
631 result[i] = sum;
632 }
633 }
634 ConvolutionMode::Same => {
635 let pad = self.kernel.len() / 2;
636 #[allow(clippy::needless_range_loop)]
637 for i in 0..self.output_size {
638 let mut sum = F::zero();
639 for (j, &kernel_val) in self.kernel.iter().enumerate() {
640 let idx = i + j;
641 if idx >= pad && (idx - pad) < x.len() {
642 sum += kernel_val * x[idx - pad];
643 }
644 }
645 result[i] = sum;
646 }
647 }
648 ConvolutionMode::Valid =>
649 {
650 #[allow(clippy::needless_range_loop)]
651 for i in 0..self.output_size {
652 let mut sum = F::zero();
653 for (j, &kernel_val) in self.kernel.iter().enumerate() {
654 let idx = i + j;
655 if idx < x.len() {
656 sum += kernel_val * x[idx];
657 }
658 }
659 result[i] = sum;
660 }
661 }
662 }
663
664 if self.options.use_parallel && self.output_size >= self.options.parallel_threshold {
666 self.optimize_convolution_parallel(&mut result, x)?;
667 } else if self.options.use_simd && self.kernel.len() >= self.options.simd_threshold {
668 self.optimize_convolution_simd(&mut result, x)?;
669 }
670
671 Ok(result)
672 }
673
674 fn has_adjoint(&self) -> bool {
675 true
676 }
677
678 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
679 if x.len() != self.output_size {
680 return Err(SparseError::DimensionMismatch {
681 expected: self.output_size,
682 found: x.len(),
683 });
684 }
685
686 let mut result = vec![F::zero(); self.input_size];
688 let flipped_kernel: Vec<F> = self.kernel.iter().rev().copied().collect();
689
690 match self.mode {
692 ConvolutionMode::Full =>
693 {
694 #[allow(clippy::needless_range_loop)]
695 for i in 0..self.input_size {
696 let mut sum = F::zero();
697 for (j, &kernel_val) in flipped_kernel.iter().enumerate() {
698 let idx = i + j;
699 if idx < x.len() {
700 sum += kernel_val * x[idx];
701 }
702 }
703 result[i] = sum;
704 }
705 }
706 ConvolutionMode::Same => {
707 let pad = flipped_kernel.len() / 2;
708 for i in 0..self.input_size {
709 let mut sum = F::zero();
710 for (j, &kernel_val) in flipped_kernel.iter().enumerate() {
711 if i + pad >= j && (i + pad - j) < x.len() {
712 sum += kernel_val * x[i + pad - j];
713 }
714 }
715 result[i] = sum;
716 }
717 }
718 ConvolutionMode::Valid => {
719 for i in 0..self.input_size {
720 let mut sum = F::zero();
721 for (j, &kernel_val) in flipped_kernel.iter().enumerate() {
722 if i >= j && (i - j) < x.len() {
723 sum += kernel_val * x[i - j];
724 }
725 }
726 result[i] = sum;
727 }
728 }
729 }
730
731 Ok(result)
732 }
733}
734
/// Central finite-difference operator on a uniform 1-D grid.
pub struct FiniteDifferenceOperator<F> {
    /// Number of grid points (the operator is size x size).
    size: usize,
    /// Derivative order; only 1 and 2 have stencils (see `get_coefficients`).
    order: usize,
    /// Grid spacing `h`.
    spacing: F,
    /// Handling of stencil points that fall outside the domain.
    boundary: BoundaryCondition,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
744
/// Boundary handling for the finite-difference stencil.
#[derive(Debug, Clone, Copy)]
pub enum BoundaryCondition {
    /// Out-of-range samples clamp to the nearest edge value.
    Neumann,
    /// Out-of-range samples are treated as zero.
    Dirichlet,
    /// Indices wrap around modulo the domain size.
    Periodic,
}
754
755impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> FiniteDifferenceOperator<F> {
756 #[allow(dead_code)]
758 pub fn new(size: usize, order: usize, spacing: F, boundary: BoundaryCondition) -> Self {
759 Self {
760 size,
761 order,
762 spacing,
763 boundary,
764 options: EnhancedOperatorOptions::default(),
765 }
766 }
767
768 #[allow(dead_code)]
770 pub fn with_options(
771 size: usize,
772 order: usize,
773 spacing: F,
774 boundary: BoundaryCondition,
775 #[allow(dead_code)] options: EnhancedOperatorOptions,
776 ) -> Self {
777 Self {
778 size,
779 order,
780 spacing,
781 boundary,
782 options,
783 }
784 }
785
786 fn get_coefficients(&self) -> Vec<F> {
788 match self.order {
789 1 => {
790 vec![
792 -F::one() / (F::from(2.0).unwrap() * self.spacing),
793 F::zero(),
794 F::one() / (F::from(2.0).unwrap() * self.spacing),
795 ]
796 }
797 2 => {
798 let h_sq = self.spacing * self.spacing;
800 vec![
801 F::one() / h_sq,
802 -F::from(2.0).unwrap() / h_sq,
803 F::one() / h_sq,
804 ]
805 }
806 _ => {
807 vec![F::zero(); 2 * self.order + 1]
810 }
811 }
812 }
813}
814
impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> LinearOperator<F>
    for FiniteDifferenceOperator<F>
{
    /// Square operator over `size` grid points.
    fn shape(&self) -> (usize, usize) {
        (self.size, self.size)
    }

    /// Applies the central-difference stencil to `x` with the configured
    /// boundary handling.
    ///
    /// # Errors
    /// `DimensionMismatch` when `x.len() != size`.
    fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
        if x.len() != self.size {
            return Err(SparseError::DimensionMismatch {
                expected: self.size,
                found: x.len(),
            });
        }

        let mut result = vec![F::zero(); self.size];
        let coeffs = self.get_coefficients();
        // The stencil is centered: coeffs[stencil_radius] multiplies x[i].
        let stencil_radius = coeffs.len() / 2;

        #[allow(clippy::needless_range_loop)]
        for i in 0..self.size {
            let mut sum = F::zero();
            for (j, &coeff) in coeffs.iter().enumerate() {
                // Signed index of the neighbour this coefficient applies to.
                let idx = i as isize + j as isize - stencil_radius as isize;

                let value = match self.boundary {
                    BoundaryCondition::Neumann => {
                        // Clamp to the boundary sample (zero-gradient edges).
                        if idx < 0 {
                            x[0]
                        } else if idx >= self.size as isize {
                            x[self.size - 1]
                        } else {
                            x[idx as usize]
                        }
                    }
                    BoundaryCondition::Dirichlet => {
                        // Out-of-domain samples contribute zero.
                        if idx < 0 || idx >= self.size as isize {
                            F::zero()
                        } else {
                            x[idx as usize]
                        }
                    }
                    BoundaryCondition::Periodic => {
                        // Euclidean modulo so negative indices wrap correctly.
                        let periodic_idx = ((idx % self.size as isize + self.size as isize)
                            % self.size as isize)
                            as usize;
                        x[periodic_idx]
                    }
                };

                sum += coeff * value;
            }
            result[i] = sum;
        }

        Ok(result)
    }

    fn has_adjoint(&self) -> bool {
        true
    }

    /// NOTE(review): delegating the adjoint to `matvec` is only correct when
    /// the stencil matrix is symmetric (the order-2 stencil). The order-1
    /// centered difference is antisymmetric, so its true adjoint is the
    /// negation of `matvec` (and boundary rows differ further for
    /// Neumann/Periodic) — confirm the intended semantics for order 1.
    fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
        self.matvec(x)
    }
}
883
/// Convenience constructor: boxed [`EnhancedDiagonalOperator`] with default options.
#[allow(dead_code)]
pub fn enhanced_diagonal<
    F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    diagonal: Vec<F>,
) -> Box<dyn LinearOperator<F>> {
    Box::new(EnhancedDiagonalOperator::new(diagonal))
}
894
/// Convenience constructor: boxed sum operator `left + right`.
///
/// # Errors
/// `ShapeMismatch` if the operators have different shapes.
#[allow(dead_code)]
pub fn enhanced_add<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static>(
    left: Box<dyn LinearOperator<F>>,
    right: Box<dyn LinearOperator<F>>,
) -> SparseResult<Box<dyn LinearOperator<F>>> {
    Ok(Box::new(EnhancedSumOperator::new(left, right)?))
}
903
/// Convenience constructor: boxed difference operator `left - right`.
///
/// # Errors
/// `ShapeMismatch` if the operators have different shapes.
#[allow(dead_code)]
pub fn enhanced_subtract<
    F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    left: Box<dyn LinearOperator<F>>,
    right: Box<dyn LinearOperator<F>>,
) -> SparseResult<Box<dyn LinearOperator<F>>> {
    Ok(Box::new(EnhancedDifferenceOperator::new(left, right)?))
}
914
/// Convenience constructor: boxed scaled operator `alpha * operator`.
#[allow(dead_code)]
pub fn enhanced_scale<
    F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    alpha: F,
    operator: Box<dyn LinearOperator<F>>,
) -> Box<dyn LinearOperator<F>> {
    Box::new(EnhancedScaledOperator::new(alpha, operator))
}
925
/// Convenience constructor: boxed 1-D [`ConvolutionOperator`] with default options.
#[allow(dead_code)]
pub fn convolution_operator<
    F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    kernel: Vec<F>,
    input_size: usize,
    mode: ConvolutionMode,
) -> Box<dyn LinearOperator<F>> {
    Box::new(ConvolutionOperator::new(kernel, input_size, mode))
}
937
/// Convenience constructor: boxed [`FiniteDifferenceOperator`] with default options.
#[allow(dead_code)]
pub fn finite_difference_operator<
    F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    size: usize,
    order: usize,
    spacing: F,
    boundary: BoundaryCondition,
) -> Box<dyn LinearOperator<F>> {
    Box::new(FiniteDifferenceOperator::new(
        size, order, spacing, boundary,
    ))
}
952
953pub struct EnhancedCompositionOperator<F> {
955 left: Box<dyn LinearOperator<F>>, right: Box<dyn LinearOperator<F>>, #[allow(dead_code)]
958 options: EnhancedOperatorOptions,
959}
960
961impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
962 EnhancedCompositionOperator<F>
963{
964 #[allow(dead_code)]
967 pub fn new(
968 left: Box<dyn LinearOperator<F>>,
969 right: Box<dyn LinearOperator<F>>,
970 ) -> SparseResult<Self> {
971 let (left_rows, left_cols) = left.shape();
972 let (right_rows, right_cols) = right.shape();
973
974 if left_cols != right_rows {
975 return Err(SparseError::ShapeMismatch {
976 expected: (left_rows, right_rows),
977 found: (left_rows, left_cols),
978 });
979 }
980
981 Ok(Self {
982 left,
983 right,
984 options: EnhancedOperatorOptions::default(),
985 })
986 }
987
988 #[allow(dead_code)]
990 pub fn with_options(
991 left: Box<dyn LinearOperator<F>>,
992 right: Box<dyn LinearOperator<F>>,
993 options: EnhancedOperatorOptions,
994 ) -> SparseResult<Self> {
995 let (left_rows, left_cols) = left.shape();
996 let (right_rows, right_cols) = right.shape();
997
998 if left_cols != right_rows {
999 return Err(SparseError::ShapeMismatch {
1000 expected: (left_rows, right_rows),
1001 found: (left_rows, left_cols),
1002 });
1003 }
1004
1005 Ok(Self {
1006 left,
1007 right,
1008 options,
1009 })
1010 }
1011}
1012
1013impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> LinearOperator<F>
1014 for EnhancedCompositionOperator<F>
1015{
1016 fn shape(&self) -> (usize, usize) {
1017 let (left_rows, _) = self.left.shape();
1018 let (_, right_cols) = self.right.shape();
1019 (left_rows, right_cols)
1020 }
1021
1022 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1023 let intermediate = self.right.matvec(x)?;
1025 self.left.matvec(&intermediate)
1026 }
1027
1028 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1029 if !self.left.has_adjoint() || !self.right.has_adjoint() {
1030 return Err(SparseError::OperationNotSupported(
1031 "adjoint not supported for one or both operators".to_string(),
1032 ));
1033 }
1034
1035 let intermediate = self.left.rmatvec(x)?;
1037 self.right.rmatvec(&intermediate)
1038 }
1039
1040 fn has_adjoint(&self) -> bool {
1041 self.left.has_adjoint() && self.right.has_adjoint()
1042 }
1043}
1044
/// Applies a scalar function element-wise.
///
/// NOTE(review): this is only a linear operator when the wrapped function is
/// linear; accordingly `has_adjoint` returns false for this type.
pub struct ElementwiseFunctionOperator<F> {
    /// The function applied to each element.
    function: Box<dyn Fn(F) -> F + Send + Sync>,
    /// Expected input (and output) length.
    size: usize,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
1052
1053impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
1054 ElementwiseFunctionOperator<F>
1055{
1056 pub fn new<Func>(function: Func, size: usize) -> Self
1058 where
1059 Func: Fn(F) -> F + Send + Sync + 'static,
1060 {
1061 Self {
1062 function: Box::new(function),
1063 size,
1064 options: EnhancedOperatorOptions::default(),
1065 }
1066 }
1067
1068 #[allow(dead_code)]
1070 pub fn with_options<Func>(function: Func, size: usize, options: EnhancedOperatorOptions) -> Self
1071 where
1072 Func: Fn(F) -> F + Send + Sync + 'static,
1073 {
1074 Self {
1075 function: Box::new(function),
1076 size,
1077 options,
1078 }
1079 }
1080}
1081
1082impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> LinearOperator<F>
1083 for ElementwiseFunctionOperator<F>
1084{
1085 fn shape(&self) -> (usize, usize) {
1086 (self.size, self.size)
1087 }
1088
1089 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1090 if x.len() != self.size {
1091 return Err(SparseError::DimensionMismatch {
1092 expected: self.size,
1093 found: x.len(),
1094 });
1095 }
1096
1097 if self.options.use_parallel && x.len() >= self.options.parallel_threshold {
1098 use scirs2_core::parallel_ops::*;
1100 let result = parallel_map(x, |&val| (self.function)(val));
1101 Ok(result)
1102 } else {
1103 let result: Vec<F> = x.iter().map(|&val| (self.function)(val)).collect();
1105 Ok(result)
1106 }
1107 }
1108
1109 fn has_adjoint(&self) -> bool {
1110 false }
1112
1113 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1114 Err(SparseError::OperationNotSupported(
1115 "adjoint not supported for general element-wise functions".to_string(),
1116 ))
1117 }
1118}
1119
/// Kronecker product `left ⊗ right` of two operators.
pub struct KroneckerProductOperator<F> {
    left: Box<dyn LinearOperator<F>>,
    right: Box<dyn LinearOperator<F>>,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
1127
1128impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> KroneckerProductOperator<F> {
1129 #[allow(dead_code)]
1131 pub fn new(left: Box<dyn LinearOperator<F>>, right: Box<dyn LinearOperator<F>>) -> Self {
1132 Self {
1133 left,
1134 right,
1135 options: EnhancedOperatorOptions::default(),
1136 }
1137 }
1138
1139 #[allow(dead_code)]
1141 pub fn with_options(
1142 left: Box<dyn LinearOperator<F>>,
1143 right: Box<dyn LinearOperator<F>>,
1144 options: EnhancedOperatorOptions,
1145 ) -> Self {
1146 Self {
1147 left,
1148 right,
1149 options,
1150 }
1151 }
1152}
1153
1154impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> LinearOperator<F>
1155 for KroneckerProductOperator<F>
1156{
1157 fn shape(&self) -> (usize, usize) {
1158 let (left_rows, left_cols) = self.left.shape();
1159 let (right_rows, right_cols) = self.right.shape();
1160 (left_rows * right_rows, left_cols * right_cols)
1161 }
1162
1163 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1164 let (left_rows, left_cols) = self.left.shape();
1165 let (right_rows, right_cols) = self.right.shape();
1166
1167 if x.len() != left_cols * right_cols {
1168 return Err(SparseError::DimensionMismatch {
1169 expected: left_cols * right_cols,
1170 found: x.len(),
1171 });
1172 }
1173
1174 let mut result = vec![F::zero(); left_rows * right_rows];
1177
1178 for j in 0..left_cols {
1179 let mut col: Vec<F> = Vec::with_capacity(right_cols);
1181 for i in 0..right_cols {
1182 col.push(x[i * left_cols + j]);
1183 }
1184
1185 let right_result = self.right.matvec(&col)?;
1187
1188 for i in 0..right_rows {
1190 result[i * left_cols + j] = right_result[i];
1191 }
1192 }
1193
1194 let mut final_result = vec![F::zero(); left_rows * right_rows];
1196 for i in 0..right_rows {
1197 let mut row: Vec<F> = Vec::with_capacity(left_cols);
1199 for j in 0..left_cols {
1200 row.push(result[i * left_cols + j]);
1201 }
1202
1203 let left_result = self.left.matvec(&row)?;
1205
1206 for k in 0..left_rows {
1208 final_result[k * right_rows + i] = left_result[k];
1209 }
1210 }
1211
1212 Ok(final_result)
1213 }
1214
1215 fn has_adjoint(&self) -> bool {
1216 self.left.has_adjoint() && self.right.has_adjoint()
1217 }
1218
1219 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1220 if !self.has_adjoint() {
1221 return Err(SparseError::OperationNotSupported(
1222 "adjoint not supported for one or both operators".to_string(),
1223 ));
1224 }
1225
1226 Err(SparseError::OperationNotSupported(
1230 "Kronecker product adjoint requires operator cloning which is not currently supported"
1231 .to_string(),
1232 ))
1233 }
1234}
1235
/// Convenience constructor: boxed composition `left ∘ right` (applies `right` first).
///
/// # Errors
/// `ShapeMismatch` when the inner dimensions are incompatible.
#[allow(dead_code)]
pub fn enhanced_compose<
    F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    left: Box<dyn LinearOperator<F>>,
    right: Box<dyn LinearOperator<F>>,
) -> SparseResult<Box<dyn LinearOperator<F>>> {
    Ok(Box::new(EnhancedCompositionOperator::new(left, right)?))
}
1246
/// Convenience constructor: boxed element-wise function operator.
///
/// NOTE(review): name would conventionally be `elementwise_function`; kept
/// as-is for backward compatibility with existing callers.
#[allow(dead_code)]
pub fn elementwisefunction<
    F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
    Func: Fn(F) -> F + Send + Sync + 'static,
>(
    function: Func,
    size: usize,
) -> Box<dyn LinearOperator<F>> {
    Box::new(ElementwiseFunctionOperator::new(function, size))
}
1258
/// Convenience constructor: boxed Kronecker product `left ⊗ right`.
#[allow(dead_code)]
pub fn kronecker_product<
    F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    left: Box<dyn LinearOperator<F>>,
    right: Box<dyn LinearOperator<F>>,
) -> Box<dyn LinearOperator<F>> {
    Box::new(KroneckerProductOperator::new(left, right))
}
1269
/// Wraps `operator` in an [`AdjointOperator`] so matvec/rmatvec are swapped.
///
/// # Errors
/// Propagates any error from `AdjointOperator::new`.
#[allow(dead_code)]
pub fn adjoint_operator<
    F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    operator: Box<dyn LinearOperator<F>>,
) -> SparseResult<Box<dyn LinearOperator<F>>> {
    Ok(Box::new(AdjointOperator::new(operator)?))
}
1279
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    #[test]
    fn test_enhanced_diagonal_operator() {
        let op = EnhancedDiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let result = op.matvec(&[1.0, 2.0, 3.0]).unwrap();
        assert_eq!(result, vec![2.0, 6.0, 12.0]);
    }

    #[test]
    fn test_enhanced_sum_operator() {
        // (D1 + D2) * ones = element-wise sum of the two diagonals.
        let sum_op = EnhancedSumOperator::new(
            enhanced_diagonal(vec![1.0, 2.0, 3.0]),
            enhanced_diagonal(vec![2.0, 1.0, 1.0]),
        )
        .unwrap();

        let result = sum_op.matvec(&[1.0, 1.0, 1.0]).unwrap();
        assert_eq!(result, vec![3.0, 3.0, 4.0]);
    }

    #[test]
    fn test_enhanced_difference_operator() {
        // (D1 - D2) * ones = element-wise difference of the two diagonals.
        let diff_op = EnhancedDifferenceOperator::new(
            enhanced_diagonal(vec![3.0, 4.0, 5.0]),
            enhanced_diagonal(vec![1.0, 2.0, 1.0]),
        )
        .unwrap();

        let result = diff_op.matvec(&[1.0, 1.0, 1.0]).unwrap();
        assert_eq!(result, vec![2.0, 2.0, 4.0]);
    }

    #[test]
    fn test_enhanced_scaled_operator() {
        let scaled_op = EnhancedScaledOperator::new(2.0, enhanced_diagonal(vec![1.0, 2.0, 3.0]));

        let result = scaled_op.matvec(&[1.0, 1.0, 1.0]).unwrap();
        assert_eq!(result, vec![2.0, 4.0, 6.0]);
    }

    #[test]
    fn test_convolution_operator_full() {
        // Convolving a unit impulse reproduces the kernel (plus zero padding).
        let conv_op = ConvolutionOperator::new(vec![1.0, 2.0, 3.0], 3, ConvolutionMode::Full);

        let result = conv_op.matvec(&[1.0, 0.0, 0.0]).unwrap();
        assert_eq!(result, vec![1.0, 2.0, 3.0, 0.0, 0.0]);
    }

    #[test]
    fn test_convolution_operator_same() {
        // An identity (center-spike) kernel leaves the signal unchanged.
        let conv_op = ConvolutionOperator::new(vec![0.0, 1.0, 0.0], 3, ConvolutionMode::Same);

        let result = conv_op.matvec(&[1.0, 2.0, 3.0]).unwrap();
        assert_relative_eq!(result[0], 1.0, epsilon = 1e-10);
        assert_relative_eq!(result[1], 2.0, epsilon = 1e-10);
        assert_relative_eq!(result[2], 3.0, epsilon = 1e-10);
    }

    #[test]
    fn test_finite_difference_first_derivative() {
        let fd_op = FiniteDifferenceOperator::new(5, 1, 1.0, BoundaryCondition::Dirichlet);

        // Linear ramp: the interior first derivative should be 1 everywhere.
        let result = fd_op.matvec(&[0.0, 1.0, 2.0, 3.0, 4.0]).unwrap();

        #[allow(clippy::needless_range_loop)]
        for i in 1..result.len() - 1 {
            assert_relative_eq!(result[i], 1.0, epsilon = 0.1);
        }
    }

    #[test]
    fn test_finite_difference_second_derivative() {
        let fd_op = FiniteDifferenceOperator::new(5, 2, 1.0, BoundaryCondition::Dirichlet);

        // Samples of x^2: the interior second derivative should be 2.
        let result = fd_op.matvec(&[0.0, 1.0, 4.0, 9.0, 16.0]).unwrap();

        #[allow(clippy::needless_range_loop)]
        for i in 1..result.len() - 1 {
            assert_relative_eq!(result[i], 2.0, epsilon = 0.5);
        }
    }

    #[test]
    #[ignore = "timeout"]
    fn test_enhanced_operators_with_large_vectors() {
        // Large enough to cross the default parallel threshold.
        let large_size = 15000;
        let diag1: Vec<f64> = (0..large_size).map(|i| (i + 1) as f64).collect();
        let diag2: Vec<f64> = vec![2.0; large_size];

        let sum_op =
            EnhancedSumOperator::new(enhanced_diagonal(diag1), enhanced_diagonal(diag2)).unwrap();

        let ones = vec![1.0; large_size];
        let result = sum_op.matvec(&ones).unwrap();

        assert_relative_eq!(result[0], 3.0, epsilon = 1e-10);
        assert_relative_eq!(result[1], 4.0, epsilon = 1e-10);
        assert_relative_eq!(result[999], 1002.0, epsilon = 1e-10);
    }
}