1use crate::error::{SparseError, SparseResult};
7use crate::linalg::interface::{AdjointOperator, LinearOperator};
8use crate::parallel_vector_ops::*;
9use scirs2_core::numeric::{Float, NumAssign};
10use scirs2_core::SparseElement;
11use std::fmt::Debug;
12use std::iter::Sum;
13
14use scirs2_core::simd_ops::SimdUnifiedOps;
16
/// Tuning options shared by the enhanced operators in this module.
#[derive(Debug, Clone)]
pub struct EnhancedOperatorOptions {
    /// Use multi-threaded execution when inputs are large enough.
    pub use_parallel: bool,
    /// Minimum element count before the parallel path is taken.
    pub parallel_threshold: usize,
    /// Use SIMD-accelerated kernels where available.
    pub use_simd: bool,
    /// Minimum segment length before the SIMD path is taken.
    pub simd_threshold: usize,
    /// Work-chunk size used when splitting jobs across threads.
    pub chunk_size: usize,
}
31
impl Default for EnhancedOperatorOptions {
    /// Defaults: parallelism from 10k elements, SIMD from 32-element
    /// segments, 1024-element work chunks.
    fn default() -> Self {
        Self {
            use_parallel: true,
            parallel_threshold: 10000,
            use_simd: true,
            simd_threshold: 32,
            chunk_size: 1024,
        }
    }
}
43
/// Diagonal linear operator `y = D x` with optional parallel evaluation.
#[derive(Clone)]
pub struct EnhancedDiagonalOperator<F> {
    // Diagonal entries of D; the operator is square of this length.
    diagonal: Vec<F>,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
51
52impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> EnhancedDiagonalOperator<F> {
53 #[allow(dead_code)]
55 pub fn new(diagonal: Vec<F>) -> Self {
56 Self {
57 diagonal,
58 options: EnhancedOperatorOptions::default(),
59 }
60 }
61
62 #[allow(dead_code)]
64 pub fn with_options(diagonal: Vec<F>, options: EnhancedOperatorOptions) -> Self {
65 Self { diagonal, options }
66 }
67
68 pub fn diagonal(&self) -> &[F] {
70 &self.diagonal
71 }
72}
73
74impl<F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
75 LinearOperator<F> for EnhancedDiagonalOperator<F>
76{
77 fn shape(&self) -> (usize, usize) {
78 let n = self.diagonal.len();
79 (n, n)
80 }
81
82 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
83 if x.len() != self.diagonal.len() {
84 return Err(SparseError::DimensionMismatch {
85 expected: self.diagonal.len(),
86 found: x.len(),
87 });
88 }
89
90 let mut result = vec![F::sparse_zero(); x.len()];
91
92 if self.options.use_parallel && x.len() >= self.options.parallel_threshold {
96 use scirs2_core::parallel_ops::*;
98 let indices: Vec<usize> = (0..x.len()).collect();
99 let values = parallel_map(&indices, |&i| self.diagonal[i] * x[i]);
100 result = values;
101 } else {
102 #[allow(clippy::needless_range_loop)]
104 for i in 0..x.len() {
105 result[i] = self.diagonal[i] * x[i];
106 }
107 }
108
109 Ok(result)
110 }
111
112 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
113 self.matvec(x)
115 }
116
117 fn has_adjoint(&self) -> bool {
118 true
119 }
120}
121
/// Lazily evaluated operator sum `A + B`.
pub struct EnhancedSumOperator<F> {
    a: Box<dyn LinearOperator<F>>,
    b: Box<dyn LinearOperator<F>>,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
129
130impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> EnhancedSumOperator<F> {
131 #[allow(dead_code)]
133 pub fn new(a: Box<dyn LinearOperator<F>>, b: Box<dyn LinearOperator<F>>) -> SparseResult<Self> {
134 if a.shape() != b.shape() {
135 return Err(SparseError::ShapeMismatch {
136 expected: a.shape(),
137 found: b.shape(),
138 });
139 }
140 Ok(Self {
141 a,
142 b,
143 options: EnhancedOperatorOptions::default(),
144 })
145 }
146
147 #[allow(dead_code)]
149 pub fn with_options(
150 a: Box<dyn LinearOperator<F>>,
151 b: Box<dyn LinearOperator<F>>,
152 #[allow(dead_code)] options: EnhancedOperatorOptions,
153 ) -> SparseResult<Self> {
154 if a.shape() != b.shape() {
155 return Err(SparseError::ShapeMismatch {
156 expected: a.shape(),
157 found: b.shape(),
158 });
159 }
160 Ok(Self { a, b, options })
161 }
162}
163
164impl<F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
165 LinearOperator<F> for EnhancedSumOperator<F>
166{
167 fn shape(&self) -> (usize, usize) {
168 self.a.shape()
169 }
170
171 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
172 let a_result = self.a.matvec(x)?;
173 let b_result = self.b.matvec(x)?;
174
175 let mut result = vec![F::sparse_zero(); a_result.len()];
176 let parallel_opts = Some(ParallelVectorOptions {
177 use_parallel: self.options.use_parallel,
178 parallel_threshold: self.options.parallel_threshold,
179 chunk_size: self.options.chunk_size,
180 use_simd: self.options.use_simd,
181 simd_threshold: self.options.simd_threshold,
182 });
183
184 parallel_vector_add(&a_result, &b_result, &mut result, parallel_opts);
186 Ok(result)
187 }
188
189 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
190 if !self.a.has_adjoint() || !self.b.has_adjoint() {
191 return Err(SparseError::OperationNotSupported(
192 "adjoint not supported for one or both operators".to_string(),
193 ));
194 }
195 let a_result = self.a.rmatvec(x)?;
196 let b_result = self.b.rmatvec(x)?;
197
198 let mut result = vec![F::sparse_zero(); a_result.len()];
199 let parallel_opts = Some(ParallelVectorOptions {
200 use_parallel: self.options.use_parallel,
201 parallel_threshold: self.options.parallel_threshold,
202 chunk_size: self.options.chunk_size,
203 use_simd: self.options.use_simd,
204 simd_threshold: self.options.simd_threshold,
205 });
206
207 parallel_vector_add(&a_result, &b_result, &mut result, parallel_opts);
208 Ok(result)
209 }
210
211 fn has_adjoint(&self) -> bool {
212 self.a.has_adjoint() && self.b.has_adjoint()
213 }
214}
215
/// Lazily evaluated operator difference `A - B`.
pub struct EnhancedDifferenceOperator<F> {
    a: Box<dyn LinearOperator<F>>,
    b: Box<dyn LinearOperator<F>>,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
223
224impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
225 EnhancedDifferenceOperator<F>
226{
227 #[allow(dead_code)]
229 pub fn new(a: Box<dyn LinearOperator<F>>, b: Box<dyn LinearOperator<F>>) -> SparseResult<Self> {
230 if a.shape() != b.shape() {
231 return Err(SparseError::ShapeMismatch {
232 expected: a.shape(),
233 found: b.shape(),
234 });
235 }
236 Ok(Self {
237 a,
238 b,
239 options: EnhancedOperatorOptions::default(),
240 })
241 }
242
243 #[allow(dead_code)]
245 pub fn with_options(
246 a: Box<dyn LinearOperator<F>>,
247 b: Box<dyn LinearOperator<F>>,
248 #[allow(dead_code)] options: EnhancedOperatorOptions,
249 ) -> SparseResult<Self> {
250 if a.shape() != b.shape() {
251 return Err(SparseError::ShapeMismatch {
252 expected: a.shape(),
253 found: b.shape(),
254 });
255 }
256 Ok(Self { a, b, options })
257 }
258}
259
260impl<F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
261 LinearOperator<F> for EnhancedDifferenceOperator<F>
262{
263 fn shape(&self) -> (usize, usize) {
264 self.a.shape()
265 }
266
267 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
268 let a_result = self.a.matvec(x)?;
269 let b_result = self.b.matvec(x)?;
270
271 let mut result = vec![F::sparse_zero(); a_result.len()];
272 let parallel_opts = Some(ParallelVectorOptions {
273 use_parallel: self.options.use_parallel,
274 parallel_threshold: self.options.parallel_threshold,
275 chunk_size: self.options.chunk_size,
276 use_simd: self.options.use_simd,
277 simd_threshold: self.options.simd_threshold,
278 });
279
280 parallel_vector_sub(&a_result, &b_result, &mut result, parallel_opts);
282 Ok(result)
283 }
284
285 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
286 if !self.a.has_adjoint() || !self.b.has_adjoint() {
287 return Err(SparseError::OperationNotSupported(
288 "adjoint not supported for one or both operators".to_string(),
289 ));
290 }
291 let a_result = self.a.rmatvec(x)?;
292 let b_result = self.b.rmatvec(x)?;
293
294 let mut result = vec![F::sparse_zero(); a_result.len()];
295 let parallel_opts = Some(ParallelVectorOptions {
296 use_parallel: self.options.use_parallel,
297 parallel_threshold: self.options.parallel_threshold,
298 chunk_size: self.options.chunk_size,
299 use_simd: self.options.use_simd,
300 simd_threshold: self.options.simd_threshold,
301 });
302
303 parallel_vector_sub(&a_result, &b_result, &mut result, parallel_opts);
304 Ok(result)
305 }
306
307 fn has_adjoint(&self) -> bool {
308 self.a.has_adjoint() && self.b.has_adjoint()
309 }
310}
311
/// Lazily evaluated scaled operator `alpha * A`.
pub struct EnhancedScaledOperator<F> {
    // Scalar multiplier applied after the wrapped operator.
    alpha: F,
    operator: Box<dyn LinearOperator<F>>,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
319
320impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> EnhancedScaledOperator<F> {
321 #[allow(dead_code)]
323 pub fn new(alpha: F, operator: Box<dyn LinearOperator<F>>) -> Self {
324 Self {
325 alpha,
326 operator,
327 options: EnhancedOperatorOptions::default(),
328 }
329 }
330
331 #[allow(dead_code)]
333 pub fn with_options(
334 alpha: F,
335 operator: Box<dyn LinearOperator<F>>,
336 #[allow(dead_code)] options: EnhancedOperatorOptions,
337 ) -> Self {
338 Self {
339 alpha,
340 operator,
341 options,
342 }
343 }
344}
345
346impl<F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
347 LinearOperator<F> for EnhancedScaledOperator<F>
348{
349 fn shape(&self) -> (usize, usize) {
350 self.operator.shape()
351 }
352
353 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
354 let result = self.operator.matvec(x)?;
355 let mut scaled_result = vec![F::sparse_zero(); result.len()];
356
357 let parallel_opts = Some(ParallelVectorOptions {
358 use_parallel: self.options.use_parallel,
359 parallel_threshold: self.options.parallel_threshold,
360 chunk_size: self.options.chunk_size,
361 use_simd: self.options.use_simd,
362 simd_threshold: self.options.simd_threshold,
363 });
364
365 parallel_vector_scale(self.alpha, &result, &mut scaled_result, parallel_opts);
367 Ok(scaled_result)
368 }
369
370 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
371 if !self.operator.has_adjoint() {
372 return Err(SparseError::OperationNotSupported(
373 "adjoint not supported for underlying operator".to_string(),
374 ));
375 }
376 let result = self.operator.rmatvec(x)?;
377 let mut scaled_result = vec![F::sparse_zero(); result.len()];
378
379 let parallel_opts = Some(ParallelVectorOptions {
380 use_parallel: self.options.use_parallel,
381 parallel_threshold: self.options.parallel_threshold,
382 chunk_size: self.options.chunk_size,
383 use_simd: self.options.use_simd,
384 simd_threshold: self.options.simd_threshold,
385 });
386
387 parallel_vector_scale(self.alpha, &result, &mut scaled_result, parallel_opts);
388 Ok(scaled_result)
389 }
390
391 fn has_adjoint(&self) -> bool {
392 self.operator.has_adjoint()
393 }
394}
395
/// 1-D discrete convolution expressed as a linear operator.
pub struct ConvolutionOperator<F> {
    // Convolution kernel (filter taps).
    kernel: Vec<F>,
    // Expected input vector length.
    input_size: usize,
    // Output length implied by `mode`, cached at construction.
    output_size: usize,
    // Output-size / boundary convention.
    mode: ConvolutionMode,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
405
/// Output-size convention for [`ConvolutionOperator`].
#[derive(Debug, Clone, Copy)]
pub enum ConvolutionMode {
    /// Output length `input + kernel - 1`: every partial overlap.
    Full,
    /// Output length equals the input length (kernel centered).
    Same,
    /// Output length `input - kernel + 1`: complete overlaps only.
    Valid,
}
415
416impl<F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
417 ConvolutionOperator<F>
418{
419 #[allow(dead_code)]
421 pub fn new(kernel: Vec<F>, input_size: usize, mode: ConvolutionMode) -> Self {
422 let output_size = match mode {
423 ConvolutionMode::Full => input_size + kernel.len() - 1,
424 ConvolutionMode::Same => input_size,
425 ConvolutionMode::Valid => {
426 if input_size >= kernel.len() {
427 input_size - kernel.len() + 1
428 } else {
429 0
430 }
431 }
432 };
433
434 Self {
435 kernel,
436 input_size,
437 output_size,
438 mode,
439 options: EnhancedOperatorOptions::default(),
440 }
441 }
442
443 fn optimize_convolution_parallel(&self, result: &mut [F], x: &[F]) -> SparseResult<()> {
445 use scirs2_core::parallel_ops::*;
446
447 let chunk_size = self.options.chunk_size.min(self.output_size);
449 let indices: Vec<usize> = (0..self.output_size).collect();
450 let chunks: Vec<&[usize]> = indices.chunks(chunk_size).collect();
451
452 let parallel_results: Vec<Vec<F>> = parallel_map(&chunks, |chunk| {
453 let mut chunk_result = vec![F::sparse_zero(); chunk.len()];
454
455 for (local_i, &global_i) in chunk.iter().enumerate() {
456 chunk_result[local_i] = self.compute_convolution_at_index(global_i, x);
457 }
458
459 chunk_result
460 });
461
462 let mut result_idx = 0;
464 for chunk_result in parallel_results {
465 for val in chunk_result {
466 if result_idx < result.len() {
467 result[result_idx] = val;
468 result_idx += 1;
469 }
470 }
471 }
472
473 Ok(())
474 }
475
476 fn optimize_convolution_simd(&self, result: &mut [F], x: &[F]) -> SparseResult<()> {
478 #[allow(clippy::needless_range_loop)]
482 for i in 0..self.output_size {
483 let convolution_result = self.compute_convolution_at_index_simd(i, x);
485 result[i] = convolution_result;
486 }
487
488 Ok(())
489 }
490
491 fn compute_convolution_at_index(&self, i: usize, x: &[F]) -> F {
493 let mut sum = F::sparse_zero();
494
495 match self.mode {
496 ConvolutionMode::Full => {
497 for (j, &kernel_val) in self.kernel.iter().enumerate() {
498 if i >= j && (i - j) < x.len() {
499 sum += kernel_val * x[i - j];
500 }
501 }
502 }
503 ConvolutionMode::Same => {
504 let pad = self.kernel.len() / 2;
505 for (j, &kernel_val) in self.kernel.iter().enumerate() {
506 let idx = i + j;
507 if idx >= pad && (idx - pad) < x.len() {
508 sum += kernel_val * x[idx - pad];
509 }
510 }
511 }
512 ConvolutionMode::Valid => {
513 for (j, &kernel_val) in self.kernel.iter().enumerate() {
514 let idx = i + j;
515 if idx < x.len() {
516 sum += kernel_val * x[idx];
517 }
518 }
519 }
520 }
521
522 sum
523 }
524
525 fn compute_convolution_at_index_simd(&self, i: usize, x: &[F]) -> F {
527 let mut x_segment = Vec::new();
529 let mut kernel_segment = Vec::new();
530
531 match self.mode {
532 ConvolutionMode::Full => {
533 for (j, &kernel_val) in self.kernel.iter().enumerate() {
534 if i >= j && (i - j) < x.len() {
535 kernel_segment.push(kernel_val);
536 x_segment.push(x[i - j]);
537 }
538 }
539 }
540 ConvolutionMode::Same => {
541 let pad = self.kernel.len() / 2;
542 for (j, &kernel_val) in self.kernel.iter().enumerate() {
543 let idx = i + j;
544 if idx >= pad && (idx - pad) < x.len() {
545 kernel_segment.push(kernel_val);
546 x_segment.push(x[idx - pad]);
547 }
548 }
549 }
550 ConvolutionMode::Valid => {
551 for (j, &kernel_val) in self.kernel.iter().enumerate() {
552 let idx = i + j;
553 if idx < x.len() {
554 kernel_segment.push(kernel_val);
555 x_segment.push(x[idx]);
556 }
557 }
558 }
559 }
560
561 if kernel_segment.len() >= self.options.simd_threshold {
563 use scirs2_core::ndarray::ArrayView1;
564 let kernel_view = ArrayView1::from(&kernel_segment);
565 let x_view = ArrayView1::from(&x_segment);
566 F::simd_dot(&kernel_view, &x_view)
567 } else {
568 kernel_segment
570 .iter()
571 .zip(x_segment.iter())
572 .map(|(&k, &x)| k * x)
573 .sum()
574 }
575 }
576
577 #[allow(dead_code)]
579 pub fn with_options(
580 kernel: Vec<F>,
581 input_size: usize,
582 mode: ConvolutionMode,
583 #[allow(dead_code)] options: EnhancedOperatorOptions,
584 ) -> Self {
585 let output_size = match mode {
586 ConvolutionMode::Full => input_size + kernel.len() - 1,
587 ConvolutionMode::Same => input_size,
588 ConvolutionMode::Valid => {
589 if input_size >= kernel.len() {
590 input_size - kernel.len() + 1
591 } else {
592 0
593 }
594 }
595 };
596
597 Self {
598 kernel,
599 input_size,
600 output_size,
601 mode,
602 options,
603 }
604 }
605}
606
607impl<F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
608 LinearOperator<F> for ConvolutionOperator<F>
609{
610 fn shape(&self) -> (usize, usize) {
611 (self.output_size, self.input_size)
612 }
613
614 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
615 if x.len() != self.input_size {
616 return Err(SparseError::DimensionMismatch {
617 expected: self.input_size,
618 found: x.len(),
619 });
620 }
621
622 let mut result = vec![F::sparse_zero(); self.output_size];
623
624 match self.mode {
626 ConvolutionMode::Full => {
627 for i in 0..self.output_size {
628 let mut sum = F::sparse_zero();
629 for (j, &kernel_val) in self.kernel.iter().enumerate() {
630 if i >= j && (i - j) < x.len() {
631 sum += kernel_val * x[i - j];
632 }
633 }
634 result[i] = sum;
635 }
636 }
637 ConvolutionMode::Same => {
638 let pad = self.kernel.len() / 2;
639 #[allow(clippy::needless_range_loop)]
640 for i in 0..self.output_size {
641 let mut sum = F::sparse_zero();
642 for (j, &kernel_val) in self.kernel.iter().enumerate() {
643 let idx = i + j;
644 if idx >= pad && (idx - pad) < x.len() {
645 sum += kernel_val * x[idx - pad];
646 }
647 }
648 result[i] = sum;
649 }
650 }
651 ConvolutionMode::Valid =>
652 {
653 #[allow(clippy::needless_range_loop)]
654 for i in 0..self.output_size {
655 let mut sum = F::sparse_zero();
656 for (j, &kernel_val) in self.kernel.iter().enumerate() {
657 let idx = i + j;
658 if idx < x.len() {
659 sum += kernel_val * x[idx];
660 }
661 }
662 result[i] = sum;
663 }
664 }
665 }
666
667 if self.options.use_parallel && self.output_size >= self.options.parallel_threshold {
669 self.optimize_convolution_parallel(&mut result, x)?;
670 } else if self.options.use_simd && self.kernel.len() >= self.options.simd_threshold {
671 self.optimize_convolution_simd(&mut result, x)?;
672 }
673
674 Ok(result)
675 }
676
677 fn has_adjoint(&self) -> bool {
678 true
679 }
680
681 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
682 if x.len() != self.output_size {
683 return Err(SparseError::DimensionMismatch {
684 expected: self.output_size,
685 found: x.len(),
686 });
687 }
688
689 let mut result = vec![F::sparse_zero(); self.input_size];
691 let flipped_kernel: Vec<F> = self.kernel.iter().rev().copied().collect();
692
693 match self.mode {
695 ConvolutionMode::Full =>
696 {
697 #[allow(clippy::needless_range_loop)]
698 for i in 0..self.input_size {
699 let mut sum = F::sparse_zero();
700 for (j, &kernel_val) in flipped_kernel.iter().enumerate() {
701 let idx = i + j;
702 if idx < x.len() {
703 sum += kernel_val * x[idx];
704 }
705 }
706 result[i] = sum;
707 }
708 }
709 ConvolutionMode::Same => {
710 let pad = flipped_kernel.len() / 2;
711 for i in 0..self.input_size {
712 let mut sum = F::sparse_zero();
713 for (j, &kernel_val) in flipped_kernel.iter().enumerate() {
714 if i + pad >= j && (i + pad - j) < x.len() {
715 sum += kernel_val * x[i + pad - j];
716 }
717 }
718 result[i] = sum;
719 }
720 }
721 ConvolutionMode::Valid => {
722 for i in 0..self.input_size {
723 let mut sum = F::sparse_zero();
724 for (j, &kernel_val) in flipped_kernel.iter().enumerate() {
725 if i >= j && (i - j) < x.len() {
726 sum += kernel_val * x[i - j];
727 }
728 }
729 result[i] = sum;
730 }
731 }
732 }
733
734 Ok(result)
735 }
736}
737
/// Central finite-difference operator on a uniform 1-D grid.
pub struct FiniteDifferenceOperator<F> {
    // Number of grid points; the operator is size x size.
    size: usize,
    // Derivative order (1 and 2 are implemented; see `get_coefficients`).
    order: usize,
    // Grid spacing `h`.
    spacing: F,
    // How stencil points outside the grid are treated.
    boundary: BoundaryCondition,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
747
/// Boundary handling for [`FiniteDifferenceOperator`].
#[derive(Debug, Clone, Copy)]
pub enum BoundaryCondition {
    /// Clamp out-of-range samples to the nearest edge value.
    Neumann,
    /// Treat out-of-range samples as zero.
    Dirichlet,
    /// Wrap indices around the grid.
    Periodic,
}
757
758impl<F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
759 FiniteDifferenceOperator<F>
760{
761 #[allow(dead_code)]
763 pub fn new(size: usize, order: usize, spacing: F, boundary: BoundaryCondition) -> Self {
764 Self {
765 size,
766 order,
767 spacing,
768 boundary,
769 options: EnhancedOperatorOptions::default(),
770 }
771 }
772
773 #[allow(dead_code)]
775 pub fn with_options(
776 size: usize,
777 order: usize,
778 spacing: F,
779 boundary: BoundaryCondition,
780 #[allow(dead_code)] options: EnhancedOperatorOptions,
781 ) -> Self {
782 Self {
783 size,
784 order,
785 spacing,
786 boundary,
787 options,
788 }
789 }
790
791 fn get_coefficients(&self) -> Vec<F> {
793 match self.order {
794 1 => {
795 vec![
797 -F::sparse_one() / (F::from(2.0).unwrap() * self.spacing),
798 F::sparse_zero(),
799 F::sparse_one() / (F::from(2.0).unwrap() * self.spacing),
800 ]
801 }
802 2 => {
803 let h_sq = self.spacing * self.spacing;
805 vec![
806 F::sparse_one() / h_sq,
807 -F::from(2.0).unwrap() / h_sq,
808 F::sparse_one() / h_sq,
809 ]
810 }
811 _ => {
812 vec![F::sparse_zero(); 2 * self.order + 1]
815 }
816 }
817 }
818}
819
impl<F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
    LinearOperator<F> for FiniteDifferenceOperator<F>
{
    fn shape(&self) -> (usize, usize) {
        (self.size, self.size)
    }

    /// Applies the finite-difference stencil to `x`, honoring the configured
    /// boundary condition for stencil points that fall outside the grid.
    ///
    /// # Errors
    /// Returns `DimensionMismatch` when `x.len() != size`.
    fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
        if x.len() != self.size {
            return Err(SparseError::DimensionMismatch {
                expected: self.size,
                found: x.len(),
            });
        }

        let mut result = vec![F::sparse_zero(); self.size];
        let coeffs = self.get_coefficients();
        // The stencil is centered: offsets run from -radius to +radius.
        let stencil_radius = coeffs.len() / 2;

        #[allow(clippy::needless_range_loop)]
        for i in 0..self.size {
            let mut sum = F::sparse_zero();
            for (j, &coeff) in coeffs.iter().enumerate() {
                // Signed grid index addressed by this stencil point.
                let idx = i as isize + j as isize - stencil_radius as isize;

                let value = match self.boundary {
                    BoundaryCondition::Neumann => {
                        // Clamp to the nearest edge sample.
                        if idx < 0 {
                            x[0]
                        } else if idx >= self.size as isize {
                            x[self.size - 1]
                        } else {
                            x[idx as usize]
                        }
                    }
                    BoundaryCondition::Dirichlet => {
                        // Out-of-range samples contribute zero.
                        if idx < 0 || idx >= self.size as isize {
                            F::sparse_zero()
                        } else {
                            x[idx as usize]
                        }
                    }
                    BoundaryCondition::Periodic => {
                        // Euclidean modulo so negative indices wrap correctly.
                        let periodic_idx = ((idx % self.size as isize + self.size as isize)
                            % self.size as isize)
                            as usize;
                        x[periodic_idx]
                    }
                };

                sum += coeff * value;
            }
            result[i] = sum;
        }

        Ok(result)
    }

    fn has_adjoint(&self) -> bool {
        true
    }

    /// NOTE(review): delegating to `matvec` assumes the operator is
    /// symmetric. That holds for the second-derivative stencil, but the
    /// first-derivative central difference is anti-symmetric (its adjoint is
    /// the negation) — confirm callers rely only on the symmetric case.
    fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
        self.matvec(x)
    }
}
888
/// Convenience constructor: boxed diagonal operator with default options.
#[allow(dead_code)]
pub fn enhanced_diagonal<
    F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    diagonal: Vec<F>,
) -> Box<dyn LinearOperator<F>> {
    Box::new(EnhancedDiagonalOperator::new(diagonal))
}
899
/// Convenience constructor: boxed operator sum `left + right`.
///
/// # Errors
/// Returns `ShapeMismatch` if the operands have different shapes.
#[allow(dead_code)]
pub fn enhanced_add<
    F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    left: Box<dyn LinearOperator<F>>,
    right: Box<dyn LinearOperator<F>>,
) -> SparseResult<Box<dyn LinearOperator<F>>> {
    Ok(Box::new(EnhancedSumOperator::new(left, right)?))
}
910
/// Convenience constructor: boxed operator difference `left - right`.
///
/// # Errors
/// Returns `ShapeMismatch` if the operands have different shapes.
#[allow(dead_code)]
pub fn enhanced_subtract<
    F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    left: Box<dyn LinearOperator<F>>,
    right: Box<dyn LinearOperator<F>>,
) -> SparseResult<Box<dyn LinearOperator<F>>> {
    Ok(Box::new(EnhancedDifferenceOperator::new(left, right)?))
}
921
/// Convenience constructor: boxed scaled operator `alpha * operator`.
#[allow(dead_code)]
pub fn enhanced_scale<
    F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    alpha: F,
    operator: Box<dyn LinearOperator<F>>,
) -> Box<dyn LinearOperator<F>> {
    Box::new(EnhancedScaledOperator::new(alpha, operator))
}
932
/// Convenience constructor: boxed 1-D convolution operator.
#[allow(dead_code)]
pub fn convolution_operator<
    F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    kernel: Vec<F>,
    input_size: usize,
    mode: ConvolutionMode,
) -> Box<dyn LinearOperator<F>> {
    Box::new(ConvolutionOperator::new(kernel, input_size, mode))
}
944
/// Convenience constructor: boxed finite-difference operator.
#[allow(dead_code)]
pub fn finite_difference_operator<
    F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    size: usize,
    order: usize,
    spacing: F,
    boundary: BoundaryCondition,
) -> Box<dyn LinearOperator<F>> {
    Box::new(FiniteDifferenceOperator::new(
        size, order, spacing, boundary,
    ))
}
959
/// Lazily evaluated composition `left ∘ right` (right is applied first).
pub struct EnhancedCompositionOperator<F> {
    left: Box<dyn LinearOperator<F>>,
    right: Box<dyn LinearOperator<F>>,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
967
968impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
969 EnhancedCompositionOperator<F>
970{
971 #[allow(dead_code)]
974 pub fn new(
975 left: Box<dyn LinearOperator<F>>,
976 right: Box<dyn LinearOperator<F>>,
977 ) -> SparseResult<Self> {
978 let (left_rows, left_cols) = left.shape();
979 let (right_rows, right_cols) = right.shape();
980
981 if left_cols != right_rows {
982 return Err(SparseError::ShapeMismatch {
983 expected: (left_rows, right_rows),
984 found: (left_rows, left_cols),
985 });
986 }
987
988 Ok(Self {
989 left,
990 right,
991 options: EnhancedOperatorOptions::default(),
992 })
993 }
994
995 #[allow(dead_code)]
997 pub fn with_options(
998 left: Box<dyn LinearOperator<F>>,
999 right: Box<dyn LinearOperator<F>>,
1000 options: EnhancedOperatorOptions,
1001 ) -> SparseResult<Self> {
1002 let (left_rows, left_cols) = left.shape();
1003 let (right_rows, right_cols) = right.shape();
1004
1005 if left_cols != right_rows {
1006 return Err(SparseError::ShapeMismatch {
1007 expected: (left_rows, right_rows),
1008 found: (left_rows, left_cols),
1009 });
1010 }
1011
1012 Ok(Self {
1013 left,
1014 right,
1015 options,
1016 })
1017 }
1018}
1019
1020impl<F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
1021 LinearOperator<F> for EnhancedCompositionOperator<F>
1022{
1023 fn shape(&self) -> (usize, usize) {
1024 let (left_rows, _) = self.left.shape();
1025 let (_, right_cols) = self.right.shape();
1026 (left_rows, right_cols)
1027 }
1028
1029 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1030 let intermediate = self.right.matvec(x)?;
1032 self.left.matvec(&intermediate)
1033 }
1034
1035 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1036 if !self.left.has_adjoint() || !self.right.has_adjoint() {
1037 return Err(SparseError::OperationNotSupported(
1038 "adjoint not supported for one or both operators".to_string(),
1039 ));
1040 }
1041
1042 let intermediate = self.left.rmatvec(x)?;
1044 self.right.rmatvec(&intermediate)
1045 }
1046
1047 fn has_adjoint(&self) -> bool {
1048 self.left.has_adjoint() && self.right.has_adjoint()
1049 }
1050}
1051
/// Operator that applies a scalar function to each vector element.
pub struct ElementwiseFunctionOperator<F> {
    // Function applied per element in `matvec`.
    function: Box<dyn Fn(F) -> F + Send + Sync>,
    // Vector length; the operator is square (size x size).
    size: usize,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
1059
1060impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
1061 ElementwiseFunctionOperator<F>
1062{
1063 pub fn new<Func>(function: Func, size: usize) -> Self
1065 where
1066 Func: Fn(F) -> F + Send + Sync + 'static,
1067 {
1068 Self {
1069 function: Box::new(function),
1070 size,
1071 options: EnhancedOperatorOptions::default(),
1072 }
1073 }
1074
1075 #[allow(dead_code)]
1077 pub fn with_options<Func>(function: Func, size: usize, options: EnhancedOperatorOptions) -> Self
1078 where
1079 Func: Fn(F) -> F + Send + Sync + 'static,
1080 {
1081 Self {
1082 function: Box::new(function),
1083 size,
1084 options,
1085 }
1086 }
1087}
1088
1089impl<F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
1090 LinearOperator<F> for ElementwiseFunctionOperator<F>
1091{
1092 fn shape(&self) -> (usize, usize) {
1093 (self.size, self.size)
1094 }
1095
1096 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1097 if x.len() != self.size {
1098 return Err(SparseError::DimensionMismatch {
1099 expected: self.size,
1100 found: x.len(),
1101 });
1102 }
1103
1104 if self.options.use_parallel && x.len() >= self.options.parallel_threshold {
1105 use scirs2_core::parallel_ops::*;
1107 let result = parallel_map(x, |&val| (self.function)(val));
1108 Ok(result)
1109 } else {
1110 let result: Vec<F> = x.iter().map(|&val| (self.function)(val)).collect();
1112 Ok(result)
1113 }
1114 }
1115
1116 fn has_adjoint(&self) -> bool {
1117 false }
1119
1120 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1121 Err(SparseError::OperationNotSupported(
1122 "adjoint not supported for general element-wise functions".to_string(),
1123 ))
1124 }
1125}
1126
/// Kronecker product operator `left ⊗ right`, applied without ever
/// materializing the product matrix.
pub struct KroneckerProductOperator<F> {
    left: Box<dyn LinearOperator<F>>,
    right: Box<dyn LinearOperator<F>>,
    #[allow(dead_code)]
    options: EnhancedOperatorOptions,
}
1134
1135impl<F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps> KroneckerProductOperator<F> {
1136 #[allow(dead_code)]
1138 pub fn new(left: Box<dyn LinearOperator<F>>, right: Box<dyn LinearOperator<F>>) -> Self {
1139 Self {
1140 left,
1141 right,
1142 options: EnhancedOperatorOptions::default(),
1143 }
1144 }
1145
1146 #[allow(dead_code)]
1148 pub fn with_options(
1149 left: Box<dyn LinearOperator<F>>,
1150 right: Box<dyn LinearOperator<F>>,
1151 options: EnhancedOperatorOptions,
1152 ) -> Self {
1153 Self {
1154 left,
1155 right,
1156 options,
1157 }
1158 }
1159}
1160
1161impl<F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps>
1162 LinearOperator<F> for KroneckerProductOperator<F>
1163{
1164 fn shape(&self) -> (usize, usize) {
1165 let (left_rows, left_cols) = self.left.shape();
1166 let (right_rows, right_cols) = self.right.shape();
1167 (left_rows * right_rows, left_cols * right_cols)
1168 }
1169
1170 fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1171 let (left_rows, left_cols) = self.left.shape();
1172 let (right_rows, right_cols) = self.right.shape();
1173
1174 if x.len() != left_cols * right_cols {
1175 return Err(SparseError::DimensionMismatch {
1176 expected: left_cols * right_cols,
1177 found: x.len(),
1178 });
1179 }
1180
1181 let mut result = vec![F::sparse_zero(); left_rows * right_rows];
1184
1185 for j in 0..left_cols {
1186 let mut col: Vec<F> = Vec::with_capacity(right_cols);
1188 for i in 0..right_cols {
1189 col.push(x[i * left_cols + j]);
1190 }
1191
1192 let right_result = self.right.matvec(&col)?;
1194
1195 for i in 0..right_rows {
1197 result[i * left_cols + j] = right_result[i];
1198 }
1199 }
1200
1201 let mut final_result = vec![F::sparse_zero(); left_rows * right_rows];
1203 for i in 0..right_rows {
1204 let mut row: Vec<F> = Vec::with_capacity(left_cols);
1206 for j in 0..left_cols {
1207 row.push(result[i * left_cols + j]);
1208 }
1209
1210 let left_result = self.left.matvec(&row)?;
1212
1213 for k in 0..left_rows {
1215 final_result[k * right_rows + i] = left_result[k];
1216 }
1217 }
1218
1219 Ok(final_result)
1220 }
1221
1222 fn has_adjoint(&self) -> bool {
1223 self.left.has_adjoint() && self.right.has_adjoint()
1224 }
1225
1226 fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
1227 if !self.has_adjoint() {
1228 return Err(SparseError::OperationNotSupported(
1229 "adjoint not supported for one or both operators".to_string(),
1230 ));
1231 }
1232
1233 Err(SparseError::OperationNotSupported(
1237 "Kronecker product adjoint requires operator cloning which is not currently supported"
1238 .to_string(),
1239 ))
1240 }
1241}
1242
/// Convenience constructor: boxed composition `left ∘ right` (right first).
///
/// # Errors
/// Returns `ShapeMismatch` when the inner dimensions do not agree.
#[allow(dead_code)]
pub fn enhanced_compose<
    F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    left: Box<dyn LinearOperator<F>>,
    right: Box<dyn LinearOperator<F>>,
) -> SparseResult<Box<dyn LinearOperator<F>>> {
    Ok(Box::new(EnhancedCompositionOperator::new(left, right)?))
}
1253
/// Convenience constructor: boxed element-wise function operator.
///
/// NOTE(review): the resulting operator is linear only if `function` is;
/// its `rmatvec` always errors.
#[allow(dead_code)]
pub fn elementwisefunction<
    F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
    Func: Fn(F) -> F + Send + Sync + 'static,
>(
    function: Func,
    size: usize,
) -> Box<dyn LinearOperator<F>> {
    Box::new(ElementwiseFunctionOperator::new(function, size))
}
1265
/// Convenience constructor: boxed Kronecker product operator `left ⊗ right`.
#[allow(dead_code)]
pub fn kronecker_product<
    F: Float + SparseElement + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    left: Box<dyn LinearOperator<F>>,
    right: Box<dyn LinearOperator<F>>,
) -> Box<dyn LinearOperator<F>> {
    Box::new(KroneckerProductOperator::new(left, right))
}
1276
/// Wraps `operator` in an [`AdjointOperator`] so applying it acts as the adjoint.
///
/// # Errors
/// Propagates any error from `AdjointOperator::new`.
#[allow(dead_code)]
pub fn adjoint_operator<
    F: Float + NumAssign + Sum + Copy + Send + Sync + SimdUnifiedOps + 'static,
>(
    operator: Box<dyn LinearOperator<F>>,
) -> SparseResult<Box<dyn LinearOperator<F>>> {
    Ok(Box::new(AdjointOperator::new(operator)?))
}
1286
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    #[test]
    fn test_enhanced_diagonal_operator() {
        let op = EnhancedDiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let y = op.matvec(&[1.0, 2.0, 3.0]).unwrap();
        assert_eq!(y, vec![2.0, 6.0, 12.0]);
    }

    #[test]
    fn test_enhanced_sum_operator() {
        let sum_op = EnhancedSumOperator::new(
            enhanced_diagonal(vec![1.0, 2.0, 3.0]),
            enhanced_diagonal(vec![2.0, 1.0, 1.0]),
        )
        .unwrap();

        let y = sum_op.matvec(&[1.0, 1.0, 1.0]).unwrap();
        assert_eq!(y, vec![3.0, 3.0, 4.0]);
    }

    #[test]
    fn test_enhanced_difference_operator() {
        let diff_op = EnhancedDifferenceOperator::new(
            enhanced_diagonal(vec![3.0, 4.0, 5.0]),
            enhanced_diagonal(vec![1.0, 2.0, 1.0]),
        )
        .unwrap();

        let y = diff_op.matvec(&[1.0, 1.0, 1.0]).unwrap();
        assert_eq!(y, vec![2.0, 2.0, 4.0]);
    }

    #[test]
    fn test_enhanced_scaled_operator() {
        let scaled_op = EnhancedScaledOperator::new(2.0, enhanced_diagonal(vec![1.0, 2.0, 3.0]));
        let y = scaled_op.matvec(&[1.0, 1.0, 1.0]).unwrap();
        assert_eq!(y, vec![2.0, 4.0, 6.0]);
    }

    #[test]
    fn test_convolution_operator_full() {
        let conv_op = ConvolutionOperator::new(vec![1.0, 2.0, 3.0], 3, ConvolutionMode::Full);
        // Convolving a unit impulse reproduces the kernel.
        let y = conv_op.matvec(&[1.0, 0.0, 0.0]).unwrap();
        assert_eq!(y, vec![1.0, 2.0, 3.0, 0.0, 0.0]);
    }

    #[test]
    fn test_convolution_operator_same() {
        // Centered identity kernel: output equals the input.
        let conv_op = ConvolutionOperator::new(vec![0.0, 1.0, 0.0], 3, ConvolutionMode::Same);
        let y = conv_op.matvec(&[1.0, 2.0, 3.0]).unwrap();
        assert_relative_eq!(y[0], 1.0, epsilon = 1e-10);
        assert_relative_eq!(y[1], 2.0, epsilon = 1e-10);
        assert_relative_eq!(y[2], 3.0, epsilon = 1e-10);
    }

    #[test]
    fn test_finite_difference_first_derivative() {
        let fd_op = FiniteDifferenceOperator::new(5, 1, 1.0, BoundaryCondition::Dirichlet);
        // Linear ramp: the interior derivative is 1 everywhere.
        let y = fd_op.matvec(&[0.0, 1.0, 2.0, 3.0, 4.0]).unwrap();
        for &yi in &y[1..y.len() - 1] {
            assert_relative_eq!(yi, 1.0, epsilon = 0.1);
        }
    }

    #[test]
    fn test_finite_difference_second_derivative() {
        let fd_op = FiniteDifferenceOperator::new(5, 2, 1.0, BoundaryCondition::Dirichlet);
        // Samples of x^2: the interior second derivative is 2.
        let y = fd_op.matvec(&[0.0, 1.0, 4.0, 9.0, 16.0]).unwrap();
        for &yi in &y[1..y.len() - 1] {
            assert_relative_eq!(yi, 2.0, epsilon = 0.5);
        }
    }

    #[test]
    #[ignore = "timeout"]
    fn test_enhanced_operators_with_large_vectors() {
        let large_size = 15000;
        let diag1: Vec<f64> = (0..large_size).map(|i| (i + 1) as f64).collect();
        let diag2: Vec<f64> = vec![2.0; large_size];

        let sum_op =
            EnhancedSumOperator::new(enhanced_diagonal(diag1), enhanced_diagonal(diag2)).unwrap();

        let x = vec![1.0; large_size];
        let y = sum_op.matvec(&x).unwrap();

        assert_relative_eq!(y[0], 3.0, epsilon = 1e-10);
        assert_relative_eq!(y[1], 4.0, epsilon = 1e-10);
        assert_relative_eq!(y[999], 1002.0, epsilon = 1e-10);
    }
}