use crate::error::{CoreError, CoreResult};
use crate::types::{NumericConversion, NumericConversionError};
use num_complex::Complex;
use num_traits::{Bounded, Float, NumCast, Zero};
use std::fmt;
#[cfg(feature = "simd")]
use wide::{f32x4, f64x2, i32x4};

#[cfg(feature = "parallel")]
use crate::parallel_ops::*;

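/// Configuration for batch numeric conversion.
///
/// A sketch of typical usage (builder methods are defined below; the values
/// here are illustrative, not tuned recommendations):
///
/// ```ignore
/// let config = BatchConversionConfig::default()
///     .with_simd(true)
///     .with_parallel(true)
///     .with_chunk_size(4096)
///     .with_parallel_threshold(50_000);
/// let converter = BatchConverter::new(config);
/// ```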
#[derive(Debug, Clone)]
pub struct BatchConversionConfig {
    /// Enable SIMD acceleration where a vectorized path exists.
    pub use_simd: bool,
    /// Enable parallel processing for large inputs.
    pub use_parallel: bool,
    /// Chunk size used when splitting work across threads.
    pub parallel_chunk_size: usize,
    /// Optional override for the SIMD vector width.
    pub simd_vector_size: Option<usize>,
    /// Minimum element count before the parallel path is taken.
    pub parallel_threshold: usize,
}

impl Default for BatchConversionConfig {
    fn default() -> Self {
        Self {
            use_simd: cfg!(feature = "simd"),
            use_parallel: cfg!(feature = "parallel"),
            parallel_chunk_size: 1024,
            simd_vector_size: None,
            parallel_threshold: 10000,
        }
    }
}

impl BatchConversionConfig {
    /// Enables or disables the SIMD path.
    pub fn with_simd(mut self, enable: bool) -> Self {
        self.use_simd = enable;
        self
    }

    /// Enables or disables the parallel path.
    pub fn with_parallel(mut self, enable: bool) -> Self {
        self.use_parallel = enable;
        self
    }

    /// Sets the chunk size used for parallel processing.
    pub fn with_chunk_size(mut self, chunk_size: usize) -> Self {
        self.parallel_chunk_size = chunk_size;
        self
    }

    /// Sets the minimum input length that triggers the parallel path.
    pub fn with_parallel_threshold(mut self, threshold: usize) -> Self {
        self.parallel_threshold = threshold;
        self
    }
}

/// A conversion failure for a single element of a batch.
#[derive(Debug, Clone)]
pub struct ElementConversionError {
    /// Index of the failing element in the input slice.
    pub index: usize,
    /// The underlying conversion error.
    pub error: NumericConversionError,
}

/// The outcome of a fallible batch conversion.
#[derive(Debug, Clone)]
pub struct BatchConversionResult<T> {
    /// Successfully converted values, paired with their original indices.
    pub converted: Vec<(usize, T)>,
    /// Errors for the elements that failed to convert.
    pub errors: Vec<ElementConversionError>,
}

/// Converts slices and arrays of numeric values in batches, applying SIMD
/// and parallel strategies according to its configuration.
pub struct BatchConverter {
    config: BatchConversionConfig,
}

impl BatchConverter {
    /// Creates a converter with the given configuration.
    pub fn new(config: BatchConversionConfig) -> Self {
        Self { config }
    }

    /// Creates a converter with the default configuration.
    pub fn with_default_config() -> Self {
        Self::new(BatchConversionConfig::default())
    }

    /// Converts a slice, returning the successfully converted values
    /// together with a per-element error report for the values that failed.
    pub fn convert_slice_with_errors<S, T>(
        &self,
        slice: &[S],
    ) -> (Vec<T>, Vec<ElementConversionError>)
    where
        S: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
        T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Copy + 'static,
    {
        if slice.is_empty() {
            return (Vec::new(), Vec::new());
        }

        if self.config.use_parallel && slice.len() >= self.config.parallel_threshold {
            self.convert_slice_parallel_with_errors(slice)
        } else if self.config.use_simd {
            self.convert_slice_simd_with_errors(slice)
        } else {
            self.convert_slice_sequential_with_errors(slice)
        }
    }

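    /// Converts a slice, failing wholesale if any element cannot be
    /// converted. A sketch of typical usage (types are illustrative):
    ///
    /// ```ignore
    /// let converter = BatchConverter::with_default_config();
    /// let out: Vec<f32> = converter.convert_slice(&[1.0f64, 2.5, 3.7])?;
    /// ```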
    pub fn convert_slice<S, T>(&self, slice: &[S]) -> CoreResult<Vec<T>>
    where
        S: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
        T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Copy + 'static,
    {
        let (converted, errors) = self.convert_slice_with_errors(slice);

        if !errors.is_empty() {
            return Err(CoreError::InvalidArgument(crate::error::ErrorContext::new(
                format!("Batch conversion failed for {} elements", errors.len()),
            )));
        }

        Ok(converted)
    }

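    /// Converts a slice, clamping out-of-range values to the bounds of the
    /// target type instead of reporting errors. A sketch (illustrative):
    ///
    /// ```ignore
    /// let converter = BatchConverter::with_default_config();
    /// // Values beyond the target's range saturate at its min/max.
    /// let clamped: Vec<f32> = converter.convert_slice_clamped(&[1e39f64, 2.5]);
    /// ```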
    pub fn convert_slice_clamped<S, T>(&self, slice: &[S]) -> Vec<T>
    where
        S: Copy + NumericConversion + Send + Sync,
        T: Bounded + NumCast + PartialOrd + Zero + Send + Sync,
    {
        if slice.is_empty() {
            return Vec::new();
        }

        if self.config.use_parallel && slice.len() >= self.config.parallel_threshold {
            self.convert_slice_parallel_clamped(slice)
        } else {
            slice.iter().map(|&x| x.to_numeric_clamped()).collect()
        }
    }

    fn convert_slice_sequential_with_errors<S, T>(
        &self,
        slice: &[S],
    ) -> (Vec<T>, Vec<ElementConversionError>)
    where
        S: Copy + NumCast + PartialOrd + fmt::Display + 'static,
        T: Bounded + NumCast + PartialOrd + fmt::Display + Copy + 'static,
    {
        let mut converted = Vec::new();
        let mut errors = Vec::new();

        for (index, &value) in slice.iter().enumerate() {
            match value.to_numeric() {
                Ok(result) => converted.push(result),
                Err(error) => errors.push(ElementConversionError { index, error }),
            }
        }

        (converted, errors)
    }

    #[cfg(feature = "simd")]
    fn convert_slice_simd_with_errors<S, T>(
        &self,
        slice: &[S],
    ) -> (Vec<T>, Vec<ElementConversionError>)
    where
        S: Copy + NumCast + PartialOrd + fmt::Display + 'static,
        T: Bounded + NumCast + PartialOrd + fmt::Display + Copy + 'static,
    {
        if self.can_use_simd_for_conversion::<S, T>() {
            self.convert_slice_simd_optimized(slice)
        } else {
            self.convert_slice_sequential_with_errors(slice)
        }
    }

    #[cfg(not(feature = "simd"))]
    fn convert_slice_simd_with_errors<S, T>(
        &self,
        slice: &[S],
    ) -> (Vec<T>, Vec<ElementConversionError>)
    where
        S: Copy + NumCast + PartialOrd + fmt::Display + 'static,
        T: Bounded + NumCast + PartialOrd + fmt::Display + Copy + 'static,
    {
        self.convert_slice_sequential_with_errors(slice)
    }

    #[cfg(feature = "parallel")]
    fn convert_slice_parallel_with_errors<S, T>(
        &self,
        slice: &[S],
    ) -> (Vec<T>, Vec<ElementConversionError>)
    where
        S: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
        T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Copy + 'static,
    {
        let chunk_size = self.config.parallel_chunk_size;
        let chunks: Vec<_> = slice.chunks(chunk_size).enumerate().collect();

        // Each chunk is converted independently; collect() on the indexed
        // parallel iterator preserves chunk order, so concatenating the
        // per-chunk results keeps element order.
        let results: Vec<_> = chunks
            .into_par_iter()
            .map(|(chunk_idx, chunk)| {
                let base_index = chunk_idx * chunk_size;
                let mut converted: Vec<T> = Vec::new();
                let mut errors = Vec::new();

                for (idx, &value) in chunk.iter().enumerate() {
                    let global_index = base_index + idx;
                    match value.to_numeric() {
                        Ok(result) => converted.push(result),
                        Err(error) => errors.push(ElementConversionError {
                            index: global_index,
                            error,
                        }),
                    }
                }

                (converted, errors)
            })
            .collect();

        let mut all_converted = Vec::new();
        let mut all_errors = Vec::new();

        for (converted, errors) in results {
            all_converted.extend(converted);
            all_errors.extend(errors);
        }

        (all_converted, all_errors)
    }

    #[cfg(not(feature = "parallel"))]
    fn convert_slice_parallel_with_errors<S, T>(
        &self,
        slice: &[S],
    ) -> (Vec<T>, Vec<ElementConversionError>)
    where
        S: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
        T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Copy + 'static,
    {
        self.convert_slice_sequential_with_errors(slice)
    }

    #[cfg(feature = "parallel")]
    fn convert_slice_parallel_clamped<S, T>(&self, slice: &[S]) -> Vec<T>
    where
        S: Copy + NumericConversion + Send + Sync,
        T: Bounded + NumCast + PartialOrd + Zero + Send + Sync,
    {
        slice
            .par_chunks(self.config.parallel_chunk_size)
            .flat_map(|chunk| {
                chunk
                    .iter()
                    .map(|&x| x.to_numeric_clamped())
                    .collect::<Vec<_>>()
            })
            .collect()
    }

    #[cfg(not(feature = "parallel"))]
    fn convert_slice_parallel_clamped<S, T>(&self, slice: &[S]) -> Vec<T>
    where
        S: Copy + NumericConversion + Send + Sync,
        T: Bounded + NumCast + PartialOrd + Zero + Send + Sync,
    {
        slice.iter().map(|&x| x.to_numeric_clamped()).collect()
    }

    /// Reports whether a dedicated SIMD path exists for the `(S, T)` pair.
    #[allow(dead_code)]
    #[cfg(feature = "simd")]
    fn can_use_simd_for_conversion<S: 'static, T: 'static>(&self) -> bool {
        use std::any::TypeId;

        let src_type = TypeId::of::<S>();
        let dst_type = TypeId::of::<T>();

        if src_type == TypeId::of::<f64>() && dst_type == TypeId::of::<f32>() {
            return true;
        }

        if src_type == TypeId::of::<f32>() && dst_type == TypeId::of::<f64>() {
            return true;
        }

        if src_type == TypeId::of::<i32>() && dst_type == TypeId::of::<f32>() {
            return true;
        }

        if src_type == TypeId::of::<i64>() && dst_type == TypeId::of::<f64>() {
            return true;
        }

        false
    }

    #[allow(dead_code)]
    #[cfg(not(feature = "simd"))]
    fn can_use_simd_for_conversion<S: 'static, T: 'static>(&self) -> bool {
        false
    }

    #[cfg(feature = "simd")]
    fn convert_slice_simd_optimized<S, T>(
        &self,
        slice: &[S],
    ) -> (Vec<T>, Vec<ElementConversionError>)
    where
        S: Copy + NumCast + PartialOrd + fmt::Display + 'static,
        T: Bounded + NumCast + PartialOrd + fmt::Display + Copy + 'static,
    {
        use std::any::TypeId;

        let src_type = TypeId::of::<S>();
        let dst_type = TypeId::of::<T>();

        if src_type == TypeId::of::<f64>() && dst_type == TypeId::of::<f32>() {
            if let Some(f64_slice) = slice
                .iter()
                .map(|x| x.to_numeric::<f64>().ok())
                .collect::<Option<Vec<_>>>()
            {
                let (converted, errors) = self.convert_f64_to_f32_simd_typed(&f64_slice);
                // T is known to be f32 in this branch, so the cast back
                // through NumCast cannot drop values.
                let typed_results: Vec<T> =
                    converted.into_iter().filter_map(|f| T::from(f)).collect();
                return (typed_results, errors);
            }
        }

        if src_type == TypeId::of::<f32>() && dst_type == TypeId::of::<f64>() {
            if let Some(f32_slice) = slice
                .iter()
                .map(|x| x.to_numeric::<f32>().ok())
                .collect::<Option<Vec<_>>>()
            {
                let (converted, errors) = self.convert_f32_to_f64_simd_typed(&f32_slice);
                let typed_results: Vec<T> =
                    converted.into_iter().filter_map(|f| T::from(f)).collect();
                return (typed_results, errors);
            }
        }

        if src_type == TypeId::of::<i32>() && dst_type == TypeId::of::<f32>() {
            if let Some(i32_slice) = slice
                .iter()
                .map(|x| x.to_numeric::<i32>().ok())
                .collect::<Option<Vec<_>>>()
            {
                let (converted, errors) = self.convert_i32_to_f32_simd_typed(&i32_slice);
                let typed_results: Vec<T> =
                    converted.into_iter().filter_map(|f| T::from(f)).collect();
                return (typed_results, errors);
            }
        }

        // Remaining pairs (including i64 -> f64) fall back to the scalar path.
        self.convert_slice_sequential_with_errors(slice)
    }

    #[cfg(feature = "simd")]
    fn convert_f64_to_f32_simd_typed(
        &self,
        slice: &[f64],
    ) -> (Vec<f32>, Vec<ElementConversionError>) {
        let mut converted = Vec::with_capacity(slice.len());
        let mut errors = Vec::new();

        let chunks = slice.chunks_exact(2);
        let remainder = chunks.remainder();

        for (chunk_idx, chunk) in chunks.enumerate() {
            // Lanes are packed for a future vectorized path; the checks
            // below stay per-element so each error keeps its exact index.
            let _lanes = f64x2::new([chunk[0], chunk[1]]);

            for (i, &val) in chunk.iter().enumerate() {
                let index = chunk_idx * 2 + i;
                if val.is_nan() || val.is_infinite() {
                    errors.push(ElementConversionError {
                        index,
                        error: NumericConversionError::NanOrInfinite,
                    });
                } else {
                    let f32_val = val as f32;
                    if f32_val.is_infinite() && !val.is_infinite() {
                        errors.push(ElementConversionError {
                            index,
                            error: NumericConversionError::Overflow {
                                value: val.to_string(),
                                max: f32::MAX.to_string(),
                            },
                        });
                    } else {
                        converted.push(f32_val);
                    }
                }
            }
        }

        for (i, &val) in remainder.iter().enumerate() {
            let index = slice.len() - remainder.len() + i;
            if val.is_nan() || val.is_infinite() {
                errors.push(ElementConversionError {
                    index,
                    error: NumericConversionError::NanOrInfinite,
                });
            } else {
                let f32_val = val as f32;
                if f32_val.is_infinite() && !val.is_infinite() {
                    errors.push(ElementConversionError {
                        index,
                        error: NumericConversionError::Overflow {
                            value: val.to_string(),
                            max: f32::MAX.to_string(),
                        },
                    });
                } else {
                    converted.push(f32_val);
                }
            }
        }

        (converted, errors)
    }

    #[cfg(feature = "simd")]
    fn convert_f32_to_f64_simd_typed(
        &self,
        slice: &[f32],
    ) -> (Vec<f64>, Vec<ElementConversionError>) {
        let mut converted = Vec::with_capacity(slice.len());
        let mut errors = Vec::new();

        let chunks = slice.chunks_exact(4);
        let remainder = chunks.remainder();

        for (chunk_idx, chunk) in chunks.enumerate() {
            // As above, lanes are packed but validation stays scalar.
            let _lanes = f32x4::new([chunk[0], chunk[1], chunk[2], chunk[3]]);

            for (i, &val) in chunk.iter().enumerate() {
                let index = chunk_idx * 4 + i;
                if val.is_nan() || val.is_infinite() {
                    errors.push(ElementConversionError {
                        index,
                        error: NumericConversionError::NanOrInfinite,
                    });
                } else {
                    converted.push(val as f64);
                }
            }
        }

        for (i, &val) in remainder.iter().enumerate() {
            let index = slice.len() - remainder.len() + i;
            if val.is_nan() || val.is_infinite() {
                errors.push(ElementConversionError {
                    index,
                    error: NumericConversionError::NanOrInfinite,
                });
            } else {
                converted.push(val as f64);
            }
        }

        (converted, errors)
    }

    #[cfg(feature = "simd")]
    fn convert_i32_to_f32_simd_typed(
        &self,
        slice: &[i32],
    ) -> (Vec<f32>, Vec<ElementConversionError>) {
        let mut converted = Vec::with_capacity(slice.len());
        // i32 -> f32 may lose precision for large magnitudes but never
        // fails, so no errors are produced.
        let errors: Vec<ElementConversionError> = Vec::new();

        let chunks = slice.chunks_exact(4);
        let remainder = chunks.remainder();

        for chunk in chunks {
            let _lanes = i32x4::new([chunk[0], chunk[1], chunk[2], chunk[3]]);

            for &val in chunk {
                converted.push(val as f32);
            }
        }

        for &val in remainder {
            converted.push(val as f32);
        }

        (converted, errors)
    }

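    /// Converts a slice of complex numbers component-wise. A sketch of
    /// typical usage (types are illustrative):
    ///
    /// ```ignore
    /// use num_complex::{Complex32, Complex64};
    /// let converter = BatchConverter::with_default_config();
    /// let zs = vec![Complex64::new(1.0, 2.0)];
    /// let zs32: Vec<Complex32> = converter.convert_complex_slice(&zs)?;
    /// ```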
    pub fn convert_complex_slice<S, T>(&self, slice: &[Complex<S>]) -> CoreResult<Vec<Complex<T>>>
    where
        S: Float + fmt::Display + Send + Sync,
        T: Float + Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync,
    {
        if slice.is_empty() {
            return Ok(Vec::new());
        }

        let mut result = Vec::with_capacity(slice.len());

        if self.config.use_parallel && slice.len() >= self.config.parallel_threshold {
            #[cfg(feature = "parallel")]
            {
                let chunks: Vec<_> = slice
                    .par_chunks(self.config.parallel_chunk_size)
                    .map(|chunk| {
                        chunk
                            .iter()
                            .map(|z| {
                                let real: T = z.re.to_numeric()?;
                                let imag: T = z.im.to_numeric()?;
                                Ok(Complex::new(real, imag))
                            })
                            .collect::<Result<Vec<_>, NumericConversionError>>()
                    })
                    .collect();

                for chunk_result in chunks {
                    result.extend(chunk_result.map_err(|e| {
                        CoreError::InvalidArgument(crate::error::ErrorContext::new(e.to_string()))
                    })?);
                }
            }

            #[cfg(not(feature = "parallel"))]
            {
                for z in slice {
                    let real: T = z.re.to_numeric().map_err(|e| {
                        CoreError::InvalidArgument(crate::error::ErrorContext::new(e.to_string()))
                    })?;
                    let imag: T = z.im.to_numeric().map_err(|e| {
                        CoreError::InvalidArgument(crate::error::ErrorContext::new(e.to_string()))
                    })?;
                    result.push(Complex::new(real, imag));
                }
            }
        } else {
            for z in slice {
                let real: T = z.re.to_numeric().map_err(|e| {
                    CoreError::InvalidArgument(crate::error::ErrorContext::new(e.to_string()))
                })?;
                let imag: T = z.im.to_numeric().map_err(|e| {
                    CoreError::InvalidArgument(crate::error::ErrorContext::new(e.to_string()))
                })?;
                result.push(Complex::new(real, imag));
            }
        }

        Ok(result)
    }
}

/// ndarray integration for batch conversions.
#[cfg(feature = "array")]
pub mod ndarray_integration {
    use super::*;
    use ::ndarray::{Array, ArrayBase, Data, Dimension};

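    // A sketch of using the array conversions below (illustrative; assumes
    // the "array" feature and a contiguous input array):
    //
    //     use ndarray::array;
    //     let a = array![[1.0f64, 2.0], [3.0, 4.0]];
    //     let converter = BatchConverter::with_default_config();
    //     let b: Array<f32, _> = converter.convert_array(&a)?;
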
    impl BatchConverter {
        /// Converts a contiguous array, preserving its shape.
        pub fn convert_array<S, T, D>(
            &self,
            array: &ArrayBase<S, D>,
        ) -> CoreResult<crate::ndarray::Array<T, D>>
        where
            S: Data,
            S::Elem: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
            T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Clone + Copy + 'static,
            D: Dimension,
        {
            let slice = array.as_slice().ok_or_else(|| {
                CoreError::InvalidArgument(crate::error::ErrorContext::new(
                    "Array is not contiguous".to_string(),
                ))
            })?;

            let converted = self.convert_slice(slice)?;

            let shape = array.raw_dim();
            Array::from_shape_vec(shape, converted).map_err(|e| {
                CoreError::InvalidArgument(crate::error::ErrorContext::new(format!(
                    "Failed to reshape converted array: {}",
                    e
                )))
            })
        }

        /// Converts a contiguous array with clamping, preserving its shape.
        pub fn convert_array_clamped<S, T, D>(
            &self,
            array: &ArrayBase<S, D>,
        ) -> CoreResult<crate::ndarray::Array<T, D>>
        where
            S: Data,
            S::Elem: Copy + NumericConversion + Send + Sync,
            T: Bounded + NumCast + PartialOrd + Zero + Send + Sync + Clone,
            D: Dimension,
        {
            let slice = array.as_slice().ok_or_else(|| {
                CoreError::InvalidArgument(crate::error::ErrorContext::new(
                    "Array is not contiguous".to_string(),
                ))
            })?;

            let converted = self.convert_slice_clamped(slice);

            let shape = array.raw_dim();
            Array::from_shape_vec(shape, converted).map_err(|e| {
                CoreError::InvalidArgument(crate::error::ErrorContext::new(format!(
                    "Failed to reshape converted array: {}",
                    e
                )))
            })
        }
    }
}

/// Convenience wrappers over [`BatchConverter`] for common conversions.
pub mod utils {
    use super::*;

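    /// Converts `f64` values to `f32`, erroring on NaN, infinite, or
    /// out-of-range inputs. A sketch of typical usage:
    ///
    /// ```ignore
    /// let halves = utils::f64_to_f32_batch(&[1.0, 2.5, 3.7])?;
    /// ```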
    pub fn f64_to_f32_batch(slice: &[f64]) -> CoreResult<Vec<f32>> {
        let converter = BatchConverter::with_default_config();
        converter.convert_slice(slice)
    }

    /// Converts `f32` values to `f64`.
    pub fn f32_to_f64_batch(slice: &[f32]) -> CoreResult<Vec<f64>> {
        let converter = BatchConverter::with_default_config();
        converter.convert_slice(slice)
    }

    /// Converts `i32` values to `f32`, clamping rather than failing.
    pub fn i32_to_f32_batch(slice: &[i32]) -> Vec<f32> {
        let converter = BatchConverter::with_default_config();
        converter.convert_slice_clamped(slice)
    }

    /// Converts `i64` values to `f64`, clamping rather than failing.
    pub fn i64_to_f64_batch(slice: &[i64]) -> Vec<f64> {
        let converter = BatchConverter::with_default_config();
        converter.convert_slice_clamped(slice)
    }

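    /// Times each available conversion strategy over the same input. A
    /// sketch of typical usage (sizes are illustrative):
    ///
    /// ```ignore
    /// let data: Vec<f64> = (0..100_000).map(|i| i as f64).collect();
    /// let timings = utils::benchmark_conversion_methods::<f64, f32>(&data);
    /// for (name, elapsed) in &timings {
    ///     println!("{name}: {elapsed:?}");
    /// }
    /// ```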
    pub fn benchmark_conversion_methods<S, T>(
        slice: &[S],
    ) -> std::collections::HashMap<String, std::time::Duration>
    where
        S: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
        T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Copy + 'static,
    {
        use std::time::Instant;
        let mut results = std::collections::HashMap::new();

        let start = Instant::now();
        let config = BatchConversionConfig::default()
            .with_simd(false)
            .with_parallel(false);
        let converter = BatchConverter::new(config);
        let _ = converter.convert_slice::<S, T>(slice);
        results.insert("sequential".to_string(), start.elapsed());

        #[cfg(feature = "simd")]
        {
            let start = Instant::now();
            let config = BatchConversionConfig::default()
                .with_simd(true)
                .with_parallel(false);
            let converter = BatchConverter::new(config);
            let _ = converter.convert_slice::<S, T>(slice);
            results.insert("simd".to_string(), start.elapsed());
        }

        #[cfg(feature = "parallel")]
        {
            let start = Instant::now();
            let config = BatchConversionConfig::default()
                .with_simd(false)
                .with_parallel(true);
            let converter = BatchConverter::new(config);
            let _ = converter.convert_slice::<S, T>(slice);
            results.insert("parallel".to_string(), start.elapsed());
        }

        #[cfg(all(feature = "simd", feature = "parallel"))]
        {
            let start = Instant::now();
            let config = BatchConversionConfig::default()
                .with_simd(true)
                .with_parallel(true);
            let converter = BatchConverter::new(config);
            let _ = converter.convert_slice::<S, T>(slice);
            results.insert("simd_parallel".to_string(), start.elapsed());
        }

        results
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use num_complex::Complex64;

    #[test]
    fn test_batch_conversion_config() {
        let config = BatchConversionConfig::default()
            .with_simd(true)
            .with_parallel(false)
            .with_chunk_size(512)
            .with_parallel_threshold(5000);

        assert!(config.use_simd);
        assert!(!config.use_parallel);
        assert_eq!(config.parallel_chunk_size, 512);
        assert_eq!(config.parallel_threshold, 5000);
    }

    #[test]
    fn test_sequential_conversion() {
        let data: Vec<f64> = vec![1.0, 2.5, 3.7, 4.2];
        let config = BatchConversionConfig::default()
            .with_simd(false)
            .with_parallel(false);
        let converter = BatchConverter::new(config);

        let result: Vec<f32> = converter.convert_slice(&data).expect("Operation failed");
        assert_eq!(result.len(), data.len());
        assert_eq!(result[0], 1.0f32);
        assert_eq!(result[1], 2.5f32);
    }

    #[test]
    fn test_conversion_with_errors() {
        let data: Vec<f64> = vec![1.0, f64::NAN, 3.0, f64::INFINITY];
        let converter = BatchConverter::with_default_config();

        let (converted, errors) = converter.convert_slice_with_errors::<f64, f32>(&data);
        assert_eq!(converted.len(), 2);
        assert_eq!(errors.len(), 2);
    }

    #[test]
    fn test_clamped_conversion() {
        let data: Vec<f64> = vec![1e20, 2.5, -1e20, 100.0];
        let converter = BatchConverter::with_default_config();

        let result: Vec<f32> = converter.convert_slice_clamped(&data);
        assert_eq!(result.len(), data.len());
        assert_eq!(result[0], 1e20f32);
        assert_eq!(result[1], 2.5f32);
        assert_eq!(result[2], -1e20f32);
        assert_eq!(result[3], 100.0f32);
    }

    #[test]
    fn test_complex_conversion() {
        let data: Vec<Complex64> = vec![
            Complex64::new(1.0, 2.0),
            Complex64::new(3.0, 4.0),
            Complex64::new(-1.0, -2.0),
        ];
        let converter = BatchConverter::with_default_config();

        let result: Vec<num_complex::Complex32> = converter
            .convert_complex_slice(&data)
            .expect("Operation failed");
        assert_eq!(result.len(), data.len());
        assert_eq!(result[0].re, 1.0f32);
        assert_eq!(result[0].im, 2.0f32);
    }

    #[test]
    fn test_empty_slice() {
        let data: Vec<f64> = vec![];
        let converter = BatchConverter::with_default_config();

        let result: Vec<f32> = converter.convert_slice(&data).expect("Operation failed");
        assert_eq!(result.len(), 0);

        let (converted, errors) = converter.convert_slice_with_errors::<f64, f32>(&data);
        assert_eq!(converted.len(), 0);
        assert_eq!(errors.len(), 0);
    }

    #[cfg(feature = "simd")]
    #[test]
    fn test_simd_detection() {
        let converter = BatchConverter::with_default_config();

        assert!(converter.can_use_simd_for_conversion::<f64, f32>());
        assert!(converter.can_use_simd_for_conversion::<f32, f64>());
        assert!(converter.can_use_simd_for_conversion::<i32, f32>());

        assert!(!converter.can_use_simd_for_conversion::<i8, i16>());
    }

    #[test]
    fn test_large_dataset_threshold() {
        let data: Vec<f64> = (0..20000).map(|i| i as f64 * 0.1).collect();
        let config = BatchConversionConfig::default().with_parallel_threshold(10000);
        let converter = BatchConverter::new(config);

        let result: Vec<f32> = converter.convert_slice(&data).expect("Operation failed");
        assert_eq!(result.len(), data.len());
    }

    #[test]
    fn test_utils_functions() {
        let f64_data: Vec<f64> = vec![1.0, 2.5, 3.7];
        let f32_result = utils::f64_to_f32_batch(&f64_data).expect("Operation failed");
        assert_eq!(f32_result.len(), f64_data.len());

        let f32_data: Vec<f32> = vec![1.0, 2.5, 3.7];
        let f64_result = utils::f32_to_f64_batch(&f32_data).expect("Operation failed");
        assert_eq!(f64_result.len(), f32_data.len());

        let i32_data: Vec<i32> = vec![1, 2, 3];
        let f32_result = utils::i32_to_f32_batch(&i32_data);
        assert_eq!(f32_result.len(), i32_data.len());
    }
}