use crate::error::{CoreError, CoreResult};
use crate::types::{NumericConversion, NumericConversionError};
use num_complex::Complex;
use num_traits::{Bounded, Float, NumCast, Zero};
use std::fmt;
#[cfg(feature = "simd")]
use wide::{f32x4, f64x2, i32x4};
#[cfg(feature = "parallel")]
use crate::parallel_ops::*;
/// Tuning knobs controlling how batch numeric conversions are executed.
#[derive(Debug, Clone)]
pub struct BatchConversionConfig {
/// Enable SIMD-accelerated conversion paths (only effective with the `simd` feature).
pub use_simd: bool,
/// Enable parallel conversion (only effective with the `parallel` feature).
pub use_parallel: bool,
/// Number of elements handed to each parallel work chunk.
pub parallel_chunk_size: usize,
/// Optional override for the SIMD vector width.
/// NOTE(review): not read anywhere in this module — confirm external users before removing.
pub simd_vector_size: Option<usize>,
/// Minimum slice length before the parallel path is considered.
pub parallel_threshold: usize,
}
impl Default for BatchConversionConfig {
fn default() -> Self {
Self {
use_simd: cfg!(feature = "simd"),
use_parallel: cfg!(feature = "parallel"),
parallel_chunk_size: 1024,
simd_vector_size: None,
parallel_threshold: 10000,
}
}
}
impl BatchConversionConfig {
    /// Toggles the SIMD conversion path.
    pub fn with_simd(mut self, enable: bool) -> Self {
        self.use_simd = enable;
        self
    }

    /// Toggles the parallel conversion path.
    pub fn with_parallel(mut self, enable: bool) -> Self {
        self.use_parallel = enable;
        self
    }

    /// Sets the number of elements per parallel chunk.
    pub fn with_chunk_size(mut self, chunk_size: usize) -> Self {
        self.parallel_chunk_size = chunk_size;
        self
    }

    /// Sets the minimum slice length that triggers parallel processing.
    pub fn with_parallel_threshold(mut self, threshold: usize) -> Self {
        self.parallel_threshold = threshold;
        self
    }
}
/// A conversion failure paired with the position of the offending element.
#[derive(Debug, Clone)]
pub struct ElementConversionError {
/// Index of the failing element in the input slice.
pub index: usize,
/// The underlying numeric conversion error.
pub error: NumericConversionError,
}
/// Outcome of a batch conversion that retains both successes and failures.
/// NOTE(review): not constructed anywhere in this module — confirm whether
/// external callers use it before removing.
#[derive(Debug, Clone)]
pub struct BatchConversionResult<T> {
/// Successfully converted values paired with their original indices.
pub converted: Vec<(usize, T)>,
/// Per-element conversion failures.
pub errors: Vec<ElementConversionError>,
}
/// Converts slices (and optionally ndarrays) of numbers in batch, choosing a
/// strategy (sequential / SIMD / parallel) from its [`BatchConversionConfig`].
pub struct BatchConverter {
// Strategy configuration: SIMD/parallel toggles, chunk size, threshold.
config: BatchConversionConfig,
}
impl BatchConverter {
/// Creates a converter with the given configuration.
pub fn new(config: BatchConversionConfig) -> Self {
Self { config }
}
/// Creates a converter using [`BatchConversionConfig::default`].
pub fn with_default_config() -> Self {
Self::new(BatchConversionConfig::default())
}
/// Converts every element of `slice` to `T`, collecting successes and
/// per-index failures instead of aborting on the first error.
///
/// Strategy selection: parallel when enabled and the input reaches the
/// configured threshold, otherwise SIMD when enabled, otherwise sequential.
pub fn convert_slice_witherrors<S, T>(
    &self,
    slice: &[S],
) -> (Vec<T>, Vec<ElementConversionError>)
where
    S: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
    T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Copy + 'static,
{
    if slice.is_empty() {
        return (Vec::new(), Vec::new());
    }
    let parallel_eligible =
        self.config.use_parallel && slice.len() >= self.config.parallel_threshold;
    if parallel_eligible {
        self.convert_slice_parallel_witherrors(slice)
    } else if self.config.use_simd {
        self.convert_slice_simd_witherrors(slice)
    } else {
        self.convert_slice_sequential_witherrors(slice)
    }
}
pub fn convert_slice<S, T>(&self, slice: &[S]) -> CoreResult<Vec<T>>
where
S: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Copy + 'static,
{
let (converted, errors) = self.convert_slice_witherrors(slice);
if !errors.is_empty() {
return Err(CoreError::InvalidArgument(crate::error::ErrorContext::new(
{
let numerrors = errors.len();
format!("Batch conversion failed for {numerrors} elements")
},
)));
}
Ok(converted)
}
/// Converts every element of `slice` to `T`, clamping out-of-range values
/// to `T`'s representable bounds instead of reporting errors.
pub fn convert_slice_clamped<S, T>(&self, slice: &[S]) -> Vec<T>
where
    S: Copy + NumericConversion + Send + Sync,
    T: Bounded + NumCast + PartialOrd + Zero + Send + Sync,
{
    if slice.is_empty() {
        Vec::new()
    } else if self.config.use_parallel && slice.len() >= self.config.parallel_threshold {
        self.convert_slice_parallel_clamped(slice)
    } else {
        slice.iter().map(|&v| v.to_numeric_clamped()).collect()
    }
}
/// Sequential fallback: converts one element at a time, recording each
/// failure with its input index.
fn convert_slice_sequential_witherrors<S, T>(
    &self,
    slice: &[S],
) -> (Vec<T>, Vec<ElementConversionError>)
where
    S: Copy + NumCast + PartialOrd + fmt::Display + 'static,
    T: Bounded + NumCast + PartialOrd + fmt::Display + Copy + 'static,
{
    // Preallocate for the common all-success case to avoid repeated growth
    // (the original started from an empty Vec and reallocated on the way up).
    let mut converted = Vec::with_capacity(slice.len());
    let mut errors = Vec::new();
    for (index, &value) in slice.iter().enumerate() {
        match value.to_numeric() {
            Ok(result) => converted.push(result),
            Err(error) => errors.push(ElementConversionError { index, error }),
        }
    }
    (converted, errors)
}
/// SIMD-aware dispatcher: uses the specialized kernel when one exists for
/// the `(S, T)` pair, otherwise falls back to sequential conversion.
#[cfg(feature = "simd")]
fn convert_slice_simd_witherrors<S, T>(
    &self,
    slice: &[S],
) -> (Vec<T>, Vec<ElementConversionError>)
where
    S: Copy + NumCast + PartialOrd + fmt::Display + 'static,
    T: Bounded + NumCast + PartialOrd + fmt::Display + Copy + 'static,
{
    if !self.can_use_simd_for_conversion::<S, T>() {
        return self.convert_slice_sequential_witherrors(slice);
    }
    self.convert_slice_simd_optimized(slice)
}
/// Non-SIMD build fallback: delegates directly to the sequential path.
#[cfg(not(feature = "simd"))]
fn convert_slice_simd_witherrors<S, T>(
&self,
slice: &[S],
) -> (Vec<T>, Vec<ElementConversionError>)
where
S: Copy + NumCast + PartialOrd + fmt::Display + 'static,
T: Bounded + NumCast + PartialOrd + fmt::Display + Copy + 'static,
{
self.convert_slice_sequential_witherrors(slice)
}
/// Parallel conversion: splits the input into fixed-size chunks, converts
/// them on the thread pool, then stitches results back together in order.
#[cfg(feature = "parallel")]
fn convert_slice_parallel_witherrors<S, T>(
    &self,
    slice: &[S],
) -> (Vec<T>, Vec<ElementConversionError>)
where
    S: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
    T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Copy + 'static,
{
    let chunk_size = self.config.parallel_chunk_size;
    // `par_chunks(..).enumerate()` avoids materializing the intermediate
    // Vec of (index, chunk) pairs the original collected before iterating.
    let results: Vec<_> = slice
        .par_chunks(chunk_size)
        .enumerate()
        .map(|(chunk_idx, chunk)| {
            let base_index = chunk_idx * chunk_size;
            let mut converted: Vec<T> = Vec::with_capacity(chunk.len());
            let mut errors = Vec::new();
            for (offset, &value) in chunk.iter().enumerate() {
                match value.to_numeric() {
                    Ok(result) => converted.push(result),
                    Err(error) => errors.push(ElementConversionError {
                        // Translate the chunk-local offset back to the
                        // caller-visible index in the original slice.
                        index: base_index + offset,
                        error,
                    }),
                }
            }
            (converted, errors)
        })
        .collect();
    // Chunks come back in input order, so concatenation preserves ordering.
    let mut all_converted = Vec::with_capacity(slice.len());
    let mut allerrors = Vec::new();
    for (converted, errors) in results {
        all_converted.extend(converted);
        allerrors.extend(errors);
    }
    (all_converted, allerrors)
}
/// Non-parallel build fallback: delegates directly to the sequential path.
#[cfg(not(feature = "parallel"))]
fn convert_slice_parallel_witherrors<S, T>(
&self,
slice: &[S],
) -> (Vec<T>, Vec<ElementConversionError>)
where
S: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Copy + 'static,
{
self.convert_slice_sequential_witherrors(slice)
}
/// Parallel clamped conversion.
///
/// Uses `flat_map_iter` so each chunk is converted with a plain serial
/// iterator, removing the per-chunk `Vec` allocation that the previous
/// `flat_map` + `collect::<Vec<_>>()` version paid.
#[cfg(feature = "parallel")]
fn convert_slice_parallel_clamped<S, T>(&self, slice: &[S]) -> Vec<T>
where
    S: Copy + NumericConversion + Send + Sync,
    T: Bounded + NumCast + PartialOrd + Zero + Send + Sync,
{
    slice
        .par_chunks(self.config.parallel_chunk_size)
        .flat_map_iter(|chunk| chunk.iter().map(|&x| x.to_numeric_clamped()))
        .collect()
}
/// Non-parallel build fallback: sequential clamped conversion.
#[cfg(not(feature = "parallel"))]
fn convert_slice_parallel_clamped<S, T>(&self, slice: &[S]) -> Vec<T>
where
S: Copy + NumericConversion + Send + Sync,
T: Bounded + NumCast + PartialOrd + Zero + Send + Sync,
{
slice.iter().map(|&x| x.to_numeric_clamped()).collect()
}
/// Reports whether a specialized SIMD kernel exists for converting `S` to `T`.
///
/// NOTE(review): `i64 -> f64` is reported as SIMD-capable here, but
/// `convert_slice_simd_optimized` has no `i64` branch and silently falls back
/// to the sequential path — confirm whether a kernel was intended.
#[allow(dead_code)]
#[cfg(feature = "simd")]
fn can_use_simd_for_conversion<S: 'static, T: 'static>(&self) -> bool {
    use std::any::TypeId;
    let src = TypeId::of::<S>();
    let dst = TypeId::of::<T>();
    (src == TypeId::of::<f64>() && dst == TypeId::of::<f32>())
        || (src == TypeId::of::<f32>() && dst == TypeId::of::<f64>())
        || (src == TypeId::of::<i32>() && dst == TypeId::of::<f32>())
        || (src == TypeId::of::<i64>() && dst == TypeId::of::<f64>())
}
/// Non-SIMD build fallback: no type pair is SIMD-capable.
#[allow(dead_code)]
#[cfg(not(feature = "simd"))]
fn can_use_simd_for_conversion<S: 'static, T: 'static>(&self) -> bool {
false
}
/// Dispatches to a typed conversion kernel chosen by the runtime `TypeId`s of
/// `S` and `T`; falls back to the sequential path when no kernel matches or
/// when any element fails the preliminary widening to the kernel's input type.
#[cfg(feature = "simd")]
fn convert_slice_simd_optimized<S, T>(
&self,
slice: &[S],
) -> (Vec<T>, Vec<ElementConversionError>)
where
S: Copy + NumCast + PartialOrd + fmt::Display + 'static,
T: Bounded + NumCast + PartialOrd + fmt::Display + Copy + 'static,
{
use std::any::TypeId;
let src_type = TypeId::of::<S>();
let dst_type = TypeId::of::<T>();
// f64 -> f32 kernel.
if src_type == TypeId::of::<f64>() && dst_type == TypeId::of::<f32>() {
// `collect::<Option<Vec<_>>>()` yields None if ANY element fails the
// preliminary conversion; we then fall through to the sequential path
// so failures are reported per element with correct indices.
if let Some(f64_slice) = slice
.iter()
.map(|x| x.to_numeric::<f64>().ok())
.collect::<Option<Vec<_>>>()
{
let (converted, errors) = self.convert_f64_to_f32_simd_typed(&f64_slice);
// S == f64 and T == f32 were verified above, so `T::from` cannot fail
// here; note `filter_map` would silently drop elements if it ever did.
let typed_results: Vec<T> =
converted.into_iter().filter_map(|f| T::from(f)).collect();
return (typed_results, errors);
}
}
// f32 -> f64 kernel.
if src_type == TypeId::of::<f32>() && dst_type == TypeId::of::<f64>() {
if let Some(f32_slice) = slice
.iter()
.map(|x| x.to_numeric::<f32>().ok())
.collect::<Option<Vec<_>>>()
{
let (converted, errors) = self.convert_f32_to_f64_simd_typed(&f32_slice);
let typed_results: Vec<T> =
converted.into_iter().filter_map(|f| T::from(f)).collect();
return (typed_results, errors);
}
}
// i32 -> f32 kernel (infallible).
if src_type == TypeId::of::<i32>() && dst_type == TypeId::of::<f32>() {
if let Some(i32_slice) = slice
.iter()
.map(|x| x.to_numeric::<i32>().ok())
.collect::<Option<Vec<_>>>()
{
let (converted, errors) = self.convert_i32_to_f32_simd_typed(&i32_slice);
let typed_results: Vec<T> =
converted.into_iter().filter_map(|f| T::from(f)).collect();
return (typed_results, errors);
}
}
// No kernel matched (or preliminary conversion failed): sequential fallback.
self.convert_slice_sequential_witherrors(slice)
}
/// Converts `f64 -> f32`, rejecting NaN/infinite inputs and finite values
/// whose magnitude overflows `f32`.
///
/// Successes and failures are returned in separate vectors: when errors
/// occur, `converted` is shorter than the input (callers either require
/// `errors` to be empty or treat `converted` as the surviving subset).
#[cfg(feature = "simd")]
fn convert_f64_to_f32_simd_typed(
    &self,
    slice: &[f64],
) -> (Vec<f32>, Vec<ElementConversionError>) {
    // The previous version built `f64x2` vectors that were never used and
    // duplicated the per-element logic across a chunk loop and a remainder
    // loop. A single scalar pass is equivalent (same push order, same
    // indices) and simpler; true SIMD narrowing can be reintroduced once
    // error detection is vectorized too.
    let mut converted = Vec::with_capacity(slice.len());
    let mut errors = Vec::new();
    for (index, &val) in slice.iter().enumerate() {
        if val.is_nan() || val.is_infinite() {
            errors.push(ElementConversionError {
                index,
                error: NumericConversionError::NanOrInfinite,
            });
            continue;
        }
        let narrowed = val as f32;
        if narrowed.is_infinite() {
            // Finite f64 saturated to infinity when narrowed: overflow.
            errors.push(ElementConversionError {
                index,
                error: NumericConversionError::Overflow {
                    value: val.to_string(),
                    max: f32::MAX.to_string(),
                },
            });
        } else {
            converted.push(narrowed);
        }
    }
    (converted, errors)
}
/// Converts `f32 -> f64`, rejecting NaN/infinite inputs. Widening is
/// otherwise exact, so no overflow check is needed.
#[cfg(feature = "simd")]
fn convert_f32_to_f64_simd_typed(
    &self,
    slice: &[f32],
) -> (Vec<f64>, Vec<ElementConversionError>) {
    // The previous version built `f32x4` vectors that were never used and
    // duplicated the element logic across chunk and remainder loops; one
    // scalar pass produces identical output in identical order.
    let mut converted = Vec::with_capacity(slice.len());
    let mut errors = Vec::new();
    for (index, &val) in slice.iter().enumerate() {
        if val.is_nan() || val.is_infinite() {
            errors.push(ElementConversionError {
                index,
                error: NumericConversionError::NanOrInfinite,
            });
        } else {
            converted.push(f64::from(val));
        }
    }
    (converted, errors)
}
/// Converts `i32 -> f32`. Every i32 maps to a finite f32 (values above 2^24
/// may round, matching `as`-cast semantics), so this cannot fail and the
/// error list is always empty.
#[cfg(feature = "simd")]
fn convert_i32_to_f32_simd_typed(
    &self,
    slice: &[i32],
) -> (Vec<f32>, Vec<ElementConversionError>) {
    // The previous version constructed unused `i32x4` vectors; a straight
    // map is equivalent and auto-vectorizes well.
    let converted = slice.iter().map(|&v| v as f32).collect();
    (converted, Vec::new())
}
/// Converts a slice of complex numbers component-wise from `Complex<S>` to
/// `Complex<T>`.
///
/// # Errors
/// Fails on the first component that cannot be converted, wrapping the
/// underlying error as [`CoreError::InvalidArgument`].
pub fn convert_complex_slice<S, T>(&self, slice: &[Complex<S>]) -> CoreResult<Vec<Complex<T>>>
where
    S: Float + fmt::Display + Send + Sync,
    T: Float + Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync,
{
    if slice.is_empty() {
        return Ok(Vec::new());
    }
    #[cfg(feature = "parallel")]
    if self.config.use_parallel && slice.len() >= self.config.parallel_threshold {
        // Convert chunks on the thread pool; each chunk short-circuits on
        // its first failing element.
        let chunks: Vec<_> = slice
            .par_chunks(self.config.parallel_chunk_size)
            .map(|chunk| {
                chunk
                    .iter()
                    .map(|z| {
                        let real: T = z.re.to_numeric()?;
                        let imag: T = z.im.to_numeric()?;
                        Ok(Complex::new(real, imag))
                    })
                    .collect::<Result<Vec<_>, NumericConversionError>>()
            })
            .collect();
        let mut result = Vec::with_capacity(slice.len());
        for chunk_result in chunks {
            result.extend(chunk_result.map_err(|e| {
                CoreError::InvalidArgument(crate::error::ErrorContext::new(e.to_string()))
            })?);
        }
        return Ok(result);
    }
    // Sequential path — also used when the `parallel` feature is disabled or
    // the input is below the threshold. The original duplicated this loop
    // verbatim in two branches; it now exists once.
    let mut result = Vec::with_capacity(slice.len());
    for z in slice {
        let real: T = z.re.to_numeric().map_err(|e| {
            CoreError::InvalidArgument(crate::error::ErrorContext::new(e.to_string()))
        })?;
        let imag: T = z.im.to_numeric().map_err(|e| {
            CoreError::InvalidArgument(crate::error::ErrorContext::new(e.to_string()))
        })?;
        result.push(Complex::new(real, imag));
    }
    Ok(result)
}
}
#[cfg(feature = "array")]
pub mod ndarray_integration {
    use super::*;
    use ::ndarray::{Array, ArrayBase, Data, Dimension};

    /// Views `array` as a contiguous slice, or reports a descriptive error.
    /// Shared by both conversion entry points (previously duplicated).
    fn as_contiguous_slice<S, D>(array: &ArrayBase<S, D>) -> CoreResult<&[S::Elem]>
    where
        S: Data,
        D: Dimension,
    {
        array.as_slice().ok_or_else(|| {
            CoreError::InvalidArgument(crate::error::ErrorContext::new(
                "Array is not contiguous".to_string(),
            ))
        })
    }

    /// Rebuilds an array of the original shape from converted elements.
    fn rebuild<T, D>(shape: D, data: Vec<T>) -> CoreResult<Array<T, D>>
    where
        D: Dimension,
    {
        Array::from_shape_vec(shape, data).map_err(|e| {
            CoreError::InvalidArgument(crate::error::ErrorContext::new(format!(
                "Failed to reshape converted array: {}",
                e
            )))
        })
    }

    impl BatchConverter {
        /// Converts an ndarray element-wise, failing if any element cannot
        /// be converted. The array must be contiguous in memory.
        pub fn convert_array<S, T, D>(
            &self,
            array: &ArrayBase<S, D>,
        ) -> CoreResult<crate::ndarray::Array<T, D>>
        where
            S: Data,
            S::Elem: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
            T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Clone + Copy + 'static,
            D: Dimension,
        {
            let slice = as_contiguous_slice(array)?;
            let converted = self.convert_slice(slice)?;
            rebuild(array.raw_dim(), converted)
        }

        /// Converts an ndarray element-wise, clamping out-of-range values
        /// instead of failing. The array must be contiguous in memory.
        pub fn convert_array_clamped<S, T, D>(
            &self,
            array: &ArrayBase<S, D>,
        ) -> CoreResult<crate::ndarray::Array<T, D>>
        where
            S: Data,
            S::Elem: Copy + NumericConversion + Send + Sync,
            T: Bounded + NumCast + PartialOrd + Zero + Send + Sync + Clone,
            D: Dimension,
        {
            let slice = as_contiguous_slice(array)?;
            let converted = self.convert_slice_clamped(slice);
            rebuild(array.raw_dim(), converted)
        }
    }
}
pub mod utils {
    use super::*;

    /// Converts `f64` values to `f32`, failing on NaN/infinity/overflow.
    pub fn f64_to_f32_batch(slice: &[f64]) -> CoreResult<Vec<f32>> {
        BatchConverter::with_default_config().convert_slice(slice)
    }

    /// Converts `f32` values to `f64`, failing on NaN/infinity.
    pub fn f32_to_f64_batch(slice: &[f32]) -> CoreResult<Vec<f64>> {
        BatchConverter::with_default_config().convert_slice(slice)
    }

    /// Converts `i32` values to `f32` with clamping semantics.
    pub fn i32_to_f32_batch(slice: &[i32]) -> Vec<f32> {
        BatchConverter::with_default_config().convert_slice_clamped(slice)
    }

    /// Converts `i64` values to `f64` with clamping semantics.
    pub fn i64_to_f64_batch(slice: &[i64]) -> Vec<f64> {
        BatchConverter::with_default_config().convert_slice_clamped(slice)
    }

    /// Times `convert_slice` under each strategy combination available at
    /// compile time and returns the elapsed durations keyed by strategy name
    /// ("sequential", "simd", "parallel", "simd_parallel").
    ///
    /// The timing boilerplate, previously repeated four times, is factored
    /// into a single closure.
    pub fn benchmark_conversion_methods<S, T>(
        slice: &[S],
    ) -> std::collections::HashMap<String, std::time::Duration>
    where
        S: Copy + NumCast + PartialOrd + fmt::Display + Send + Sync + 'static,
        T: Bounded + NumCast + PartialOrd + fmt::Display + Send + Sync + Copy + 'static,
    {
        use std::time::Instant;
        let mut results = std::collections::HashMap::new();
        // Records one timed run; the timer starts before converter setup,
        // matching the original measurement boundaries.
        let mut record = |label: &str, use_simd: bool, use_parallel: bool| {
            let start = Instant::now();
            let config = BatchConversionConfig::default()
                .with_simd(use_simd)
                .with_parallel(use_parallel);
            let converter = BatchConverter::new(config);
            let _ = converter.convert_slice::<S, T>(slice);
            results.insert(label.to_string(), start.elapsed());
        };
        record("sequential", false, false);
        #[cfg(feature = "simd")]
        record("simd", true, false);
        #[cfg(feature = "parallel")]
        record("parallel", false, true);
        #[cfg(all(feature = "simd", feature = "parallel"))]
        record("simd_parallel", true, true);
        results
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use num_complex::Complex64;

    #[test]
    fn test_batch_conversion_config() {
        let config = BatchConversionConfig::default()
            .with_simd(true)
            .with_parallel(false)
            .with_chunk_size(512)
            .with_parallel_threshold(5000);
        assert!(config.use_simd);
        assert!(!config.use_parallel);
        assert_eq!(config.parallel_chunk_size, 512);
        assert_eq!(config.parallel_threshold, 5000);
    }

    #[test]
    fn test_sequential_conversion() {
        let data: Vec<f64> = vec![1.0, 2.5, 3.7, 4.2];
        let config = BatchConversionConfig::default()
            .with_simd(false)
            .with_parallel(false);
        let converter = BatchConverter::new(config);
        let result: Vec<f32> = converter.convert_slice(&data).expect("Operation failed");
        assert_eq!(result.len(), data.len());
        assert_eq!(result[0], 1.0f32);
        assert_eq!(result[1], 2.5f32);
    }

    #[test]
    fn test_conversion_witherrors() {
        // NaN and infinity must surface as per-element errors, not panics.
        let data: Vec<f64> = vec![1.0, f64::NAN, 3.0, f64::INFINITY];
        let converter = BatchConverter::with_default_config();
        let (converted, errors) = converter.convert_slice_witherrors::<f64, f32>(&data);
        assert_eq!(converted.len(), 2);
        assert_eq!(errors.len(), 2);
    }

    #[test]
    fn test_clamped_conversion() {
        let data: Vec<f64> = vec![1e20, 2.5, -1e20, 100.0];
        let converter = BatchConverter::with_default_config();
        let result: Vec<f32> = converter.convert_slice_clamped(&data);
        assert_eq!(result.len(), data.len());
        // 1e20 is representable in f32 (max ~3.4e38), so no clamping occurs.
        assert_eq!(result[0], 1e20f32);
        assert_eq!(result[1], 2.5f32);
        assert_eq!(result[2], -1e20f32);
        assert_eq!(result[3], 100.0f32);
    }

    #[test]
    fn test_complex_conversion() {
        let data: Vec<Complex64> = vec![
            Complex64::new(1.0, 2.0),
            Complex64::new(3.0, 4.0),
            Complex64::new(-1.0, -2.0),
        ];
        let converter = BatchConverter::with_default_config();
        let result: Vec<num_complex::Complex32> = converter
            .convert_complex_slice(&data)
            .expect("Operation failed");
        assert_eq!(result.len(), data.len());
        assert_eq!(result[0].re, 1.0f32);
        assert_eq!(result[0].im, 2.0f32);
    }

    #[test]
    fn test_empty_slice() {
        let data: Vec<f64> = vec![];
        let converter = BatchConverter::with_default_config();
        let result: Vec<f32> = converter.convert_slice(&data).expect("Operation failed");
        assert_eq!(result.len(), 0);
        let (converted, errors) = converter.convert_slice_witherrors::<f64, f32>(&data);
        assert_eq!(converted.len(), 0);
        assert_eq!(errors.len(), 0);
    }

    #[cfg(feature = "simd")]
    #[test]
    fn test_simd_detection() {
        let converter = BatchConverter::with_default_config();
        assert!(converter.can_use_simd_for_conversion::<f64, f32>());
        assert!(converter.can_use_simd_for_conversion::<f32, f64>());
        assert!(converter.can_use_simd_for_conversion::<i32, f32>());
        assert!(!converter.can_use_simd_for_conversion::<i8, i16>());
    }

    #[test]
    fn test_large_dataset_threshold() {
        // Bug fix: the original generator was `(0..20000).map(|_| 0 as f64 * 0.1)`,
        // which produced 20_000 zeros; use the index so the data is non-degenerate.
        let data: Vec<f64> = (0..20000).map(|i| i as f64 * 0.1).collect();
        let config = BatchConversionConfig::default().with_parallel_threshold(10000);
        let converter = BatchConverter::new(config);
        let result: Vec<f32> = converter.convert_slice(&data).expect("Operation failed");
        assert_eq!(result.len(), data.len());
    }

    #[test]
    fn test_utils_functions() {
        let f64_data: Vec<f64> = vec![1.0, 2.5, 3.7];
        let f32_result = utils::f64_to_f32_batch(&f64_data).expect("Operation failed");
        assert_eq!(f32_result.len(), f64_data.len());

        let f32_data: Vec<f32> = vec![1.0, 2.5, 3.7];
        let f64_result = utils::f32_to_f64_batch(&f32_data).expect("Operation failed");
        assert_eq!(f64_result.len(), f32_data.len());

        let i32_data: Vec<i32> = vec![1, 2, 3];
        let f32_result = utils::i32_to_f32_batch(&i32_data);
        assert_eq!(f32_result.len(), i32_data.len());
    }
}