use torsh_core::error::{Result, TorshError};
/// Checks that `prob` lies in the closed interval `[0, 1]`.
///
/// Returns an `InvalidArgument` error mentioning `name` otherwise.
pub fn validate_probability(prob: f32, name: &str) -> Result<()> {
    if (0.0..=1.0).contains(&prob) {
        Ok(())
    } else {
        Err(TorshError::InvalidArgument(format!(
            "{name} must be between 0 and 1, got {prob}"
        )))
    }
}
/// Checks that a `(low, high)` pair is ordered, i.e. `range.0 <= range.1`
/// (equal endpoints are accepted).
///
/// Returns an `InvalidArgument` error mentioning `name` otherwise.
pub fn validate_range<T: PartialOrd + std::fmt::Debug>(range: (T, T), name: &str) -> Result<()> {
    if range.0 > range.1 {
        Err(TorshError::InvalidArgument(format!(
            "Invalid {name} range: {range:?}"
        )))
    } else {
        Ok(())
    }
}
/// Checks that `value` is strictly greater than `T::default()` — i.e.
/// strictly positive for the numeric types this is used with.
///
/// Returns an `InvalidArgument` error mentioning `name` otherwise.
pub fn validate_positive<T: PartialOrd + Default + std::fmt::Debug>(
    value: T,
    name: &str,
) -> Result<()> {
    let floor = T::default();
    // Keep the `<=` comparison (rather than a negated `>`) so behavior
    // for incomparable values under PartialOrd is unchanged.
    if value <= floor {
        Err(TorshError::InvalidArgument(format!(
            "{name} must be positive, got {value:?}"
        )))
    } else {
        Ok(())
    }
}
/// Checks that two slices have equal length; `name1`/`name2` label them
/// in the error message.
pub fn validate_same_length<T, U>(vec1: &[T], vec2: &[U], name1: &str, name2: &str) -> Result<()> {
    let (len1, len2) = (vec1.len(), vec2.len());
    if len1 == len2 {
        return Ok(());
    }
    Err(TorshError::InvalidArgument(format!(
        "{} and {} must have the same length, got {} and {}",
        name1, name2, len1, len2
    )))
}
/// Rejects an empty slice with an `InvalidArgument` error naming it.
pub fn validate_not_empty<T>(vec: &[T], name: &str) -> Result<()> {
    if vec.is_empty() {
        Err(TorshError::InvalidArgument(format!(
            "{name} cannot be empty"
        )))
    } else {
        Ok(())
    }
}
/// Shorthand constructors for the `TorshError` values used across this
/// crate, keeping call sites terse and error wording consistent.
pub mod errors {
    use torsh_core::error::TorshError;
    /// Out-of-bounds access: `index` into a container of `size` elements.
    pub fn invalid_index(index: usize, size: usize) -> TorshError {
        TorshError::IndexError { index, size }
    }
    /// Generic invalid-argument error with a caller-supplied message.
    pub fn invalid_argument(msg: impl Into<String>) -> TorshError {
        TorshError::InvalidArgument(msg.into())
    }
    /// Configuration error. Currently the same variant as
    /// `invalid_argument`; kept as a separate helper for call-site intent.
    pub fn config_error(msg: impl Into<String>) -> TorshError {
        TorshError::InvalidArgument(msg.into())
    }
    /// Error for operations handed an empty batch.
    pub fn empty_batch() -> TorshError {
        TorshError::InvalidArgument("Cannot process empty batch".to_string())
    }
    /// Shape mismatch between an expected and an actual tensor shape.
    pub fn shape_mismatch(expected: &[usize], got: &[usize]) -> TorshError {
        TorshError::ShapeMismatch {
            expected: expected.to_vec(),
            got: got.to_vec(),
        }
    }
    /// Missing-file error carrying the offending path.
    pub fn file_not_found(path: &std::path::Path) -> TorshError {
        TorshError::InvalidArgument(format!("File not found: {}", path.display()))
    }
    /// Format mismatch (e.g. wrong file or container format).
    pub fn invalid_format(expected: &str, got: &str) -> TorshError {
        TorshError::InvalidArgument(format!("Expected {expected} format, got {got}"))
    }
}
/// Generates a validating `new` constructor for simple config structs.
///
/// Each arm matches a (struct, field, validation-kind) pattern and emits
/// an `impl` whose `new` runs the matching validator from `$crate::utils`
/// before constructing the value.
#[macro_export]
macro_rules! validated_constructor {
    // `new(prob)` validated to lie in [0, 1].
    ($name:ident, $field:ident: f32, probability) => {
        impl $name {
            pub fn new($field: f32) -> Result<Self> {
                $crate::utils::validate_probability($field, stringify!($field))?;
                Ok(Self { $field })
            }
        }
    };
    // `new((lo, hi))` validated so the pair is ordered.
    ($name:ident, $field:ident: ($t1:ty, $t2:ty), range) => {
        impl $name {
            pub fn new($field: ($t1, $t2)) -> Result<Self> {
                $crate::utils::validate_range($field, stringify!($field))?;
                Ok(Self { $field })
            }
        }
    };
    // `new(value)` validated to be strictly positive.
    ($name:ident, $field:ident: $t:ty, positive) => {
        impl $name {
            pub fn new($field: $t) -> Result<Self> {
                $crate::utils::validate_positive($field, stringify!($field))?;
                Ok(Self { $field })
            }
        }
    };
    // Unvalidated `new(size)` for (width, height)-style structs.
    ($name:ident, size: ($t1:ty, $t2:ty)) => {
        impl $name {
            pub fn new(size: ($t1, $t2)) -> Self {
                Self { size }
            }
        }
    };
    // Multi-field constructor gated by a caller-supplied validator.
    // NOTE(review): `$validator(&$($field),+)` expands to
    // `validator(&f1, f2, ...)` — only the FIRST field is passed by
    // reference. Confirm this matches the validator signatures in use.
    ($name:ident, $($field:ident: $t:ty),+, validate = $validator:expr) => {
        impl $name {
            pub fn new($($field: $t),+) -> Result<Self> {
                $validator(&$($field),+)?;
                Ok(Self { $($field),+ })
            }
        }
    };
}
/// Placeholder for a generated builder API.
///
/// NOTE(review): this macro currently expands to NOTHING — every
/// invocation compiles to an empty token stream, so no builder is
/// actually generated. Either the implementation is pending or the
/// macro (and its re-export below) should be removed; confirm intent.
#[macro_export]
macro_rules! builder_pattern {
    ($name:ident, $($field:ident: $t:ty),+) => {
    };
}
/// Implements `Transform` for a type that applies `$transform_fn` with
/// probability `self.$prob_field` and otherwise passes the input through.
///
/// NOTE(review): the RNG is re-created with the FIXED seed 42 on every
/// `transform` call, so `rng.random::<f32>()` produces the same value
/// each time — for a given probability the transform is either always or
/// never applied, despite `is_deterministic` returning `false`. Confirm
/// whether an entropy-based seed was intended.
#[macro_export]
macro_rules! simple_random_transform {
    ($name:ident, $input:ty, $output:ty, $prob_field:ident, $transform_fn:expr) => {
        impl $crate::transforms::Transform<$input> for $name {
            type Output = $output;
            fn transform(&self, input: $input) -> Result<Self::Output> {
                use scirs2_core::random::{Random, Rng};
                // Constant seed — see NOTE above.
                let mut rng = Random::seed(42);
                if rng.random::<f32>() < self.$prob_field {
                    $transform_fn(input, &mut rng)
                } else {
                    Ok(input)
                }
            }
            fn is_deterministic(&self) -> bool {
                false
            }
        }
    };
}
/// Confirms that `path` exists on disk before it is used as a dataset
/// location; `name` labels the path in the error message.
pub fn validate_dataset_path(path: &std::path::Path, name: &str) -> Result<()> {
    if path.exists() {
        return Ok(());
    }
    Err(TorshError::InvalidArgument(format!(
        "{} path does not exist: {}",
        name,
        path.display()
    )))
}
/// Accepts `path` only when its extension (compared case-insensitively)
/// is one of `extensions`; paths without any extension are rejected.
pub fn validate_file_extension(path: &std::path::Path, extensions: &[&str]) -> Result<()> {
    let accepted = path.extension().map_or(false, |ext| {
        let ext = ext.to_string_lossy().to_lowercase();
        extensions.iter().any(|&e| e == ext)
    });
    if accepted {
        Ok(())
    } else {
        Err(TorshError::InvalidArgument(format!(
            "File must have one of these extensions: {:?}, got: {}",
            extensions,
            path.display()
        )))
    }
}
/// Checks that a tensor shape has exactly `expected_dims` dimensions;
/// `name` labels the tensor in the error message.
pub fn validate_tensor_shape(shape: &[usize], expected_dims: usize, name: &str) -> Result<()> {
    let actual = shape.len();
    if actual == expected_dims {
        return Ok(());
    }
    Err(TorshError::InvalidArgument(format!(
        "{} tensor must have {} dimensions, got {}",
        name, expected_dims, actual
    )))
}
/// Builds a `(width, height)` tuple after checking that both dimensions
/// are strictly positive (width is validated first, matching the
/// original error ordering).
pub fn create_size_tuple(width: usize, height: usize) -> Result<(usize, usize)> {
    for (value, label) in [(width, "width"), (height, "height")] {
        validate_positive(value, label)?;
    }
    Ok((width, height))
}
/// Types whose internal state can be restored to its initial value so
/// they can be reused from the start.
pub trait Resettable {
    /// Restores the value to its starting state.
    fn reset(&mut self);
}
/// Types that can be reconfigured at runtime from a config object of
/// type `T`; implementations return an error when the configuration is
/// rejected.
pub trait Configurable<T> {
    /// Applies `config`, or fails with a descriptive error.
    fn configure(&mut self, config: T) -> Result<()>;
}
/// Types that maintain an internal cache.
pub trait Cacheable {
    /// Drops all cached entries.
    fn clear_cache(&mut self);
    /// Fraction of lookups served from cache, when tracked; the default
    /// implementation reports no statistics.
    fn cache_hit_rate(&self) -> Option<f32> {
        None
    }
}
/// Lightweight progress reporter: call [`ProgressTracker::update`] once
/// per processed item and emit a report whenever it returns `true`.
pub struct ProgressTracker {
    current: usize,       // items processed so far
    total: usize,         // total number of items expected
    last_reported: f32,   // progress fraction at the last `true` update
    report_interval: f32, // minimum fraction between reports (0.1 = 10%)
}
impl ProgressTracker {
    /// Creates a tracker for `total` items, reporting every 10% of progress.
    pub fn new(total: usize) -> Self {
        Self {
            current: 0,
            total,
            last_reported: 0.0,
            report_interval: 0.1,
        }
    }
    /// Fraction of work done. Guarding `total == 0` (an empty workload
    /// counts as done) avoids the NaN/inf that `x / 0.0` produced before.
    fn progress(&self) -> f32 {
        if self.total == 0 {
            1.0
        } else {
            self.current as f32 / self.total as f32
        }
    }
    /// Records one processed item. Returns `true` when at least
    /// `report_interval` of progress has accumulated since the last
    /// report, signalling the caller to emit one.
    pub fn update(&mut self) -> bool {
        self.current += 1;
        let progress = self.progress();
        if progress - self.last_reported >= self.report_interval {
            self.last_reported = progress;
            true
        } else {
            false
        }
    }
    /// Progress as a percentage (100 for an empty workload; may exceed
    /// 100 if `update` is called more than `total` times, as before).
    pub fn percentage(&self) -> f32 {
        self.progress() * 100.0
    }
    /// Whether every expected item has been processed.
    pub fn is_complete(&self) -> bool {
        self.current >= self.total
    }
}
/// Timing helpers for measuring repeated operations.
pub mod performance {
    use std::collections::VecDeque;
    use std::time::{Duration, Instant};

    /// Records wall-clock durations and keeps a sliding window of the
    /// most recent samples (100 by default) for averaging.
    pub struct Timer {
        start: Instant,
        measurements: VecDeque<Duration>,
        max_samples: usize,
    }

    impl Timer {
        /// Creates a timer with a 100-sample window; the clock starts now.
        pub fn new() -> Self {
            Self {
                start: Instant::now(),
                measurements: VecDeque::new(),
                max_samples: 100,
            }
        }

        /// Restarts the clock for the next measurement.
        pub fn start(&mut self) {
            self.start = Instant::now();
        }

        /// Ends the current measurement, records it (evicting the oldest
        /// sample once the window is full), and returns the elapsed time.
        pub fn stop(&mut self) -> Duration {
            let elapsed = self.start.elapsed();
            while self.measurements.len() >= self.max_samples {
                self.measurements.pop_front();
            }
            self.measurements.push_back(elapsed);
            elapsed
        }

        /// Mean of the recorded samples, or `None` before any measurement.
        pub fn average(&self) -> Option<Duration> {
            let count = self.measurements.len() as u32;
            match count {
                0 => None,
                _ => Some(self.measurements.iter().sum::<Duration>() / count),
            }
        }

        /// Items per second, assuming `items` are processed per measured
        /// interval; `None` before any measurement.
        pub fn throughput(&self, items: usize) -> Option<f64> {
            self.average().map(|avg| items as f64 / avg.as_secs_f64())
        }
    }

    impl Default for Timer {
        fn default() -> Self {
            Self::new()
        }
    }
}
/// Buffer pooling and allocation-bookkeeping utilities.
pub mod memory {
    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    /// Reusable pool of `Vec<T>` buffers. Retains at most `capacity`
    /// idle buffers; fresh buffers are preallocated to `default_size`.
    pub struct MemoryPool<T> {
        available: Vec<Vec<T>>,
        capacity: usize,
        default_size: usize,
    }

    // No `Clone + Default` bounds (unlike before): nothing here clones
    // or defaults a `T`, so the pool works for any element type.
    impl<T> MemoryPool<T> {
        /// Creates a pool retaining at most `capacity` idle buffers,
        /// each freshly allocated with `default_size` reserved elements.
        pub fn new(capacity: usize, default_size: usize) -> Self {
            Self {
                available: Vec::with_capacity(capacity),
                capacity,
                default_size,
            }
        }

        /// Takes an idle buffer, or allocates a new one with
        /// `default_size` capacity when the pool is empty.
        pub fn get(&mut self) -> Vec<T> {
            self.available
                .pop()
                .unwrap_or_else(|| Vec::with_capacity(self.default_size))
        }

        /// Returns a buffer to the pool (cleared first); the buffer is
        /// simply dropped when the pool is already at capacity.
        pub fn put(&mut self, mut buffer: Vec<T>) {
            if self.available.len() < self.capacity {
                buffer.clear();
                self.available.push(buffer);
            }
        }

        /// Number of idle buffers currently held.
        pub fn size(&self) -> usize {
            self.available.len()
        }
    }

    /// A `MemoryPool` shareable across threads.
    pub type SharedMemoryPool<T> = Arc<Mutex<MemoryPool<T>>>;

    /// Manual accounting of named allocations: current and peak totals
    /// plus a per-name breakdown. Pure bookkeeping — it does not
    /// allocate or free memory itself.
    pub struct MemoryTracker {
        allocations: HashMap<String, usize>,
        peak_memory: usize,
        current_memory: usize,
    }

    impl MemoryTracker {
        /// Creates an empty tracker.
        pub fn new() -> Self {
            Self {
                allocations: HashMap::new(),
                peak_memory: 0,
                current_memory: 0,
            }
        }

        /// Records `size` bytes allocated under `name`, updating the peak.
        pub fn allocate(&mut self, name: &str, size: usize) {
            self.current_memory += size;
            self.peak_memory = self.peak_memory.max(self.current_memory);
            *self.allocations.entry(name.to_string()).or_insert(0) += size;
        }

        /// Records `size` bytes freed under `name`; saturates at zero so
        /// unmatched deallocations cannot underflow the counters.
        pub fn deallocate(&mut self, name: &str, size: usize) {
            self.current_memory = self.current_memory.saturating_sub(size);
            if let Some(total) = self.allocations.get_mut(name) {
                *total = total.saturating_sub(size);
            }
        }

        /// Bytes currently accounted as allocated.
        pub fn current_usage(&self) -> usize {
            self.current_memory
        }

        /// Highest value `current_usage` has reached.
        pub fn peak_usage(&self) -> usize {
            self.peak_memory
        }

        /// Per-name allocation totals.
        pub fn breakdown(&self) -> &HashMap<String, usize> {
            &self.allocations
        }
    }

    impl Default for MemoryTracker {
        fn default() -> Self {
            Self::new()
        }
    }
}
/// Batch-processing helpers: data-parallel chunk mapping and a
/// background-thread batching pipeline.
pub mod batch {
    use scirs2_core::parallel_ops::*;
    use std::sync::mpsc;
    use std::thread;
    /// Splits `data` into chunks of `batch_size` and maps `processor`
    /// over them in parallel, returning one result per chunk. The final
    /// chunk may be shorter than `batch_size`.
    /// NOTE(review): `par_chunks` comes from `scirs2_core::parallel_ops`
    /// and is assumed to behave like rayon's `par_chunks` (order
    /// preserved in `collect`) — confirm against that crate's docs.
    pub fn parallel_batch_process<T, R, F>(data: Vec<T>, batch_size: usize, processor: F) -> Vec<R>
    where
        T: Send + Sync,
        R: Send,
        F: Fn(&[T]) -> R + Send + Sync,
    {
        data.par_chunks(batch_size).map(processor).collect()
    }
    /// Pipelines batching onto a dedicated worker thread: items go in
    /// via `send`, are accumulated into batches of `batch_size`,
    /// processed, and read back via `recv`/`try_recv`.
    pub struct AsyncBatchProcessor<T, R> {
        sender: mpsc::Sender<Vec<T>>, // feeds raw items to the worker
        receiver: mpsc::Receiver<R>,  // yields processed batch results
        _handle: thread::JoinHandle<()>, // worker thread handle (detached on drop)
    }
    impl<T, R> AsyncBatchProcessor<T, R>
    where
        T: Send + 'static,
        R: Send + 'static,
    {
        /// Spawns the worker. It drains full batches as soon as enough
        /// items accumulate and flushes any shorter remainder once the
        /// input channel closes (i.e. when this processor is dropped).
        pub fn new<F>(batch_size: usize, processor: F) -> Self
        where
            F: Fn(Vec<T>) -> R + Send + 'static,
        {
            let (input_sender, input_receiver) = mpsc::channel();
            let (output_sender, output_receiver) = mpsc::channel();
            let handle = thread::spawn(move || {
                let mut buffer = Vec::with_capacity(batch_size);
                // Accumulate incoming items until a full batch exists.
                while let Ok(mut data) = input_receiver.recv() {
                    buffer.append(&mut data);
                    while buffer.len() >= batch_size {
                        let batch = buffer.drain(..batch_size).collect();
                        let result = processor(batch);
                        // Receiver gone: stop producing further results.
                        if output_sender.send(result).is_err() {
                            break;
                        }
                    }
                }
                // Input closed: flush the partial final batch; the send
                // failure is ignored because the consumer may be gone.
                if !buffer.is_empty() {
                    let result = processor(buffer);
                    let _ = output_sender.send(result);
                }
            });
            Self {
                sender: input_sender,
                receiver: output_receiver,
                _handle: handle,
            }
        }
        /// Queues items for batching on the worker thread.
        pub fn send(&self, data: Vec<T>) -> Result<(), mpsc::SendError<Vec<T>>> {
            self.sender.send(data)
        }
        /// Blocks until the next batch result is available.
        pub fn recv(&self) -> Result<R, mpsc::RecvError> {
            self.receiver.recv()
        }
        /// Non-blocking variant of [`Self::recv`].
        pub fn try_recv(&self) -> Result<R, mpsc::TryRecvError> {
            self.receiver.try_recv()
        }
    }
}
/// Thread-safe cache and statistics helpers built on `parking_lot` locks.
pub mod concurrent {
    use parking_lot::{Mutex, RwLock};
    use std::collections::{HashMap, VecDeque};
    use std::sync::Arc;

    /// Size-bounded, clone-out concurrent cache.
    ///
    /// When full, inserting a new key evicts an ARBITRARY existing entry
    /// (whatever `HashMap` iteration yields first) — this is not LRU.
    pub struct ConcurrentCache<K, V>
    where
        K: Eq + std::hash::Hash,
    {
        data: Arc<RwLock<HashMap<K, V>>>,
        max_size: usize,
    }

    impl<K, V> ConcurrentCache<K, V>
    where
        K: Eq + std::hash::Hash + Clone,
        V: Clone,
    {
        /// Creates a cache holding at most `max_size` entries.
        pub fn new(max_size: usize) -> Self {
            Self {
                data: Arc::new(RwLock::new(HashMap::new())),
                max_size,
            }
        }

        /// Returns a clone of the cached value, if present.
        pub fn get(&self, key: &K) -> Option<V> {
            self.data.read().get(key).cloned()
        }

        /// Inserts `key -> value`, first evicting an arbitrary entry
        /// when the cache is full and `key` is not already present.
        pub fn insert(&self, key: K, value: V) {
            let mut data = self.data.write();
            if data.len() >= self.max_size && !data.contains_key(&key) {
                if let Some(first_key) = data.keys().next().cloned() {
                    data.remove(&first_key);
                }
            }
            data.insert(key, value);
        }

        /// Removes and returns the value for `key`, if present.
        pub fn remove(&self, key: &K) -> Option<V> {
            self.data.write().remove(key)
        }

        /// Drops every entry.
        pub fn clear(&self) {
            self.data.write().clear();
        }

        /// Current number of entries.
        pub fn len(&self) -> usize {
            self.data.read().len()
        }

        /// Whether the cache holds no entries.
        pub fn is_empty(&self) -> bool {
            self.data.read().is_empty()
        }
    }

    /// Sliding-window collector of `f64` samples with basic statistics.
    pub struct StatisticsCollector {
        // VecDeque so evicting the oldest sample is O(1). The previous
        // `Vec::remove(0)` shifted every element per eviction and
        // panicked when `max_samples` was 0 (remove on an empty Vec).
        data: Arc<Mutex<VecDeque<f64>>>,
        max_samples: usize,
    }

    impl StatisticsCollector {
        /// Creates a collector keeping at most `max_samples` recent values.
        pub fn new(max_samples: usize) -> Self {
            Self {
                data: Arc::new(Mutex::new(VecDeque::new())),
                max_samples,
            }
        }

        /// Appends a sample, discarding the oldest once the window is full.
        pub fn add_sample(&self, value: f64) {
            let mut data = self.data.lock();
            if data.len() >= self.max_samples {
                data.pop_front();
            }
            data.push_back(value);
        }

        /// Arithmetic mean, or `None` when no samples are recorded.
        pub fn mean(&self) -> Option<f64> {
            let data = self.data.lock();
            if data.is_empty() {
                None
            } else {
                Some(data.iter().sum::<f64>() / data.len() as f64)
            }
        }

        /// Sample standard deviation (n - 1 denominator); requires at
        /// least two samples.
        pub fn std_dev(&self) -> Option<f64> {
            let data = self.data.lock();
            if data.len() < 2 {
                None
            } else {
                let mean = data.iter().sum::<f64>() / data.len() as f64;
                let variance =
                    data.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / (data.len() - 1) as f64;
                Some(variance.sqrt())
            }
        }

        /// Smallest and largest sample, or `None` with no samples.
        pub fn min_max(&self) -> Option<(f64, f64)> {
            let data = self.data.lock();
            if data.is_empty() {
                None
            } else {
                let min = data.iter().fold(f64::INFINITY, |a, &b| a.min(b));
                let max = data.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b));
                Some((min, max))
            }
        }

        /// Number of samples currently in the window.
        pub fn count(&self) -> usize {
            self.data.lock().len()
        }
    }
}
pub mod config {
use std::collections::HashMap;
use std::env;
use std::path::Path;
#[cfg(feature = "serialize")]
use std::fs;
use torsh_core::error::{Result, TorshError};
#[cfg(feature = "serialize")]
use serde::{Deserialize, Serialize};
/// Key/value configuration store with typed accessors. Keys loaded from
/// JSON files are flattened to dot-separated paths (e.g. `a.b`).
pub struct ConfigManager {
    values: HashMap<String, ConfigValue>, // flattened key -> typed value
    env_prefix: String, // prefix filter used by `load_from_env` (e.g. "TORSH_")
}
/// Dynamically typed configuration value mirroring the JSON data model.
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ConfigValue {
    String(String),
    Integer(i64),
    Float(f64),
    Boolean(bool),
    Array(Vec<ConfigValue>),
    Object(HashMap<String, ConfigValue>),
}
impl ConfigValue {
    /// Borrows the inner string, if this is a `String` value.
    pub fn as_string(&self) -> Option<&str> {
        if let ConfigValue::String(s) = self {
            Some(s)
        } else {
            None
        }
    }
    /// Returns the integer payload, if this is an `Integer` value.
    pub fn as_i64(&self) -> Option<i64> {
        if let ConfigValue::Integer(i) = self {
            Some(*i)
        } else {
            None
        }
    }
    /// Returns the value as `f64`; integers are widened, every other
    /// variant yields `None`.
    pub fn as_f64(&self) -> Option<f64> {
        match self {
            ConfigValue::Integer(i) => Some(*i as f64),
            ConfigValue::Float(f) => Some(*f),
            _ => None,
        }
    }
    /// Returns the boolean payload, if this is a `Boolean` value.
    pub fn as_bool(&self) -> Option<bool> {
        if let ConfigValue::Boolean(b) = self {
            Some(*b)
        } else {
            None
        }
    }
    /// Borrows the element list, if this is an `Array` value.
    pub fn as_array(&self) -> Option<&Vec<ConfigValue>> {
        if let ConfigValue::Array(arr) = self {
            Some(arr)
        } else {
            None
        }
    }
    /// Borrows the key/value map, if this is an `Object` value.
    pub fn as_object(&self) -> Option<&HashMap<String, ConfigValue>> {
        if let ConfigValue::Object(obj) = self {
            Some(obj)
        } else {
            None
        }
    }
}
impl ConfigManager {
    /// Creates an empty manager with the default `TORSH_` env prefix.
    pub fn new() -> Self {
        Self {
            values: HashMap::new(),
            env_prefix: "TORSH_".to_string(),
        }
    }
    /// Creates an empty manager with a custom environment prefix.
    pub fn with_env_prefix(prefix: &str) -> Self {
        Self {
            values: HashMap::new(),
            env_prefix: prefix.to_string(),
        }
    }
    /// Loads a JSON config file and flattens nested objects into
    /// dot-separated keys (requires the `serialize` feature).
    #[cfg(feature = "serialize")]
    pub fn load_from_file<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let content = fs::read_to_string(path).map_err(|e| {
            TorshError::InvalidArgument(format!("Failed to read config file: {}", e))
        })?;
        let json_value: serde_json::Value = serde_json::from_str(&content)
            .map_err(|e| TorshError::InvalidArgument(format!("Failed to parse JSON: {}", e)))?;
        self.load_from_json_value("", &json_value);
        Ok(())
    }
    /// Stub used when the `serialize` feature is disabled: always errors.
    #[cfg(not(feature = "serialize"))]
    pub fn load_from_file<P: AsRef<Path>>(&mut self, _path: P) -> Result<()> {
        Err(TorshError::InvalidArgument(
            "JSON loading requires 'serialize' feature. Enable with --features serialize"
                .to_string(),
        ))
    }
    /// Imports every environment variable starting with the configured
    /// prefix. The prefix is stripped and the remainder lowercased to
    /// form the key; values get best-effort typed parsing (see
    /// `set_from_string`).
    pub fn load_from_env(&mut self) {
        for (key, value) in env::vars() {
            if key.starts_with(&self.env_prefix) {
                let config_key = key
                    .strip_prefix(&self.env_prefix)
                    .expect("prefix exists as checked in starts_with")
                    .to_lowercase();
                self.set_from_string(&config_key, &value);
            }
        }
    }
    /// Inserts or overwrites the value stored under `key`.
    pub fn set(&mut self, key: &str, value: ConfigValue) {
        self.values.insert(key.to_string(), value);
    }
    /// Looks up the raw value for `key`.
    pub fn get(&self, key: &str) -> Option<&ConfigValue> {
        self.values.get(key)
    }
    /// String value for `key`, or `default` when missing or not a string.
    pub fn get_string(&self, key: &str, default: &str) -> String {
        self.get(key)
            .and_then(|v| v.as_string())
            .unwrap_or(default)
            .to_string()
    }
    /// Integer value for `key`, or `default` when missing or mistyped.
    pub fn get_i64(&self, key: &str, default: i64) -> i64 {
        self.get(key).and_then(|v| v.as_i64()).unwrap_or(default)
    }
    /// Float value for `key` (integers widen), or `default`.
    pub fn get_f64(&self, key: &str, default: f64) -> f64 {
        self.get(key).and_then(|v| v.as_f64()).unwrap_or(default)
    }
    /// Boolean value for `key`, or `default` when missing or mistyped.
    pub fn get_bool(&self, key: &str, default: bool) -> bool {
        self.get(key).and_then(|v| v.as_bool()).unwrap_or(default)
    }
    /// Whether any value is stored under `key`.
    pub fn contains_key(&self, key: &str) -> bool {
        self.values.contains_key(key)
    }
    /// All stored keys, in arbitrary (HashMap) order.
    pub fn keys(&self) -> Vec<&String> {
        self.values.keys().collect()
    }
    /// Removes every stored value.
    pub fn clear(&mut self) {
        self.values.clear();
    }
    /// Copies all entries from `other` into `self`; on key collisions
    /// `other`'s value wins.
    pub fn merge(&mut self, other: &ConfigManager) {
        for (key, value) in &other.values {
            self.values.insert(key.clone(), value.clone());
        }
    }
    /// Stores a string after best-effort typed parsing: bool first, then
    /// i64, then f64, falling back to a plain string.
    fn set_from_string(&mut self, key: &str, value: &str) {
        if let Ok(b) = value.parse::<bool>() {
            self.set(key, ConfigValue::Boolean(b));
        } else if let Ok(i) = value.parse::<i64>() {
            self.set(key, ConfigValue::Integer(i));
        } else if let Ok(f) = value.parse::<f64>() {
            self.set(key, ConfigValue::Float(f));
        } else {
            self.set(key, ConfigValue::String(value.to_string()));
        }
    }
    /// Recursively flattens a JSON tree: scalars and arrays are stored
    /// under `prefix`, objects recurse with `prefix.key`, and nulls are
    /// silently dropped.
    #[cfg(feature = "serialize")]
    fn load_from_json_value(&mut self, prefix: &str, value: &serde_json::Value) {
        match value {
            serde_json::Value::String(s) => {
                self.set(prefix, ConfigValue::String(s.clone()));
            }
            serde_json::Value::Number(n) => {
                // Prefer exact integers; fall back to f64 for the rest.
                if let Some(i) = n.as_i64() {
                    self.set(prefix, ConfigValue::Integer(i));
                } else if let Some(f) = n.as_f64() {
                    self.set(prefix, ConfigValue::Float(f));
                }
            }
            serde_json::Value::Bool(b) => {
                self.set(prefix, ConfigValue::Boolean(*b));
            }
            serde_json::Value::Array(arr) => {
                let config_arr: Vec<ConfigValue> = arr
                    .iter()
                    .map(|v| self.json_value_to_config_value(v))
                    .collect();
                self.set(prefix, ConfigValue::Array(config_arr));
            }
            serde_json::Value::Object(obj) => {
                for (key, val) in obj {
                    let new_key = if prefix.is_empty() {
                        key.clone()
                    } else {
                        format!("{}.{}", prefix, key)
                    };
                    self.load_from_json_value(&new_key, val);
                }
            }
            serde_json::Value::Null => {}
        }
    }
    /// Deep-converts a JSON value to a `ConfigValue` WITHOUT flattening
    /// (used for array elements); JSON null becomes the string "null".
    #[cfg(feature = "serialize")]
    fn json_value_to_config_value(&self, value: &serde_json::Value) -> ConfigValue {
        match value {
            serde_json::Value::String(s) => ConfigValue::String(s.clone()),
            serde_json::Value::Number(n) => {
                if let Some(i) = n.as_i64() {
                    ConfigValue::Integer(i)
                } else if let Some(f) = n.as_f64() {
                    ConfigValue::Float(f)
                } else {
                    ConfigValue::String(n.to_string())
                }
            }
            serde_json::Value::Bool(b) => ConfigValue::Boolean(*b),
            serde_json::Value::Array(arr) => {
                let config_arr: Vec<ConfigValue> = arr
                    .iter()
                    .map(|v| self.json_value_to_config_value(v))
                    .collect();
                ConfigValue::Array(config_arr)
            }
            serde_json::Value::Object(obj) => {
                let mut config_obj = HashMap::new();
                for (key, val) in obj {
                    config_obj.insert(key.clone(), self.json_value_to_config_value(val));
                }
                ConfigValue::Object(config_obj)
            }
            serde_json::Value::Null => ConfigValue::String("null".to_string()),
        }
    }
}
/// Defaults to an empty manager with the `TORSH_` env prefix.
impl Default for ConfigManager {
    fn default() -> Self {
        Self::new()
    }
}
/// Fluent builder over [`ConfigManager`] for assembling configuration
/// from files, environment variables, and explicit values.
pub struct ConfigBuilder {
    manager: ConfigManager, // accumulates settings until `build`
}
impl ConfigBuilder {
    /// Starts a builder around an empty `ConfigManager`.
    pub fn new() -> Self {
        Self {
            manager: ConfigManager::new(),
        }
    }

    /// Overrides the environment-variable prefix used by [`Self::env`].
    pub fn env_prefix(mut self, prefix: &str) -> Self {
        self.manager.env_prefix = prefix.to_string();
        self
    }

    /// Loads configuration from a JSON file (requires `serialize`).
    #[cfg(feature = "serialize")]
    pub fn file<P: AsRef<Path>>(mut self, path: P) -> Result<Self> {
        self.manager.load_from_file(path)?;
        Ok(self)
    }

    /// Stub used when the `serialize` feature is disabled: always errors.
    #[cfg(not(feature = "serialize"))]
    pub fn file<P: AsRef<Path>>(self, _path: P) -> Result<Self> {
        Err(TorshError::InvalidArgument(
            "JSON file loading requires 'serialize' feature. Enable with --features serialize"
                .to_string(),
        ))
    }

    /// Pulls in all environment variables matching the prefix.
    pub fn env(mut self) -> Self {
        self.manager.load_from_env();
        self
    }

    /// Stores a raw `ConfigValue` under `key`.
    pub fn set(mut self, key: &str, value: ConfigValue) -> Self {
        self.manager.set(key, value);
        self
    }

    /// Typed convenience setter for string values.
    pub fn set_string(self, key: &str, value: &str) -> Self {
        self.set(key, ConfigValue::String(value.to_string()))
    }

    /// Typed convenience setter for integer values.
    pub fn set_i64(self, key: &str, value: i64) -> Self {
        self.set(key, ConfigValue::Integer(value))
    }

    /// Typed convenience setter for float values.
    pub fn set_f64(self, key: &str, value: f64) -> Self {
        self.set(key, ConfigValue::Float(value))
    }

    /// Typed convenience setter for boolean values.
    pub fn set_bool(self, key: &str, value: bool) -> Self {
        self.set(key, ConfigValue::Boolean(value))
    }

    /// Finalizes the builder and returns the configured manager.
    pub fn build(self) -> ConfigManager {
        self.manager
    }
}
/// Defaults to a builder over an empty `ConfigManager`.
impl Default for ConfigBuilder {
    fn default() -> Self {
        Self::new()
    }
}
}
pub use builder_pattern;
pub use simple_random_transform;
pub use validated_constructor;
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    use std::time::Duration;

    #[test]
    fn test_validate_probability() {
        // Both boundary values are valid; anything outside [0, 1] errors.
        assert!(validate_probability(0.5, "test").is_ok());
        assert!(validate_probability(0.0, "test").is_ok());
        assert!(validate_probability(1.0, "test").is_ok());
        assert!(validate_probability(-0.1, "test").is_err());
        assert!(validate_probability(1.1, "test").is_err());
    }

    #[test]
    fn test_validate_range() {
        // Equal endpoints are allowed; inverted ranges are rejected.
        assert!(validate_range((0.0, 1.0), "test").is_ok());
        assert!(validate_range((1.0, 1.0), "test").is_ok());
        assert!(validate_range((1.0, 0.0), "test").is_err());
    }

    #[test]
    fn test_validate_positive() {
        // Zero counts as non-positive.
        assert!(validate_positive(1, "test").is_ok());
        assert!(validate_positive(0, "test").is_err());
        assert!(validate_positive(-1, "test").is_err());
    }

    #[test]
    fn test_progress_tracker() {
        let mut tracker = ProgressTracker::new(10);
        assert!(!tracker.is_complete());
        // With a 10% report interval, each of the first updates reports.
        assert!(tracker.update());
        assert!(tracker.update());
        for _ in 0..8 {
            tracker.update();
        }
        assert!(tracker.is_complete());
    }

    #[test]
    fn test_performance_timer() {
        let mut timer = performance::Timer::new();
        timer.start();
        thread::sleep(Duration::from_millis(10));
        let duration = timer.stop();
        assert!(duration >= Duration::from_millis(10));
        assert!(timer.average().is_some());
        assert!(timer.throughput(100).is_some());
    }

    #[test]
    fn test_memory_pool() {
        let mut pool = memory::MemoryPool::<u8>::new(5, 1024);
        assert_eq!(pool.size(), 0);
        let buffer1 = pool.get();
        assert_eq!(buffer1.capacity(), 1024);
        pool.put(buffer1);
        assert_eq!(pool.size(), 1);
        let buffer2 = pool.get();
        assert_eq!(pool.size(), 0);
        // Returned buffers are cleared before being pooled.
        assert_eq!(buffer2.len(), 0);
    }

    #[test]
    fn test_memory_tracker() {
        let mut tracker = memory::MemoryTracker::new();
        assert_eq!(tracker.current_usage(), 0);
        tracker.allocate("test", 1024);
        assert_eq!(tracker.current_usage(), 1024);
        assert_eq!(tracker.peak_usage(), 1024);
        tracker.deallocate("test", 512);
        assert_eq!(tracker.current_usage(), 512);
        // Peak is monotone: it never decreases on deallocation.
        assert_eq!(tracker.peak_usage(), 1024);
    }

    #[test]
    fn test_parallel_batch_process() {
        let data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let results = batch::parallel_batch_process(data, 3, |chunk| chunk.iter().sum::<i32>());
        // Ten items in chunks of three: 3 + 3 + 3 + 1 chunks.
        assert_eq!(results.len(), 4);
        assert_eq!(results[0], 6);
        assert_eq!(results[1], 15);
        assert_eq!(results[2], 24);
        assert_eq!(results[3], 10);
    }

    #[test]
    fn test_concurrent_cache() {
        let cache = concurrent::ConcurrentCache::new(2);
        cache.insert("key1", "value1");
        cache.insert("key2", "value2");
        assert_eq!(cache.len(), 2);
        assert_eq!(cache.get(&"key1"), Some("value1"));
        assert_eq!(cache.get(&"key2"), Some("value2"));
        assert_eq!(cache.get(&"key3"), None);
        // Inserting past capacity evicts some entry, keeping len bounded.
        cache.insert("key3", "value3");
        assert_eq!(cache.len(), 2);
    }

    #[test]
    fn test_statistics_collector() {
        let collector = concurrent::StatisticsCollector::new(100);
        assert_eq!(collector.count(), 0);
        collector.add_sample(1.0);
        collector.add_sample(2.0);
        collector.add_sample(3.0);
        assert_eq!(collector.count(), 3);
        assert_eq!(collector.mean(), Some(2.0));
        assert_eq!(collector.min_max(), Some((1.0, 3.0)));
        assert!(collector.std_dev().is_some());
    }

    #[test]
    fn test_config_manager() {
        let mut config = config::ConfigManager::new();
        config.set(
            "string_key",
            config::ConfigValue::String("test_value".to_string()),
        );
        config.set("int_key", config::ConfigValue::Integer(42));
        config.set("float_key", config::ConfigValue::Float(3.14));
        config.set("bool_key", config::ConfigValue::Boolean(true));
        assert_eq!(config.get_string("string_key", "default"), "test_value");
        assert_eq!(config.get_i64("int_key", 0), 42);
        assert_eq!(config.get_f64("float_key", 0.0), 3.14);
        assert!(config.get_bool("bool_key", false));
        // Missing keys fall back to the supplied defaults.
        assert_eq!(config.get_string("missing_key", "default"), "default");
        assert_eq!(config.get_i64("missing_key", 123), 123);
        assert!(config.contains_key("string_key"));
        assert!(!config.contains_key("missing_key"));
        let keys = config.keys();
        assert!(keys.len() >= 4);
        config.clear();
        assert_eq!(config.keys().len(), 0);
    }

    #[test]
    fn test_config_value_conversions() {
        let string_val = config::ConfigValue::String("test".to_string());
        let int_val = config::ConfigValue::Integer(42);
        let float_val = config::ConfigValue::Float(3.14);
        let bool_val = config::ConfigValue::Boolean(true);
        assert_eq!(string_val.as_string(), Some("test"));
        assert_eq!(int_val.as_i64(), Some(42));
        assert_eq!(float_val.as_f64(), Some(3.14));
        assert_eq!(bool_val.as_bool(), Some(true));
        // Cross-type accessors yield None, except int -> f64 widening.
        assert_eq!(string_val.as_i64(), None);
        assert_eq!(int_val.as_string(), None);
        assert_eq!(int_val.as_f64(), Some(42.0));
    }

    #[test]
    fn test_config_builder() {
        let config = config::ConfigBuilder::new()
            .env_prefix("TEST_")
            .set_string("app_name", "torsh-data")
            .set_i64("version", 1)
            .set_f64("threshold", 0.5)
            .set_bool("debug", true)
            .build();
        assert_eq!(config.get_string("app_name", ""), "torsh-data");
        assert_eq!(config.get_i64("version", 0), 1);
        assert_eq!(config.get_f64("threshold", 0.0), 0.5);
        assert!(config.get_bool("debug", false));
    }

    #[test]
    fn test_config_merge() {
        let mut config1 = config::ConfigManager::new();
        config1.set("key1", config::ConfigValue::String("value1".to_string()));
        config1.set("key2", config::ConfigValue::Integer(42));
        let mut config2 = config::ConfigManager::new();
        config2.set("key2", config::ConfigValue::Integer(100));
        config2.set("key3", config::ConfigValue::Boolean(true));
        config1.merge(&config2);
        assert_eq!(config1.get_string("key1", ""), "value1");
        // On collision, the merged-in value wins.
        assert_eq!(config1.get_i64("key2", 0), 100);
        assert!(config1.get_bool("key3", false));
    }
}