use crate::error::{StatsError, StatsResult};
use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2};
use scirs2_core::numeric::{Float, NumCast, One, Zero};
use scirs2_core::{
parallel_ops::*,
simd_ops::SimdUnifiedOps,
validation::*,
};
use std::collections::{HashMap, VecDeque};
use std::marker::PhantomData;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
/// Configuration knobs for the advanced error-recovery engine.
#[derive(Debug, Clone)]
pub struct AdvancedErrorRecoveryConfig {
/// Maximum number of recovery strategies attempted per session.
pub max_recovery_attempts: usize,
/// Wall-clock budget for a single recovery attempt.
pub recovery_timeout: Duration,
/// Enable strategy selection that adapts from past outcomes.
pub adaptive_strategies: bool,
/// EMA learning rate used when updating per-strategy success rates.
pub learning_rate: f64,
/// Allow strategies to be evaluated in parallel where supported.
pub parallel_recovery: bool,
/// Memory policy applied during recovery.
pub memory_management: MemoryManagementStrategy,
/// How strictly inputs/intermediates are validated.
pub validation_level: ValidationLevel,
/// Record full `RecoverySession` history (bounded queue).
pub track_recovery_history: bool,
/// Attempt to predict and prevent recurring errors.
pub predictive_prevention: bool,
}
impl Default for AdvancedErrorRecoveryConfig {
/// Balanced defaults: 5 attempts, 30 s timeout, adaptive learning at 0.1,
/// conservative memory use, comprehensive validation, history tracking on.
fn default() -> Self {
Self {
max_recovery_attempts: 5,
recovery_timeout: Duration::from_secs(30),
adaptive_strategies: true,
learning_rate: 0.1,
parallel_recovery: true,
memory_management: MemoryManagementStrategy::Conservative,
validation_level: ValidationLevel::Comprehensive,
track_recovery_history: true,
predictive_prevention: true,
}
}
}
/// Memory policy applied while running recovery strategies.
#[derive(Debug, Clone, Copy)]
pub enum MemoryManagementStrategy {
Conservative,
Aggressive,
Adaptive,
MemoryMapped,
}
/// How strictly data and intermediate results are validated, from cheapest
/// (`Basic`) to most thorough (`Paranoid`).
#[derive(Debug, Clone, Copy)]
pub enum ValidationLevel {
Basic,
Standard,
Comprehensive,
Paranoid,
}
/// High-level classification of a failure, produced by `classify_error` and
/// used to select candidate recovery strategies.
#[derive(Debug, Clone, PartialEq)]
pub enum ErrorClassification {
/// Numerical breakdown (overflow, ill-conditioning, cancellation, ...).
NumericalInstability {
severity: InstabilitySeverity,
cause: InstabilityCause,
},
/// Problems in the input data itself; `affected_fraction` is in [0, 1].
DataQuality {
issue_type: DataQualityIssue,
affected_fraction: f64,
},
/// An iterative algorithm did not converge.
ConvergenceFailure {
algorithm: String,
iterations_attempted: usize,
convergence_metric: f64,
},
/// Memory exhaustion or allocation problems; sizes in bytes when known.
MemoryIssue {
issue_type: MemoryIssueType,
memory_required: Option<usize>,
memory_available: Option<usize>,
},
/// A resource quota (time, CPU, disk, ...) was exceeded.
ResourceLimit {
resource: ResourceType,
limit_hit: String,
},
/// An input-validation check failed before computation.
ValidationFailure {
validation_type: ValidationType,
details: String,
},
/// Unrecognized error; `error_signature` is a truncated debug string.
Unknown {
error_signature: String,
},
}
/// Severity ranking for numerical instability, from benign to fatal.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum InstabilitySeverity {
Low, Moderate, High, Critical, }
/// Root cause of a numerical instability.
#[derive(Debug, Clone, PartialEq)]
pub enum InstabilityCause {
Overflow,
Underflow,
/// Matrix/system is ill-conditioned; carries the estimated condition number.
IllConditioned { condition_number: f64 },
NearSingular,
/// Subtraction of nearly equal values destroying significant digits.
CatastrophicCancellation,
AccumulatedRoundingError,
InvalidMathematicalOperation,
}
/// Kinds of data-quality problems that can trigger recovery.
#[derive(Debug, Clone, PartialEq)]
pub enum DataQualityIssue {
MissingValues,
InfiniteValues,
NaNValues,
/// Outliers detected; records the detection method's name.
Outliers { outlier_method: String },
DuplicateValues,
ConstantValues,
/// Feature correlation above the given threshold.
HighCorrelation { correlation_threshold: f64 },
DataLeakage,
TemporalInconsistency,
}
/// Specific memory failure modes.
#[derive(Debug, Clone, PartialEq)]
pub enum MemoryIssueType {
OutOfMemory,
MemoryFragmentation,
AllocationFailure,
StackOverflow,
MemoryLeak,
}
/// Resource categories whose limits can be exceeded.
#[derive(Debug, Clone, PartialEq)]
pub enum ResourceType {
Memory,
CPU,
Time,
DiskSpace,
NetworkBandwidth,
}
/// Categories of input-validation failure.
#[derive(Debug, Clone, PartialEq)]
pub enum ValidationType {
DimensionMismatch,
RangeValidation,
TypeValidation,
ConstraintViolation,
DomainValidation,
}
/// A candidate action the recovery engine can take to retry a failed
/// computation. Generated from an `ErrorClassification`.
#[derive(Debug, Clone)]
pub enum RecoveryStrategy {
/// Retry with adjusted numeric parameters (name -> new value).
ParameterAdjustment {
adjustments: HashMap<String, f64>,
},
/// Switch to an alternative algorithm, with further fallbacks in order.
AlgorithmSwitch {
alternative_algorithm: String,
fallback_chain: Vec<String>,
},
/// Clean/transform the input data before retrying.
DataPreprocessing {
preprocessing_steps: Vec<PreprocessingStep>,
},
/// Apply numerical-stabilization techniques.
NumericalStabilization {
techniques: Vec<StabilizationTechnique>,
},
/// Reduce memory pressure before retrying.
MemoryOptimization {
strategies: Vec<MemoryOptimizationStrategy>,
},
/// Give up on an exact answer and return a degraded result.
GracefulDegradation {
fallback_result: DegradedResult,
},
/// Split the problem into smaller sub-problems.
ProblemDecomposition {
decomposition_strategy: DecompositionStrategy,
},
}
/// Individual data-cleaning/transformation steps used by
/// `RecoveryStrategy::DataPreprocessing`.
#[derive(Debug, Clone)]
pub enum PreprocessingStep {
RemoveInvalidValues,
ImputeMissingValues { method: ImputationMethod },
RemoveOutliers { method: OutlierRemovalMethod },
NormalizeData { method: NormalizationMethod },
/// Add regularization with strength `lambda`.
RegularizeData { lambda: f64 },
ReduceDimensionality { target_dims: usize },
StabilizeVariance,
ApplyTransformation { transformation: DataTransformation },
}
/// Numerical-stabilization techniques applied when a computation becomes
/// unstable (ill-conditioning, overflow, accumulated rounding error, ...).
///
/// NOTE: several variant names previously contained spaces
/// (`Iterative Refinement`, `Extended Precision`, ...), which is not valid
/// Rust; they are renamed to conventional CamelCase identifiers. No other
/// code referenced the broken variants.
#[derive(Debug, Clone)]
pub enum StabilizationTechnique {
    /// Tikhonov-style regularization with strength `lambda`.
    Regularization { lambda: f64 },
    /// Row/column pivoting during factorization.
    Pivoting,
    /// Iteratively refine an approximate solution.
    IterativeRefinement,
    /// Carry intermediate values at higher precision.
    ExtendedPrecision,
    /// Monitor the condition number and react when it degrades.
    ConditionNumberMonitoring,
    /// Adapt convergence tolerances on the fly.
    AdaptiveTolerance,
    /// Prefer numerically robust algorithm variants.
    RobustAlgorithms,
    /// Apply a preconditioner identified by `method` (e.g. "Jacobi").
    Preconditioning { method: String },
}
/// Tactics for reducing memory pressure during a retried computation.
#[derive(Debug, Clone)]
pub enum MemoryOptimizationStrategy {
/// Process the data in chunks of `chunksize` elements.
ChunkedProcessing { chunksize: usize },
StreamingAlgorithms,
InPlaceOperations,
MemoryPooling,
DataCompression,
LazyEvaluation,
TemporaryFileUsage,
}
/// Fallback outputs for graceful degradation when an exact result is
/// unobtainable.
#[derive(Debug, Clone)]
pub enum DegradedResult {
/// A partial answer with an attached confidence in [0, 1].
PartialResult { confidence: f64 },
/// An approximation with an estimated accuracy in [0, 1].
ApproximateResult { accuracy_estimate: f64 },
/// Only lower/upper bounds on the true value.
BoundsOnly { lower: f64, upper: f64 },
/// A qualitative, textual description instead of a numeric result.
QualitativeResult { description: String },
/// Nothing could be produced; `reason` explains why.
EmptyResult { reason: String },
}
/// Ways to split a problem into smaller, independently solvable parts.
#[derive(Debug, Clone)]
pub enum DecompositionStrategy {
SpatialDecomposition { n_parts: usize },
TemporalDecomposition { windowsize: usize },
/// Partition features into the given index groups.
FeatureDecomposition { feature_groups: Vec<Vec<usize>> },
HierarchicalDecomposition { levels: usize },
RandomSampling { samplesize: usize },
}
/// Methods for filling in missing values.
#[derive(Debug, Clone)]
pub enum ImputationMethod {
Mean,
Median,
Mode,
LinearInterpolation,
/// k-nearest-neighbors imputation with `k` neighbors.
KNN { k: usize },
/// Forward fill (carry last observation forward).
Forward,
/// Backward fill (carry next observation backward).
Backward,
}
/// Methods for detecting and removing outliers.
///
/// NOTE: the `Isolation Forest` variant previously contained a space, which
/// is not valid Rust; it is renamed to `IsolationForest`. No other code
/// referenced the broken variant.
#[derive(Debug, Clone)]
pub enum OutlierRemovalMethod {
    /// Remove points whose |z-score| exceeds `threshold`.
    ZScore { threshold: f64 },
    /// Remove points outside `multiplier` x IQR beyond the quartiles.
    IQR { multiplier: f64 },
    IsolationForest,
    LocalOutlierFactor,
    EllipticEnvelope,
}
/// Methods for scaling/normalizing data before a retry.
#[derive(Debug, Clone)]
pub enum NormalizationMethod {
StandardScaling,
MinMaxScaling,
RobustScaling,
QuantileUniform,
PowerTransform,
}
/// Variance-stabilizing / distribution-shaping transformations.
#[derive(Debug, Clone)]
pub enum DataTransformation {
LogTransform,
SquareRootTransform,
/// Box-Cox power transform with parameter `lambda` (positive data only).
BoxCoxTransform { lambda: f64 },
/// Yeo-Johnson power transform; handles zero/negative values.
YeoJohnsonTransform { lambda: f64 },
Standardization,
}
/// Record of a single recovery attempt within a session.
#[derive(Debug, Clone)]
pub struct RecoveryAttempt {
/// 1-based index of this attempt within the session.
pub attempt_number: usize,
/// Strategy applied in this attempt.
pub strategy: RecoveryStrategy,
/// When the attempt started.
pub start_time: Instant,
/// How long the attempt ran.
pub duration: Duration,
/// Whether the retried operation succeeded.
pub success: bool,
/// The error observed on failure; `None` on success.
pub error: Option<StatsError>,
/// Resource/quality metrics gathered during the attempt.
pub metrics: RecoveryMetrics,
}
/// Resource and quality metrics for one recovery attempt.
#[derive(Debug, Clone)]
pub struct RecoveryMetrics {
/// Estimated memory used, in bytes.
pub memory_usage: usize,
/// CPU time consumed by the attempt.
pub cpu_time: Duration,
/// Estimated accuracy of the recovered result, when available.
pub accuracy_estimate: Option<f64>,
/// Confidence in the attempt's outcome, in [0, 1].
pub confidence: f64,
}
/// Full record of one recovery session (one original error and all attempts
/// made to recover from it). Stored in the bounded history queue.
#[derive(Debug, Clone)]
pub struct RecoverySession {
pub session_id: String,
pub original_error: StatsError,
pub error_classification: ErrorClassification,
pub recovery_attempts: Vec<RecoveryAttempt>,
pub final_result: RecoveryResult,
pub total_duration: Duration,
/// Human-readable takeaways extracted from the session outcome.
pub lessons_learned: Vec<String>,
}
/// Final outcome of a recovery session.
#[derive(Debug, Clone)]
pub enum RecoveryResult {
/// The operation eventually succeeded.
FullRecovery {
result: String, confidence: f64,
},
/// Only a degraded result could be produced.
PartialRecovery {
result: DegradedResult,
limitations: Vec<String>,
},
/// Every strategy failed; carries the last error seen.
Failed {
final_error: StatsError,
attempted_strategies: Vec<String>,
},
}
/// Error-recovery engine: classifies failures, generates and applies recovery
/// strategies, and learns per-strategy success rates over time.
///
/// Shared state is wrapped in `Arc<Mutex<...>>` so the engine can be used
/// from multiple threads. `F` is the numeric element type of the statistics
/// routines this engine supports (held only via `PhantomData`).
pub struct AdvancedIntelligentErrorRecovery<F> {
config: AdvancedErrorRecoveryConfig,
// Bounded FIFO of past sessions (capped at 1000 entries).
recovery_history: Arc<Mutex<VecDeque<RecoverySession>>>,
// Learned success rate per strategy (keyed by the strategy's Debug string).
strategy_success_rates: Arc<Mutex<HashMap<String, f64>>>,
// NOTE(review): currently written nowhere in this file — reserved for
// pattern-based prediction.
error_patterns: Arc<Mutex<HashMap<String, ErrorClassification>>>,
// NOTE(review): currently unused — reserved for adaptive tuning.
adaptive_parameters: Arc<Mutex<HashMap<String, f64>>>, _phantom: PhantomData<F>,
}
impl<F> AdvancedIntelligentErrorRecovery<F>
where
F: Float + NumCast + SimdUnifiedOps + Zero + One + PartialOrd + Copy + Send + Sync
+ std::fmt::Display,
{
pub fn new(config: AdvancedErrorRecoveryConfig) -> Self {
Self {
config,
recovery_history: Arc::new(Mutex::new(VecDeque::new())),
strategy_success_rates: Arc::new(Mutex::new(HashMap::new())),
error_patterns: Arc::new(Mutex::new(HashMap::new())),
adaptive_parameters: Arc::new(Mutex::new(HashMap::new())), _phantom: PhantomData,
}
}
pub fn recover_from_error<T>(
&self,
error: StatsError,
operation: impl Fn() -> StatsResult<T> + Send + Sync + Clone,
context: &RecoveryContext,
) -> StatsResult<T> {
let session_start = Instant::now();
let session_id = format!("recovery_{}", chrono::Utc::now().timestamp());
let error_classification = self.classify_error(&error, context);
let strategies = self.generate_recovery_strategies(&error_classification, context);
let mut recovery_attempts = Vec::new();
let mut final_result = RecoveryResult::Failed {
final_error: error.clone(),
attempted_strategies: Vec::new(),
};
for (attempt_number, strategy) in strategies.iter().enumerate() {
if attempt_number >= self.config.max_recovery_attempts {
break;
}
let attempt_start = Instant::now();
let modified_operation = self.apply_recovery_strategy(operation.clone(), strategy, context)?;
let success = match self.execute_with_timeout(modified_operation, self.config.recovery_timeout) {
Ok(result) => {
final_result = RecoveryResult::FullRecovery {
result: "Success".to_string(), confidence: self.calculate_recovery_confidence(strategy, &error_classification),
};
true
}
Err(recovery_error) => {
if attempt_number == strategies.len() - 1 {
final_result = RecoveryResult::Failed {
final_error: recovery_error.clone(),
attempted_strategies: strategies.iter()
.map(|s| format!("{:?}", s))
.collect(),
};
}
false
}
};
let duration = attempt_start.elapsed();
let recovery_attempt = RecoveryAttempt {
attempt_number: attempt_number + 1,
strategy: strategy.clone(),
start_time: attempt_start,
duration,
success,
error: if success { None } else { Some(error.clone()) },
metrics: RecoveryMetrics {
memory_usage: self.estimate_memory_usage(),
cpu_time: duration,
accuracy_estimate: if success { Some(0.95) } else { None },
confidence: if success { 0.9 } else { 0.1 },
},
};
recovery_attempts.push(recovery_attempt);
if success {
self.update_strategy_success_rate(strategy, true);
break;
} else {
self.update_strategy_success_rate(strategy, false);
}
}
if self.config.track_recovery_history {
let session = RecoverySession {
session_id,
original_error: error.clone(),
error_classification,
recovery_attempts,
final_result: final_result.clone(),
total_duration: session_start.elapsed(),
lessons_learned: self.extract_lessons_learned(&final_result),
};
let mut history = self.recovery_history.lock().expect("Operation failed");
history.push_back(session);
if history.len() > 1000 {
history.pop_front();
}
}
match final_result {
RecoveryResult::FullRecovery { .. } => {
Err(StatsError::ComputationError("Recovery successful but result not returned in this simplified implementation".to_string()))
}
RecoveryResult::PartialRecovery { result, limitations } => {
Err(StatsError::ComputationError(format!(
"Partial recovery achieved with limitations: {:?}",
limitations
)))
}
RecoveryResult::Failed { final_error, .. } => Err(final_error),
}
}
fn classify_error(&self, error: &StatsError, context: &RecoveryContext) -> ErrorClassification {
match error {
StatsError::InvalidArgument(msg) => {
if msg.contains("dimension") {
ErrorClassification::ValidationFailure {
validation_type: ValidationType::DimensionMismatch,
details: msg.clone(),
}
} else if msg.contains("range") {
ErrorClassification::ValidationFailure {
validation_type: ValidationType::RangeValidation,
details: msg.clone(),
}
} else {
ErrorClassification::ValidationFailure {
validation_type: ValidationType::ConstraintViolation,
details: msg.clone(),
}
}
}
StatsError::ComputationError(msg) => {
if msg.contains("overflow") {
ErrorClassification::NumericalInstability {
severity: InstabilitySeverity::High,
cause: InstabilityCause::Overflow,
}
} else if msg.contains("underflow") {
ErrorClassification::NumericalInstability {
severity: InstabilitySeverity::Moderate,
cause: InstabilityCause::Underflow,
}
} else if msg.contains("singular") || msg.contains("ill-conditioned") {
ErrorClassification::NumericalInstability {
severity: InstabilitySeverity::High,
cause: InstabilityCause::IllConditioned {
condition_number: context.estimated_condition_number.unwrap_or(1e16),
},
}
} else if msg.contains("convergence") {
ErrorClassification::ConvergenceFailure {
algorithm: context.algorithm_name.clone().unwrap_or("Unknown".to_string()),
iterations_attempted: context.iterations_attempted.unwrap_or(0),
convergence_metric: context.convergence_metric.unwrap_or(f64::INFINITY),
}
} else {
ErrorClassification::Unknown {
error_signature: self.compute_error_signature(error),
}
}
}
StatsError::DimensionMismatch(msg) => {
ErrorClassification::ValidationFailure {
validation_type: ValidationType::DimensionMismatch,
details: msg.clone(),
}
}
StatsError::ConvergenceError(msg) => {
ErrorClassification::ConvergenceFailure {
algorithm: context.algorithm_name.clone().unwrap_or("Unknown".to_string()),
iterations_attempted: context.iterations_attempted.unwrap_or(0),
convergence_metric: context.convergence_metric.unwrap_or(f64::INFINITY),
}
}
_ => ErrorClassification::Unknown {
error_signature: self.compute_error_signature(error),
},
}
}
fn generate_recovery_strategies(
&self,
classification: &ErrorClassification,
context: &RecoveryContext,
) -> Vec<RecoveryStrategy> {
let mut strategies = Vec::new();
match classification {
ErrorClassification::NumericalInstability { severity, cause } => {
match cause {
InstabilityCause::IllConditioned { condition_number } => {
strategies.push(RecoveryStrategy::NumericalStabilization {
techniques: vec![
StabilizationTechnique::Regularization { lambda: 1e-6 },
StabilizationTechnique::Pivoting,
StabilizationTechnique::Preconditioning { method: "Jacobi".to_string() },
],
});
}
InstabilityCause::Overflow => {
strategies.push(RecoveryStrategy::DataPreprocessing {
preprocessing_steps: vec![
PreprocessingStep::NormalizeData { method: NormalizationMethod::StandardScaling },
PreprocessingStep::StabilizeVariance,
],
});
}
_ => {
strategies.push(RecoveryStrategy::ParameterAdjustment {
adjustments: [("tolerance".to_string(), 1e-4)].iter().cloned().collect(),
});
}
}
}
ErrorClassification::ConvergenceFailure { algorithm, .. } => {
strategies.push(RecoveryStrategy::ParameterAdjustment {
adjustments: [
("max_iterations".to_string(), 1000.0),
("tolerance".to_string(), 1e-3),
].iter().cloned().collect(),
});
strategies.push(RecoveryStrategy::AlgorithmSwitch {
alternative_algorithm: "robust_alternative".to_string(),
fallback_chain: vec!["fallback1".to_string(), "fallback2".to_string()],
});
}
ErrorClassification::DataQuality { issue_type, .. } => {
match issue_type {
DataQualityIssue::MissingValues => {
strategies.push(RecoveryStrategy::DataPreprocessing {
preprocessing_steps: vec![
PreprocessingStep::ImputeMissingValues { method: ImputationMethod::Median },
],
});
}
DataQualityIssue::Outliers { .. } => {
strategies.push(RecoveryStrategy::DataPreprocessing {
preprocessing_steps: vec![
PreprocessingStep::RemoveOutliers {
method: OutlierRemovalMethod::IQR { multiplier: 1.5 }
},
],
});
}
_ => {
strategies.push(RecoveryStrategy::DataPreprocessing {
preprocessing_steps: vec![PreprocessingStep::RemoveInvalidValues],
});
}
}
}
ErrorClassification::MemoryIssue { .. } => {
strategies.push(RecoveryStrategy::MemoryOptimization {
strategies: vec![
MemoryOptimizationStrategy::ChunkedProcessing { chunksize: 1000 },
MemoryOptimizationStrategy::StreamingAlgorithms,
],
});
}
_ => {
strategies.push(RecoveryStrategy::ParameterAdjustment {
adjustments: [("tolerance".to_string(), 1e-3)].iter().cloned().collect(),
});
}
}
strategies.push(RecoveryStrategy::GracefulDegradation {
fallback_result: DegradedResult::ApproximateResult { accuracy, estimate: 0.8 },
});
strategies
}
fn apply_recovery_strategy<T>(
&self,
operation: impl Fn() -> StatsResult<T> + Send + Sync + Clone,
strategy: &RecoveryStrategy,
context: &RecoveryContext,
) -> StatsResult<impl Fn() -> StatsResult<T> + Send + Sync> {
match strategy {
RecoveryStrategy::ParameterAdjustment { adjustments } => {
Ok(move || operation())
}
RecoveryStrategy::AlgorithmSwitch { alternative_algorithm, .. } => {
Ok(move || operation())
}
RecoveryStrategy::DataPreprocessing { preprocessing_steps } => {
Ok(move || operation())
}
_ => Ok(move || operation()),
}
}
fn execute_with_timeout<T>(
&self,
operation: impl Fn() -> StatsResult<T> + Send + Sync,
timeout: Duration,
) -> StatsResult<T> {
operation()
}
fn calculate_recovery_confidence(
&self,
strategy: &RecoveryStrategy,
classification: &ErrorClassification,
) -> f64 {
match (strategy, classification) {
(RecoveryStrategy::NumericalStabilization { .. }, ErrorClassification::NumericalInstability { .. }) => 0.9,
(RecoveryStrategy::DataPreprocessing { .. }, ErrorClassification::DataQuality { .. }) => 0.85,
(RecoveryStrategy::AlgorithmSwitch { .. }, ErrorClassification::ConvergenceFailure { .. }) => 0.8_ => 0.6,
}
}
fn update_strategy_success_rate(&self, strategy: &RecoveryStrategy, success: bool) {
let strategy_name = format!("{:?}", strategy);
let mut rates = self.strategy_success_rates.lock().expect("Operation failed");
let current_rate = rates.get(&strategy_name).unwrap_or(&0.5);
let new_rate = if success {
current_rate + self.config.learning_rate * (1.0 - current_rate)
} else {
current_rate * (1.0 - self.config.learning_rate)
};
rates.insert(strategy_name, new_rate);
}
fn extract_lessons_learned(&self, result: &RecoveryResult) -> Vec<String> {
match result {
RecoveryResult::FullRecovery { .. } => {
vec!["Recovery successful with chosen strategy".to_string()]
}
RecoveryResult::PartialRecovery { limitations, .. } => {
let mut lessons = vec!["Partial recovery achieved".to_string()];
lessons.extend(limitations.iter().map(|l| format!("Limitation: {}", l)));
lessons
}
RecoveryResult::Failed { attempted_strategies, .. } => {
vec![format!("All strategies failed: {:?}", attempted_strategies)]
}
}
}
fn compute_error_signature(&self, error: &StatsError) -> String {
format!("{:?}", error).chars().take(50).collect()
}
fn estimate_memory_usage(&self) -> usize {
1000000 }
pub fn get_recovery_statistics(&self) -> RecoveryStatistics {
let history = self.recovery_history.lock().expect("Operation failed");
let total_sessions = history.len();
let successful_sessions = history.iter()
.filter(|s| matches!(s.final_result, RecoveryResult::FullRecovery { .. }))
.count();
let success_rate = if total_sessions > 0 {
successful_sessions as f64 / total_sessions as f64
} else {
0.0
};
let avg_recovery_time = if !history.is_empty() {
let total_time: Duration = history.iter().map(|s| s.total_duration).sum();
total_time / history.len() as u32
} else {
Duration::from_secs(0)
};
RecoveryStatistics {
total_recovery_sessions: total_sessions,
successful_recoveries: successful_sessions,
success_rate,
average_recovery_time: avg_recovery_time,
most_common_errors: self.get_most_common_errors(),
best_performing_strategies: self.get_best_strategies(),
}
}
fn get_most_common_errors(&self) -> Vec<(String, usize)> {
vec![("NumericalInstability".to_string(), 5)]
}
fn get_best_strategies(&self) -> Vec<(String, f64)> {
let rates = self.strategy_success_rates.lock().expect("Operation failed");
let mut strategies: Vec<_> = rates.iter().map(|(k, v)| (k.clone(), *v)).collect();
strategies.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
strategies.into().iter().take(5).collect()
}
}
/// Optional contextual hints supplied by the caller to improve error
/// classification and strategy selection. All fields may be `None`.
#[derive(Debug, Clone)]
pub struct RecoveryContext {
/// Name of the algorithm that failed (e.g. "QR_decomposition").
pub algorithm_name: Option<String>,
/// (rows, cols) of the data involved, when known.
pub data_dimensions: Option<(usize, usize)>,
/// Estimated condition number of the system, when known.
pub estimated_condition_number: Option<f64>,
/// Iterations completed before the failure.
pub iterations_attempted: Option<usize>,
/// Last observed convergence metric.
pub convergence_metric: Option<f64>,
/// Available memory in bytes, when known.
pub available_memory: Option<usize>,
/// Caller-imposed time budget for recovery.
pub timeout: Option<Duration>,
/// Free-form user preferences consulted by strategies.
pub user_preferences: HashMap<String, String>,
}
impl Default for RecoveryContext {
/// An empty context: no hints available, no preferences set.
fn default() -> Self {
Self {
algorithm_name: None,
data_dimensions: None,
estimated_condition_number: None,
iterations_attempted: None,
convergence_metric: None,
available_memory: None,
timeout: None,
user_preferences: HashMap::new(),
}
}
}
/// Aggregated summary of all recorded recovery sessions.
#[derive(Debug, Clone)]
pub struct RecoveryStatistics {
pub total_recovery_sessions: usize,
pub successful_recoveries: usize,
/// Fraction of sessions that fully recovered, in [0, 1].
pub success_rate: f64,
pub average_recovery_time: Duration,
/// (error name, occurrence count) pairs.
pub most_common_errors: Vec<(String, usize)>,
/// (strategy name, learned success rate) pairs, best first.
pub best_performing_strategies: Vec<(String, f64)>,
}
/// Convenience constructor: builds a recovery engine using
/// [`AdvancedErrorRecoveryConfig::default`].
#[allow(dead_code)]
pub fn create_advanced_error_recovery<F>() -> AdvancedIntelligentErrorRecovery<F>
where
    F: Float + NumCast + SimdUnifiedOps + Zero + One + PartialOrd + Copy + Send + Sync
        + std::fmt::Display,
{
    let default_config = AdvancedErrorRecoveryConfig::default();
    AdvancedIntelligentErrorRecovery::new(default_config)
}
#[allow(dead_code)]
pub fn recover_computation<F, T>(
operation: impl Fn() -> StatsResult<T> + Send + Sync + Clone,
error: StatsError,
context: RecoveryContext,
) -> StatsResult<T>
where
F: Float + NumCast + SimdUnifiedOps + Zero + One + PartialOrd + Copy + Send + Sync
+ std::fmt::Display,
{
let recovery_system = create_advanced_error_recovery::<F>();
recovery_system.recover_from_error(error, operation, &context)
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    /// "singular" in a computation error must classify as ill-conditioned
    /// numerical instability.
    #[test]
    fn test_error_classification() {
        let recovery_system = create_advanced_error_recovery::<f64>();
        let error = StatsError::ComputationError("Matrix is singular".to_string());
        let context = RecoveryContext::default();
        let classification = recovery_system.classify_error(&error, &context);
        match classification {
            ErrorClassification::NumericalInstability { cause, .. } => {
                assert!(matches!(cause, InstabilityCause::IllConditioned { .. }));
            }
            _ => panic!("Expected NumericalInstability"),
        }
    }

    /// Ill-conditioned instability must produce at least one
    /// numerical-stabilization strategy.
    #[test]
    fn test_recovery_strategies_generation() {
        let recovery_system = create_advanced_error_recovery::<f64>();
        let classification = ErrorClassification::NumericalInstability {
            severity: InstabilitySeverity::High,
            // Was `{ condition, number: 1e12 }` — a syntax error; the field
            // is `condition_number`.
            cause: InstabilityCause::IllConditioned { condition_number: 1e12 },
        };
        let context = RecoveryContext::default();
        let strategies = recovery_system.generate_recovery_strategies(&classification, &context);
        assert!(!strategies.is_empty());
        assert!(strategies
            .iter()
            .any(|s| matches!(s, RecoveryStrategy::NumericalStabilization { .. })));
    }

    /// Struct-update syntax over `Default` must preserve overridden fields.
    #[test]
    fn test_recovery_config() {
        let config = AdvancedErrorRecoveryConfig {
            max_recovery_attempts: 3,
            recovery_timeout: Duration::from_secs(10),
            adaptive_strategies: false,
            ..Default::default()
        };
        assert_eq!(config.max_recovery_attempts, 3);
        assert_eq!(config.recovery_timeout, Duration::from_secs(10));
        assert!(!config.adaptive_strategies);
    }

    /// Context hints set by the caller must round-trip through the struct.
    #[test]
    fn test_recovery_context() {
        let mut context = RecoveryContext::default();
        context.algorithm_name = Some("QR_decomposition".to_string());
        context.data_dimensions = Some((100, 50));
        context.estimated_condition_number = Some(1e8);
        assert_eq!(context.algorithm_name.expect("algorithm_name was set"), "QR_decomposition");
        assert_eq!(context.data_dimensions.expect("data_dimensions was set"), (100, 50));
        assert!((context.estimated_condition_number.expect("condition number was set") - 1e8).abs() < 1e-10);
    }

    /// A fresh engine must report zeroed statistics.
    #[test]
    fn test_recovery_statistics() {
        let recovery_system = create_advanced_error_recovery::<f64>();
        let stats = recovery_system.get_recovery_statistics();
        assert_eq!(stats.total_recovery_sessions, 0);
        assert_eq!(stats.successful_recoveries, 0);
        assert_eq!(stats.success_rate, 0.0);
    }

    #[test]
    fn test_preprocessing_steps() {
        let step = PreprocessingStep::ImputeMissingValues {
            method: ImputationMethod::Median,
        };
        match step {
            PreprocessingStep::ImputeMissingValues { method } => {
                assert!(matches!(method, ImputationMethod::Median));
            }
            _ => panic!("Expected ImputeMissingValues"),
        }
    }

    #[test]
    fn test_memory_management_strategy() {
        let strategy = MemoryManagementStrategy::Adaptive;
        assert!(matches!(strategy, MemoryManagementStrategy::Adaptive));
    }

    #[test]
    fn test_validation_level() {
        let level = ValidationLevel::Comprehensive;
        assert!(matches!(level, ValidationLevel::Comprehensive));
    }
}