pub mod config;
pub mod engine;
pub mod enhanced_ml_predictor;
pub mod feature_extraction;
pub mod ml_predictor;
pub mod optimization;
pub mod pattern_analysis;
pub mod quality_assessment;
pub mod results;
pub use config::*;
pub use engine::*;
pub use enhanced_ml_predictor::{EnhancedMLPredictor, UncertaintyEstimate};
pub use ml_predictor::*;
pub use optimization::*;
pub use pattern_analysis::*;
pub use quality_assessment::{
DegradationDetector, QualityAssessor, QualityMeasurement,
QualityMetrics as QualityAssessmentMetrics, QualityStatistics,
};
pub use results::*;
#[cfg(test)]
mod tests {
    //! Unit tests covering the adaptive quantization module's public surface:
    //! configuration defaults, ML parameter prediction, feature extraction,
    //! quality assessment, workload pattern analysis, multi-objective
    //! optimization, runtime statistics, and report generation.
    use super::*;
    use crate::TorshResult;
    use std::time::Instant;
    use torsh_tensor::creation::tensor_1d;

    /// Default config should enable all subsystems with documented weights.
    #[test]
    fn test_adaptive_config_default() {
        let config = AdaptiveQuantConfig::default();
        assert!(config.enable_ml_prediction);
        assert!(config.enable_quality_assessment);
        assert!(config.enable_pattern_recognition);
        assert_eq!(config.update_frequency, 100);
        assert_eq!(config.quality_tolerance, 0.02);
        assert_eq!(config.performance_weight, 0.3);
        assert_eq!(config.energy_weight, 0.3);
        assert_eq!(config.accuracy_weight, 0.4);
        assert_eq!(config.max_adaptation_rate, 0.1);
    }

    /// Default quantization parameters: identity scale, 8-bit symmetric.
    #[test]
    fn test_quantization_parameters_default() {
        let params = QuantizationParameters::default();
        assert_eq!(params.scale, 1.0);
        assert_eq!(params.zero_point, 0);
        assert_eq!(params.bit_width, 8);
        assert_eq!(params.scheme, "symmetric");
    }

    /// Predicted parameters must stay inside their valid hardware ranges.
    #[test]
    fn test_ml_parameter_predictor() -> TorshResult<()> {
        let predictor = MLParameterPredictor::new();
        let features = vec![
            0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6,
        ];
        let params = predictor.predict_parameters(&features)?;
        assert!(params.scale > 0.0);
        assert!(params.bit_width >= 4 && params.bit_width <= 16);
        assert!(params.zero_point >= -128 && params.zero_point <= 127);
        Ok(())
    }

    /// Network produces output of the requested size and a non-negative loss.
    #[test]
    fn test_predictor_network() -> TorshResult<()> {
        let mut network = PredictorNetwork::new(4, 2, 0.01);
        let input = vec![0.1, 0.2, 0.3, 0.4];
        let target = vec![0.5, 0.6];
        let prediction = network.predict(&input)?;
        assert_eq!(prediction.len(), 2);
        let loss = network.train_step(&input, &target)?;
        assert!(loss >= 0.0);
        Ok(())
    }

    /// Full extraction yields 16 features; quick extraction yields 4.
    #[test]
    fn test_feature_extraction() -> TorshResult<()> {
        let extractor = FeatureExtractor::new();
        let tensor = tensor_1d(&[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]).unwrap();
        let features = extractor.extract_features(&tensor)?;
        assert_eq!(features.len(), 16);
        let data = vec![0.1, 0.2, 0.3, 0.4, 0.5];
        let quick_features = extractor.extract_quick_features(&data);
        assert_eq!(quick_features.len(), 4);
        Ok(())
    }

    /// Identical original/quantized tensors should score near-perfect quality
    /// and trigger no degradation.
    #[test]
    fn test_quality_assessment() -> TorshResult<()> {
        let mut assessor = QualityAssessor::new();
        let original = tensor_1d(&[0.1, 0.2, 0.3, 0.4, 0.5]).unwrap();
        let quantized = tensor_1d(&[0.1, 0.2, 0.3, 0.4, 0.5]).unwrap();
        let params = QuantizationParameters::default();
        // FIX: was the mojibake token `¶ms` (HTML-entity corruption of `&params`),
        // which did not compile.
        let quality = assessor.assess_quality(&original, &quantized, &params)?;
        assert!(quality.perceptual_score > 0.9);
        assert!(quality.ssim > 0.9);
        let degradation = assessor.detect_degradation();
        assert!(!degradation);
        Ok(())
    }

    /// A feature vector should be classified into at least one known pattern.
    #[test]
    fn test_pattern_analysis() -> TorshResult<()> {
        let mut analyzer = WorkloadPatternAnalyzer::new();
        let features = vec![
            0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6,
        ];
        let pattern = analyzer.analyze_pattern(&features)?;
        assert!(pattern.is_some());
        let stats = analyzer.get_pattern_statistics();
        assert!(stats.total_patterns > 0);
        Ok(())
    }

    /// Optimized parameters remain within valid ranges even with no pattern info.
    #[test]
    fn test_multi_objective_optimization() -> TorshResult<()> {
        let mut optimizer = MultiObjectiveOptimizer::new();
        let initial_params = QuantizationParameters::default();
        let config = AdaptiveQuantConfig::default();
        let optimized = optimizer.optimize_parameters(&initial_params, &None, &config)?;
        assert!(optimized.scale > 0.0);
        assert!(optimized.bit_width >= 4 && optimized.bit_width <= 16);
        Ok(())
    }

    /// End-to-end quantization preserves tensor shape and yields sane metrics.
    #[test]
    fn test_adaptive_quantization_engine() -> TorshResult<()> {
        let mut engine = AdaptiveQuantizationEngine::new(AdaptiveQuantConfig::default());
        let tensor = tensor_1d(&[0.1, 0.2, 0.3, 0.4, 0.5]).unwrap();
        let result = engine.adaptive_quantize(&tensor)?;
        assert!(result.parameters.scale > 0.0);
        assert!(result.quality_metrics.perceptual_score >= 0.0);
        assert_eq!(
            result.quantized_tensor.shape().dims(),
            tensor.shape().dims()
        );
        Ok(())
    }

    /// Statistics accumulators start at their documented defaults and update.
    #[test]
    fn test_runtime_statistics() {
        let mut stats = RuntimeStatistics::default();
        assert_eq!(stats.total_operations, 0);
        assert_eq!(stats.adaptation_events, 0);
        assert_eq!(stats.avg_quality, 1.0);
        stats.update_avg_quality(0.95);
        assert_eq!(stats.avg_quality, 0.95);
        stats.add_performance_improvement(0.1);
        assert_eq!(stats.avg_performance_improvement(), 0.1);
        stats.add_energy_savings(0.2);
        assert_eq!(stats.avg_energy_savings(), 0.2);
    }

    /// Every recommendation must carry a category, a suggestion, and a
    /// non-negative expected improvement.
    #[test]
    fn test_optimization_recommendations() {
        let engine = AdaptiveQuantizationEngine::new(AdaptiveQuantConfig::default());
        let recommendations = engine.get_optimization_recommendations();
        assert!(!recommendations.is_empty());
        for rec in &recommendations {
            assert!(!rec.category.is_empty());
            assert!(!rec.suggestion.is_empty());
            assert!(rec.expected_improvement >= 0.0);
        }
    }

    /// TrainingExample is a plain data carrier; check field round-trip.
    #[test]
    fn test_training_example() {
        let example = TrainingExample {
            features: vec![0.1, 0.2, 0.3],
            target: vec![1.0, 0.0, 8.0],
            quality_score: 0.95,
            timestamp: Instant::now(),
        };
        assert_eq!(example.features.len(), 3);
        assert_eq!(example.target.len(), 3);
        assert!(example.quality_score > 0.9);
    }

    /// Training over two examples processes both and reports a valid loss.
    #[test]
    fn test_ml_predictor_training() -> TorshResult<()> {
        let mut predictor = MLParameterPredictor::new();
        let examples = vec![
            TrainingExample {
                features: vec![0.1; 16],
                target: vec![1.0, 0.0, 8.0],
                quality_score: 0.9,
                timestamp: Instant::now(),
            },
            TrainingExample {
                features: vec![0.2; 16],
                target: vec![0.5, 10.0, 12.0],
                quality_score: 0.8,
                timestamp: Instant::now(),
            },
        ];
        let results = predictor.train(&examples)?;
        assert_eq!(results.examples_processed, 2);
        assert!(results.average_loss >= 0.0);
        Ok(())
    }

    /// Five assessments should be reflected in the aggregated statistics.
    #[test]
    fn test_quality_statistics() -> TorshResult<()> {
        let mut assessor = QualityAssessor::new();
        let original = tensor_1d(&[0.1, 0.2, 0.3, 0.4, 0.5]).unwrap();
        let quantized = tensor_1d(&[0.1, 0.2, 0.3, 0.4, 0.5]).unwrap();
        let params = QuantizationParameters::default();
        for _ in 0..5 {
            // FIX: was the mojibake token `¶ms` (corruption of `&params`),
            // which did not compile.
            assessor.assess_quality(&original, &quantized, &params)?;
        }
        let stats = assessor.get_quality_statistics();
        assert_eq!(stats.sample_count, 5);
        assert!(stats.avg_perceptual_score > 0.0);
        assert!(stats.avg_ssim > 0.0);
        Ok(())
    }

    /// A custom-learned pattern must be retrievable by name afterwards.
    #[test]
    fn test_pattern_learning() {
        let mut analyzer = WorkloadPatternAnalyzer::new();
        let features = vec![0.8; 16];
        let performance = PerformanceProfile {
            avg_execution_time: 10.0,
            memory_usage: 500.0,
            energy_consumption: 30.0,
            cache_efficiency: 0.6,
        };
        analyzer.learn_pattern("custom_pattern".to_string(), features, performance);
        let pattern = analyzer.get_pattern("custom_pattern");
        assert!(pattern.is_some());
        assert_eq!(pattern.unwrap().name, "custom_pattern");
    }

    /// Default constraint handler must ship non-degenerate bounds in all
    /// three constraint categories (hardware, quality, performance).
    #[test]
    fn test_constraint_handling() {
        let constraints = ConstraintHandler::default();
        assert!(!constraints
            .hardware_constraints
            .supported_bit_widths
            .is_empty());
        assert!(constraints.hardware_constraints.max_memory_bandwidth > 0.0);
        assert!(constraints.quality_constraints.min_snr > 0.0);
        assert!(constraints.quality_constraints.max_mse > 0.0);
        assert!(constraints.performance_constraints.max_latency > 0.0);
        assert!(constraints.performance_constraints.min_throughput > 0.0);
    }

    /// All three report formats (text, JSON, CSV) contain their key sections.
    #[test]
    fn test_report_generation() -> TorshResult<()> {
        let mut engine = AdaptiveQuantizationEngine::new(AdaptiveQuantConfig::default());
        let tensor = tensor_1d(&[0.1, 0.2, 0.3, 0.4, 0.5]).unwrap();
        let result = engine.adaptive_quantize(&tensor)?;
        let report = result.generate_report();
        assert!(report.contains("Adaptive Quantization Report"));
        assert!(report.contains("Quantization Parameters"));
        assert!(report.contains("Quality Metrics"));
        let json_report = result.generate_json_report();
        assert!(json_report.contains("adaptive_quantization_report"));
        assert!(json_report.contains("parameters"));
        assert!(json_report.contains("quality_metrics"));
        let csv_line = result.generate_csv_line();
        assert!(!csv_line.is_empty());
        let csv_header = AdaptiveQuantizationResult::csv_header();
        assert!(csv_header.contains("scale"));
        assert!(csv_header.contains("quality"));
        Ok(())
    }

    /// Smoke test: every submodule's main type constructs with defaults.
    #[test]
    fn test_modular_structure_integrity() {
        let config = AdaptiveQuantConfig::default();
        assert!(config.enable_ml_prediction);
        let _predictor = MLParameterPredictor::new();
        let extractor = FeatureExtractor::new();
        assert_eq!(extractor.get_feature_dimension(), 16);
        let _assessor = QualityAssessor::new();
        let analyzer = WorkloadPatternAnalyzer::new();
        // Idiom fix: `.len() > 0` -> `!is_empty()` (clippy::len_zero).
        assert!(!analyzer.get_all_patterns().is_empty());
        let _optimizer = MultiObjectiveOptimizer::new();
        let engine = AdaptiveQuantizationEngine::new(config);
        assert!(engine.get_runtime_stats().total_operations == 0);
        println!("Phase 83 modular structure integrity verified");
    }

    /// Workflow test: quantize tensors of differing magnitudes, verify the
    /// operation counter advances, then train the predictor and fetch
    /// recommendations.
    #[test]
    fn test_comprehensive_adaptive_quantization_workflow() -> TorshResult<()> {
        let mut engine = AdaptiveQuantizationEngine::new(AdaptiveQuantConfig::default());
        let test_cases = vec![
            tensor_1d(&[0.1, 0.2, 0.3, 0.4, 0.5]).unwrap(),
            tensor_1d(&[1.0, 2.0, 3.0, 4.0, 5.0]).unwrap(),
            tensor_1d(&[0.01, 0.02, 0.03, 0.04, 0.05]).unwrap(),
        ];
        for (i, tensor) in test_cases.iter().enumerate() {
            let result = engine.adaptive_quantize(tensor)?;
            assert!(result.parameters.scale > 0.0);
            assert!(result.parameters.bit_width >= 4 && result.parameters.bit_width <= 16);
            assert_eq!(
                result.quantized_tensor.shape().dims(),
                tensor.shape().dims()
            );
            let stats = engine.get_runtime_stats();
            assert_eq!(stats.total_operations, i + 1);
            println!(
                "Test case {}: Scale={:.4}, Bit-width={}, Pattern={:?}",
                i + 1,
                result.parameters.scale,
                result.parameters.bit_width,
                result.pattern_info
            );
        }
        let training_examples = vec![TrainingExample {
            features: vec![0.5; 16],
            target: vec![1.0, 0.0, 8.0],
            quality_score: 0.95,
            timestamp: Instant::now(),
        }];
        let training_results = engine.train_predictor(&training_examples)?;
        assert!(training_results.examples_processed > 0);
        let recommendations = engine.get_optimization_recommendations();
        assert!(!recommendations.is_empty());
        for rec in recommendations {
            println!(
                "💡 {}: {} (Priority: {:?})",
                rec.category, rec.suggestion, rec.priority
            );
        }
        Ok(())
    }

    /// Edge cases: empty tensor (falls back to a single zero if construction
    /// fails), single-element tensor, and self-comparison quality.
    #[test]
    fn test_edge_cases_and_error_handling() -> TorshResult<()> {
        let empty_tensor = tensor_1d(&[]).unwrap_or_else(|_| tensor_1d(&[0.0]).unwrap());
        let mut engine = AdaptiveQuantizationEngine::new(AdaptiveQuantConfig::default());
        let _result = engine.adaptive_quantize(&empty_tensor)?;
        let extractor = FeatureExtractor::new();
        let small_tensor = tensor_1d(&[0.1]).unwrap();
        let features = extractor.extract_features(&small_tensor)?;
        assert_eq!(features.len(), 16);
        let mut assessor = QualityAssessor::new();
        let tensor = tensor_1d(&[0.5; 10]).unwrap();
        let quality =
            assessor.assess_quality(&tensor, &tensor, &QuantizationParameters::default())?;
        assert!(quality.perceptual_score > 0.99);
        Ok(())
    }
}