#[cfg_attr(coverage_nightly, coverage(off))]
#[cfg(test)]
mod sprint30_integration_tests {
use crate::tdg::{
AdaptiveThresholdFactory, OperationPriority, ResourceControllerFactory, SchedulerFactory,
TdgAnalyzer, TdgConfig, TieredStorageFactory,
};
use std::path::Path;
use tokio::time::{sleep, Duration};
#[tokio::test]
async fn test_sprint30_complete_integration() {
    // End-to-end smoke test: construct an analyzer with every Sprint 30
    // resource-management component enabled, analyze a real source file at
    // both priorities, dump all diagnostics, and sanity-check the scores.
    let analyzer = TdgAnalyzer::with_full_resource_management(TdgConfig::default())
        .await
        .expect("Failed to create analyzer with full resource management");

    let target = Path::new("server/src/tdg/resource_control.rs");

    // High-priority (commit-time) pass.
    let commit_score = analyzer
        .analyze_file_commit(target)
        .await
        .expect("Failed to analyze file with commit priority");
    println!(
        "Commit analysis completed - Score: {:.1}",
        commit_score.total
    );

    // Low-priority (background) pass over the same file.
    let background_score = analyzer
        .analyze_file_background(target)
        .await
        .expect("Failed to analyze file with background priority");
    println!(
        "Background analysis completed - Score: {:.1}",
        background_score.total
    );

    // Print diagnostics from each optional subsystem that is wired up.
    if let Some(stats) = analyzer.get_scheduler_stats().await {
        println!("Scheduler Stats: {}", stats.format_diagnostic());
    }
    if let Some(stats) = analyzer.get_adaptive_stats().await {
        println!("Adaptive Stats: {}", stats.format_diagnostic());
    }
    if let Some(stats) = analyzer.get_resource_stats().await {
        println!("Resource Stats: {}", stats.format_diagnostic());
    }
    if let Some(usage) = analyzer.get_resource_usage().await {
        println!(
            "Current Resource Usage: {:.1}MB memory, {:.1}% CPU",
            usage.memory_mb,
            usage.cpu_utilization * 100.0
        );
    }

    // Both passes must produce a positive, non-failing score.
    assert!(commit_score.total > 0.0, "Commit analysis should produce valid score");
    assert!(background_score.total > 0.0, "Background analysis should produce valid score");
    assert!(commit_score.grade != crate::tdg::Grade::F, "Analysis should not fail");
    assert!(background_score.grade != crate::tdg::Grade::F, "Analysis should not fail");

    println!("✅ Sprint 30 Week 5 Complete: All transactional hashed TDG components integrated successfully!");
}
#[tokio::test]
async fn test_resource_enforcement_under_pressure() {
    // Verifies the resource controller enforces limits when several
    // operations compete for the same budget: allocations are held alive so
    // that later requests observe the accumulated pressure.
    let resource_controller = ResourceControllerFactory::create_dev_optimized();
    resource_controller
        .start_monitoring()
        .await
        .expect("Failed to start resource monitoring");

    let mut allocations = Vec::new();
    for i in 0..5 {
        let result = resource_controller
            .request_resources(
                format!("test-op-{}", i),
                crate::tdg::resource_control::OperationType::Analysis,
                OperationPriority::Medium,
                // Requested amount; presumably MB -- TODO confirm the unit
                // against request_resources' documentation.
                100.0,
            )
            .await;
        match result {
            Ok(allocation) => {
                println!("Operation {} allowed", i);
                // Keep the guard alive so its resources stay reserved.
                allocations.push(allocation);
            }
            Err(e) => {
                // First rejection means enforcement kicked in; stop asking.
                println!("Operation {} rejected: {}", i, e);
                break;
            }
        }
    }

    let stats = resource_controller.get_enforcement_stats().await;
    println!("Final enforcement stats: {}", stats.format_diagnostic());
    resource_controller.stop_monitoring().await;

    assert!(
        stats.total_requests > 0,
        "Should have processed some requests"
    );
    println!("✅ Resource enforcement working correctly under pressure");
}
#[tokio::test]
async fn test_adaptive_performance_optimization() {
    // Feeds the adaptive threshold manager a stream of progressively slower
    // samples (150ms .. 290ms) and verifies it records them and exposes a
    // coherent set of tuned thresholds afterwards.
    let adaptive_manager = AdaptiveThresholdFactory::create_dev_optimized();

    for i in 0..15 {
        let sample = adaptive_manager
            .create_sample(
                Duration::from_millis(150 + i * 10), // simulated analysis duration
                false, // NOTE(review): likely a cache-hit flag -- confirm against create_sample
                5,     // NOTE(review): likely an item/file count -- confirm against create_sample
            )
            .await;
        adaptive_manager
            .record_sample(sample)
            .await
            .expect("Failed to record performance sample");
    }

    let stats = adaptive_manager.get_performance_stats().await;
    println!(
        "Performance stats after slow samples: {}",
        stats.format_diagnostic()
    );

    let thresholds = adaptive_manager.get_current_thresholds().await;
    println!(
        "Current thresholds - Cache: {}, Permits: {}/{}, Compression: {}",
        thresholds.hot_cache_size,
        thresholds.high_priority_permits,
        thresholds.low_priority_permits,
        thresholds.compression_level
    );

    assert!(
        stats.total_samples > 10,
        "Should have recorded multiple samples"
    );
    println!("✅ Adaptive performance optimization working correctly");
}
#[tokio::test]
async fn test_tiered_storage_with_compression() {
    use crate::tdg::{
        AnalysisMetadata, ComponentScores, FileIdentity, FullTdgRecord, Grade, Language,
        SemanticSignature, TdgScore,
    };
    use blake3;
    use std::time::SystemTime;

    // Storage under test.
    let storage =
        TieredStorageFactory::create_default().expect("Failed to create tiered storage");

    // Synthesize a minimal analysis record for a tiny Rust snippet.
    let test_content = "fn main() { println!(\"Hello, world!\"); }";
    let content_hash = blake3::hash(test_content.as_bytes());

    let identity = FileIdentity {
        path: Path::new("test.rs").to_path_buf(),
        content_hash,
        size_bytes: test_content.len() as u64,
        modified_time: SystemTime::now(),
    };
    let score = TdgScore {
        total: 85.5,
        grade: Grade::AMinus,
        language: Language::Rust,
        confidence: 0.95,
        file_path: Some(Path::new("test.rs").to_path_buf()),
        ..Default::default()
    };
    let components = ComponentScores {
        complexity_breakdown: std::collections::HashMap::new(),
        duplication_sources: Vec::new(),
        coupling_dependencies: Vec::new(),
        doc_missing_items: Vec::new(),
        consistency_violations: Vec::new(),
    };
    let semantic_sig = SemanticSignature {
        // Derive a stable structural hash from the first 8 bytes of the
        // content hash.
        ast_structure_hash: u64::from_le_bytes(
            content_hash.as_bytes()[0..8].try_into().unwrap(),
        ),
        identifier_pattern: "main".to_string(),
        control_flow_pattern: "sequential".to_string(),
        import_dependencies: Vec::new(),
    };
    let metadata = AnalysisMetadata {
        analyzer_version: "2.37.3".to_string(),
        analysis_duration_ms: 50,
        language_confidence: 0.95,
        analysis_timestamp: SystemTime::now(),
        cache_hit: false,
    };
    let record = FullTdgRecord {
        identity,
        score,
        components,
        semantic_sig,
        metadata,
    };

    storage
        .store(record)
        .await
        .expect("Failed to store record in tiered storage");

    // A freshly stored record must be resident in the hot cache, with its
    // score round-tripping within floating-point tolerance.
    match storage.get_hot(&content_hash) {
        Some(hot_entry) => {
            println!(
                "Retrieved from hot cache: score = {:.1}",
                hot_entry.total_score
            );
            assert!(
                (hot_entry.total_score - 85.5).abs() < 0.1,
                "Score should match stored value"
            );
        }
        None => panic!("Record should be in hot cache after storage"),
    }

    let stats = storage.get_statistics();
    println!("Storage stats: {}", stats.format_diagnostic());
    assert!(
        stats.total_entries > 0,
        "Should have stored at least one record"
    );
    println!("✅ Tiered storage with compression working correctly");
}
#[tokio::test]
async fn test_fair_scheduling_preemption() {
    // A background operation is admitted first; a commit-priority operation
    // arriving shortly afterwards must also be admitted by the fair scheduler.
    let scheduler = SchedulerFactory::create_balanced();

    let background_guard = scheduler
        .schedule_background(Path::new("background_file.rs").to_path_buf())
        .await
        .expect("Failed to schedule background operation");

    // Give the background operation a moment to register before the
    // higher-priority request arrives.
    sleep(Duration::from_millis(10)).await;

    let commit_guard = scheduler
        .schedule_commit(Path::new("commit_file.rs").to_path_buf())
        .await
        .expect("Failed to schedule commit operation");

    println!(
        "Scheduler stats: {}",
        scheduler.get_statistics().await.format_diagnostic()
    );

    // Release the guards in reverse order of acquisition.
    drop(commit_guard);
    drop(background_guard);
    println!("✅ Fair scheduling with preemption working correctly");
}
}