use crate::unified_quality::automation::ConservativeAutomator;
use crate::unified_quality::enforcement::ErrorBudgetEnforcer;
use crate::unified_quality::foundation::QualityMonitor;
use crate::unified_quality::intelligence::QualityAssistant;
/// Aggregates the four unified-quality subsystems into one deployable
/// unit, mirroring the phases exercised by the integration tests below.
pub struct ProductionSystem {
/// Foundation layer: watches a project tree and collects per-file
/// quality metrics (see `start_monitoring` / `get_all_metrics` usage).
pub monitor: QualityMonitor,
/// Intelligence layer: produces refactoring suggestions for a file
/// (see `generate_suggestions` / `analyze_file` usage in the tests).
pub assistant: QualityAssistant,
/// Enforcement layer: gates commits per team against an error budget
/// via `register_team` / `check_commit`.
pub enforcer: ErrorBudgetEnforcer,
/// Automation layer: reports transforms it considers safe to apply
/// (see `get_safe_transforms`).
pub automator: ConservativeAutomator,
}
#[cfg_attr(coverage_nightly, coverage(off))]
#[cfg(test)]
mod tests {
use super::super::*;
use super::ProductionSystem;
use crate::unified_quality::automation::ConservativeAutomator;
use crate::unified_quality::enforcement::{EnforcerConfig, ErrorBudgetEnforcer};
use crate::unified_quality::foundation::{MonitorConfig, QualityMonitor};
use crate::unified_quality::github_actions::{GitHubActionsIntegration, GitHubConfig};
use crate::unified_quality::intelligence::QualityAssistant;
use crate::unified_quality::onboarding::{OnboardingConfig, TeamOnboarding};
use crate::unified_quality::performance::{PerformanceConfig, PerformanceMonitor};
use anyhow::Result;
use std::path::{Path, PathBuf};
use std::time::Duration;
use tempfile::TempDir;
use tokio::fs;
#[tokio::test]
async fn test_complete_system_integration() {
// End-to-end smoke test: writes a fixture file containing one trivial
// function and one TODO-annotated, triply nested function, builds the
// full ProductionSystem, then drives each subsystem phase in order.
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let project_path = temp_dir.path().to_path_buf();
let test_file = project_path.join("test.rs");
// Fixture deliberately mixes simple and complex code so the analyzers
// have something to flag.
fs::write(
&test_file,
r#"
fn simple_function() -> i32 {
42
}
// TODO: Refactor complex function for better maintainability
fn complex_function(x: i32) -> i32 {
if x > 0 {
if x > 10 {
if x > 100 {
x * 3
} else {
x * 2
}
} else {
x + 1
}
} else {
0
}
}
"#,
)
.await
.expect("Failed to write test file");
let mut production_system =
create_production_system().expect("Failed to create production system");
// Phase order matters: the integrated-workflow phase reads the metrics
// that the monitoring phase starts collecting.
test_monitoring_phase(&mut production_system.monitor, &project_path).await;
test_intelligence_phase(&mut production_system.assistant, &test_file).await;
test_enforcement_phase(&mut production_system.enforcer).await;
test_automation_phase(&mut production_system.automator, &test_file).await;
test_integrated_workflow(&mut production_system, &project_path).await;
test_external_integrations(&project_path).await;
println!("✅ Complete system integration test passed!");
}
/// Walks a team through the first onboarding steps and checks that the
/// progress report reflects them.
#[tokio::test]
#[ignore = "requires quality framework setup"]
async fn test_progressive_quality_adoption() {
    let mut wizard = create_onboarding_system();
    let prefs = create_test_team_preferences();
    let session = wizard
        .start_onboarding("test-team".to_string(), prefs)
        .expect("Failed to start onboarding");
    // New teams start in the non-enforcing Observe mode.
    assert_eq!(session.quality_mode, QualityMode::Observe);
    let team: crate::unified_quality::enforcement::TeamId = "test-team".to_string();
    wizard
        .complete_tutorial(&team, "quality_philosophy".to_string(), 1)
        .expect("Failed to complete tutorial");
    let progress = wizard
        .generate_progress_report(&team)
        .expect("Failed to generate progress report");
    // Completing a tutorial must register both progress and an achievement.
    assert!(progress.overall_completion > 0.0);
    assert!(!progress.achievements.is_empty());
    println!("✅ Progressive quality adoption test passed!");
}
/// Establishes a named performance baseline and verifies the generated
/// report carries non-trivial timing and memory statistics.
#[tokio::test]
async fn test_performance_monitoring() {
    let mut perf = PerformanceMonitor::new(create_performance_config());
    let baseline = perf
        .establish_baseline("test-baseline".to_string())
        .await
        .expect("Failed to establish baseline");
    // The baseline must include at least the analysis-time measurement.
    assert!(!baseline.measurements.is_empty());
    assert!(baseline.measurements.contains_key("analysis_time_ms"));
    let report = perf.generate_performance_report();
    assert!(report.current_statistics.analysis.avg_analysis_time_ms > 0.0);
    assert!(report.current_statistics.memory.avg_memory_mb > 0.0);
    println!("✅ Performance monitoring test passed!");
}
/// Runs a pull-request analysis through the GitHub Actions integration
/// and checks both the PR result and the generated workflow YAML.
#[tokio::test]
async fn test_github_actions_workflow() {
    let workspace = TempDir::new().expect("Failed to create temp dir");
    let root = workspace.path().to_path_buf();
    create_test_project(&root).await;
    // Wire a monitor + enforcer pair into the integration.
    let monitor =
        QualityMonitor::new(MonitorConfig::default()).expect("Failed to create monitor");
    let enforcer = ErrorBudgetEnforcer::new(EnforcerConfig::default());
    let mut integration =
        GitHubActionsIntegration::new(monitor, enforcer, create_github_config());
    let result = integration
        .analyze_pull_request(
            123,
            "main".to_string(),
            "feature-branch".to_string(),
            vec![root.join("src/main.rs")],
        )
        .await
        .expect("Failed to analyze pull request");
    // A clean fixture project should pass the quality gate outright.
    assert!(matches!(
        result.status,
        crate::unified_quality::github_actions::WorkflowStatus::Success
    ));
    assert!(result.comment.is_some());
    assert!(!result.outputs.is_empty());
    // The emitted workflow must reference the pmat CLI entry point.
    let yaml = integration.generate_workflow_yaml();
    assert!(yaml.contains("name: Quality Gate"));
    assert!(yaml.contains("pmat unified-quality analyze"));
    println!("✅ GitHub Actions integration test passed!");
}
/// Registers two teams with one enforcer and submits a small diff for
/// team-a; the shared budget must approve it.
#[tokio::test]
async fn test_cross_team_budget_sharing() {
    let mut enforcer = ErrorBudgetEnforcer::new(EnforcerConfig::default());
    for team in ["team-a", "team-b"] {
        enforcer.register_team(team.to_string(), None);
    }
    let small_change = crate::unified_quality::enforcement::DiffAnalysis {
        complexity_change: 30,
        satd_change: 2,
        coverage_change: -0.01,
        files_changed: vec!["file1.rs".to_string()],
    };
    let verdict = enforcer.check_commit(&"team-a".to_string(), &small_change);
    assert!(matches!(
        verdict,
        crate::unified_quality::enforcement::Decision::Approved
    ));
    println!("✅ Cross-team budget sharing test passed!");
}
#[tokio::test]
#[ignore = "requires quality framework setup"]
async fn test_ml_refactoring_integration() {
// Verifies the QualityAssistant flags a pathologically nested function
// and proposes at least one "Extract"-style refactoring for it.
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let complex_file = temp_dir.path().join("complex.rs");
// Fixture: five levels of nesting, SATD markers (TODO/FIXME), and a
// triple loop, to guarantee high complexity scores.
fs::write(
&complex_file,
r#"
// TODO: This function is too complex
fn very_complex_function(a: i32, b: i32, c: i32) -> i32 {
if a > 0 {
if b > 0 {
if c > 0 {
if a > b {
if b > c {
// FIXME: Nested conditions are hard to read
match (a, b, c) {
(1, 2, 3) => 1,
(2, 3, 4) => 2,
(3, 4, 5) => 3,
_ => {
let mut result = 0;
for i in 0..a {
for j in 0..b {
for k in 0..c {
if i+j+k > 10 { result += 1; }
}}}
result
}
}
} else {
a + b - c
}
} else {
b + c - a
}
} else {
a + b
}
} else {
a
}
} else {
0
}
}
"#,
)
.await
.expect("Failed to write complex file");
let assistant = QualityAssistant::new();
let suggestions = assistant
.analyze_file(&complex_file)
.await
.expect("Failed to analyze file");
assert!(!suggestions.is_empty());
// "Extract" may appear either in the matched pattern's name or in the
// rendered preview text; either counts.
let has_complexity_suggestion = suggestions
.iter()
.any(|s| s.pattern.name.contains("Extract") || s.preview.contains("Extract"));
assert!(has_complexity_suggestion);
println!("✅ ML-driven refactoring integration test passed!");
}
/// Real-time monitoring: after `start_monitoring`, a file modification
/// should surface in `get_all_metrics()`.
///
/// Fix: the original slept a fixed 100 ms and asserted immediately,
/// which races the file watcher on slow CI machines. We now poll with a
/// bounded 2 s deadline — fast machines exit on the first non-empty
/// snapshot, slow ones get more headroom, and a genuine failure still
/// trips the assert after the timeout.
#[tokio::test]
async fn test_realtime_monitoring() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let project_path = temp_dir.path().to_path_buf();
    let test_file = project_path.join("monitored.rs");
    fs::write(&test_file, "fn simple() { }")
        .await
        .expect("Failed to write initial file");
    let config = MonitorConfig::default();
    let mut monitor = QualityMonitor::new(config).expect("Failed to create monitor");
    monitor
        .start_monitoring(project_path.clone())
        .await
        .expect("Failed to start monitoring");
    // Mutate the file so the watcher has a change event to process.
    fs::write(
        &test_file,
        r#"
fn more_complex() -> i32 {
if true {
42
} else {
0
}
}
"#,
    )
    .await
    .expect("Failed to update file");
    // Poll instead of a single fixed sleep: bounded wait, early exit.
    let deadline = std::time::Instant::now() + Duration::from_secs(2);
    let metrics = loop {
        let snapshot = monitor.get_all_metrics();
        if !snapshot.is_empty() || std::time::Instant::now() >= deadline {
            break snapshot;
        }
        tokio::time::sleep(Duration::from_millis(25)).await;
    };
    assert!(!metrics.is_empty());
    println!("✅ Real-time monitoring test passed!");
}
/// Simulated rollout: onboard a team, monitor a realistic codebase,
/// baseline performance, then gate a typical production diff.
#[tokio::test]
async fn test_production_deployment_scenario() {
    let mut onboard = create_onboarding_system();
    let _session = onboard
        .start_onboarding("prod-team".to_string(), create_test_team_preferences())
        .expect("Failed to start onboarding");
    let workspace = TempDir::new().expect("Failed to create temp dir");
    let root = workspace.path().to_path_buf();
    create_production_codebase(&root).await;
    let mut monitor =
        QualityMonitor::new(MonitorConfig::default()).expect("Failed to create monitor");
    monitor
        .start_monitoring(root.clone())
        .await
        .expect("Failed to start monitoring");
    let mut enforcer = ErrorBudgetEnforcer::new(EnforcerConfig::default());
    enforcer.register_team("prod-team".to_string(), None);
    let mut perf = PerformanceMonitor::new(create_performance_config());
    let _baseline = perf
        .establish_baseline("prod-baseline".to_string())
        .await
        .expect("Failed to establish performance baseline");
    // A modest diff touching the critical module should clear the budget.
    let diff = crate::unified_quality::enforcement::DiffAnalysis {
        complexity_change: 15,
        satd_change: 1,
        coverage_change: 0.02,
        files_changed: vec!["src/critical_module.rs".to_string()],
    };
    let verdict = enforcer.check_commit(&"prod-team".to_string(), &diff);
    assert!(matches!(
        verdict,
        crate::unified_quality::enforcement::Decision::Approved
    ));
    let metrics = monitor.get_all_metrics();
    assert!(
        !metrics.is_empty(),
        "Production monitoring should have collected metrics"
    );
    let report = perf.generate_performance_report();
    assert!(report.current_statistics.analysis.avg_analysis_time_ms > 0.0);
    println!("✅ Production deployment scenario test passed!");
    println!(
        "📊 Production metrics collected: {} files",
        metrics.len()
    );
}
/// Assembles all four subsystems with their default configurations.
///
/// Returns `Err` only if the quality monitor fails to construct.
fn create_production_system() -> Result<ProductionSystem> {
    Ok(ProductionSystem {
        monitor: QualityMonitor::new(MonitorConfig::default())?,
        assistant: QualityAssistant::new(),
        enforcer: ErrorBudgetEnforcer::new(EnforcerConfig::default()),
        automator: ConservativeAutomator::new(
            crate::unified_quality::automation::AutomatorConfig::default(),
        ),
    })
}
/// Builds a fully-featured onboarding system: everything enabled
/// except leaderboards.
fn create_onboarding_system() -> TeamOnboarding {
    use crate::unified_quality::onboarding::GamificationConfig;
    TeamOnboarding::new(OnboardingConfig {
        interactive_mode: true,
        personalization: true,
        track_progress: true,
        gamification: GamificationConfig {
            achievements: true,
            badges: true,
            leaderboards: false,
            points: true,
        },
    })
}
/// Canned preferences: a five-person, intermediate-level web team that
/// favors hands-on learning and wants every notification channel on.
fn create_test_team_preferences() -> crate::unified_quality::onboarding::TeamPreferences {
    use crate::unified_quality::onboarding::{
        ExperienceLevel, LearningStyle, NotificationPreference, ProjectType, QualityMaturity,
        TeamInfo, TeamPreferences,
    };
    TeamPreferences {
        languages: vec!["rust".to_string(), "python".to_string()],
        learning_style: LearningStyle::Practical,
        notifications: NotificationPreference {
            daily_updates: true,
            weekly_summaries: true,
            achievements: true,
            celebrations: true,
        },
        philosophy: QualityPhilosophy::default(),
        team_info: TeamInfo {
            size: 5,
            experience_level: ExperienceLevel::Intermediate,
            project_type: ProjectType::WebApplication,
            quality_maturity: QualityMaturity::Basic,
        },
    }
}
/// One-shot benchmarking configuration: no continuous loop, a single
/// conservative cache optimization strategy, default thresholds and
/// retention.
fn create_performance_config() -> PerformanceConfig {
    use crate::unified_quality::performance::*;
    let optimization = OptimizationConfig {
        auto_optimize: false,
        strategies: vec![OptimizationStrategy::CacheOptimization],
        min_improvement_percent: 5.0,
        experimental: false,
    };
    PerformanceConfig {
        continuous_monitoring: false,
        benchmark_interval: Duration::from_secs(60),
        thresholds: PerformanceThresholds::default(),
        optimization,
        retention: RetentionConfig::default(),
    }
}
/// GitHub integration config with a dummy repository and token; all
/// behavioral knobs stay at their defaults.
fn create_github_config() -> GitHubConfig {
    use crate::unified_quality::github_actions::*;
    GitHubConfig {
        repository: "test-org/test-repo".to_string(),
        token: "test-token".to_string(),
        quality_thresholds: QualityThresholds::default(),
        triggers: WorkflowTriggers::default(),
        comments: CommentConfig::default(),
    }
}
async fn create_test_project(path: &PathBuf) {
let src_dir = path.join("src");
fs::create_dir_all(&src_dir)
.await
.expect("Failed to create src directory");
fs::write(
src_dir.join("main.rs"),
r#"
fn main() {
println!("Hello, world!");
}
fn add(a: i32, b: i32) -> i32 {
a + b
}
"#,
)
.await
.expect("Failed to write main.rs");
}
async fn create_production_codebase(path: &PathBuf) {
let src_dir = path.join("src");
fs::create_dir_all(&src_dir)
.await
.expect("Failed to create src directory");
fs::write(
src_dir.join("lib.rs"),
r#"
pub mod auth;
pub mod database;
pub mod api;
pub mod utils;
"#,
)
.await
.expect("Failed to write lib.rs");
fs::write(
src_dir.join("auth.rs"),
r#"
/// User.
pub struct User {
pub id: u64,
pub username: String,
}
/// Authenticate user.
pub fn authenticate_user(username: &str, password: &str) -> Option<User> {
if username == "admin" && password == "secret" {
Some(User {
id: 1,
username: username.to_string(),
})
} else {
None
}
}
"#,
)
.await
.expect("Failed to write auth.rs");
fs::write(
src_dir.join("critical_module.rs"),
r#"
/// Critical business logic module
pub fn process_payment(amount: f64) -> Result<String, String> {
if amount <= 0.0 {
Err("Invalid amount".to_string())
} else if amount > 10000.0 {
Err("Amount too large".to_string())
} else {
Ok(format!("Payment of ${:.2} processed", amount))
}
}
"#,
)
.await
.expect("Failed to write critical_module.rs");
}
async fn test_monitoring_phase(monitor: &mut QualityMonitor, project_path: &PathBuf) {
monitor
.start_monitoring(project_path.clone())
.await
.expect("Failed to start monitoring");
tokio::time::sleep(Duration::from_millis(50)).await;
let metrics = monitor.get_all_metrics();
println!("📊 Monitoring phase: {} files tracked", metrics.len());
}
/// Phase 2: the assistant must produce at least one suggestion for the
/// fixture file written by the caller.
async fn test_intelligence_phase(assistant: &mut QualityAssistant, test_file: &PathBuf) {
    let hints = assistant
        .generate_suggestions(test_file)
        .expect("Failed to get intelligence suggestions");
    assert!(
        !hints.is_empty(),
        "Intelligence should provide suggestions"
    );
    println!(
        "🧠 Intelligence phase: {} suggestions generated",
        hints.len()
    );
}
/// Phase 3: a tiny, coverage-positive diff must pass the budget gate
/// for a freshly registered team.
async fn test_enforcement_phase(enforcer: &mut ErrorBudgetEnforcer) {
    enforcer.register_team("test-team".to_string(), None);
    let tiny_diff = crate::unified_quality::enforcement::DiffAnalysis {
        complexity_change: 5,
        satd_change: 0,
        coverage_change: 0.01,
        files_changed: vec!["test.rs".to_string()],
    };
    assert!(matches!(
        enforcer.check_commit(&"test-team".to_string(), &tiny_diff),
        crate::unified_quality::enforcement::Decision::Approved
    ));
    println!("⚖️ Enforcement phase: Decision rendered");
}
async fn test_automation_phase(automator: &mut ConservativeAutomator, _test_file: &PathBuf) {
let transforms = automator.get_safe_transforms();
println!(
"🤖 Automation phase: {} safe transforms suggested",
transforms.len()
);
}
async fn test_integrated_workflow(
production_system: &mut ProductionSystem,
_project_path: &PathBuf,
) {
let all_metrics = production_system.monitor.get_all_metrics();
if !all_metrics.is_empty() {
let total_complexity: u32 = all_metrics.values().map(|m| m.complexity).sum();
let diff = crate::unified_quality::enforcement::DiffAnalysis {
complexity_change: total_complexity as i32,
satd_change: 0,
coverage_change: 0.0,
files_changed: all_metrics
.keys()
.map(|p| p.to_string_lossy().to_string())
.collect(),
};
let _decision = production_system
.enforcer
.check_commit(&"integrated-test".to_string(), &diff);
}
println!("🔗 Integrated workflow: All components working together");
}
/// Phase 6 placeholder: external integrations (GitHub Actions etc.) are
/// exercised by their own tests; this currently only logs.
///
/// Fix: `_project_path` is `&Path` rather than `&PathBuf` (idiomatic
/// borrowed path); `&PathBuf` callers keep compiling via deref coercion.
async fn test_external_integrations(_project_path: &Path) {
    println!("🌐 External integrations: Configuration validated");
}
}