clnrm_core/testing/
mod.rs

1//! Testing utilities and helpers for CLNRM
2//!
3//! This module provides testing infrastructure including property-based
4//! test generators, test fixtures, and helper functions.
5
6// London School TDD tests for Weaver integration (mock-driven)
7pub mod london_tdd_tests;
8
9// Re-export framework test types and functions for CLI commands
10use crate::error::{CleanroomError, Result};
11use std::collections::HashMap;
12use std::sync::OnceLock;
13
/// Aggregate results for an entire framework self-test run.
///
/// Produced by [`run_framework_tests`] / [`run_framework_tests_by_suite`];
/// serializable so CLI commands can emit it as a machine-readable report.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct FrameworkTestResults {
    /// Total tests executed across all suites
    pub total_tests: u32,
    /// Tests that passed
    pub passed_tests: u32,
    /// Tests that failed
    pub failed_tests: u32,
    /// Wall-clock duration of the whole run, in milliseconds
    pub total_duration_ms: u64,
    /// Individual test results, in execution order
    pub test_results: Vec<TestResult>,
}
28
/// Outcome of a single self-test.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct TestResult {
    /// Human-readable test name
    pub name: String,
    /// Whether the test passed
    pub passed: bool,
    /// Test duration in milliseconds
    pub duration_ms: u64,
    /// Error message when the test failed; `None` on success
    pub error: Option<String>,
}
41
/// Results for one named test suite, used for organized reporting.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct SuiteResult {
    /// Suite name (e.g. "framework", "container")
    pub name: String,
    /// Number of tests in the suite
    pub test_count: u32,
    /// Whether every test in the suite passed
    pub passed: bool,
    /// Suite execution time in milliseconds
    pub duration_ms: u64,
    /// Individual test results, in execution order
    pub tests: Vec<TestResult>,
}
56
/// Global test configuration cache for performance.
///
/// Lazily initialized on first access via `OnceLock::get_or_init`;
/// pre-loads and caches test configurations so repeated lookups avoid
/// file I/O and TOML re-parsing.
static TEST_CONFIG_CACHE: OnceLock<HashMap<String, crate::config::TestConfig>> = OnceLock::new();
60
61/// Get a cached test configuration by name
62/// This avoids parsing TOML files repeatedly during test execution
63pub fn get_cached_test_config(name: &str) -> Option<&'static crate::config::TestConfig> {
64    let cache = TEST_CONFIG_CACHE.get_or_init(|| {
65        let mut configs = HashMap::new();
66
67        // Load common test configurations
68        if let Ok(config) = crate::config::loader::load_config_from_file(std::path::Path::new(
69            "tests/basic.clnrm.toml",
70        )) {
71            configs.insert("basic".to_string(), config);
72        }
73
74        if let Ok(config) = crate::config::loader::load_config_from_file(std::path::Path::new(
75            "tests/integration/end_to_end.toml",
76        )) {
77            configs.insert("end_to_end".to_string(), config);
78        }
79
80        // Add more test configurations as needed
81        configs
82    });
83
84    cache.get(name)
85}
86
/// Run every framework self-test suite (no filter).
///
/// Convenience wrapper around [`run_framework_tests_by_suite`] with
/// `None`, i.e. all registered suites are executed.
pub async fn run_framework_tests() -> Result<FrameworkTestResults> {
    run_framework_tests_by_suite(None).await
}
91
92/// Run framework self-tests with optional suite filter
93pub async fn run_framework_tests_by_suite(
94    suite_filter: Option<&str>,
95) -> Result<FrameworkTestResults> {
96    let start_time = std::time::Instant::now();
97    let mut all_results = FrameworkTestResults {
98        total_tests: 0,
99        passed_tests: 0,
100        failed_tests: 0,
101        total_duration_ms: 0,
102        test_results: Vec::new(),
103    };
104
105    // Run suites based on filter
106    let suites = vec![
107        (
108            "framework",
109            run_framework_suite
110                as fn() -> std::pin::Pin<
111                    Box<dyn std::future::Future<Output = Result<SuiteResult>> + Send>,
112                >,
113        ),
114        (
115            "container",
116            run_container_suite
117                as fn() -> std::pin::Pin<
118                    Box<dyn std::future::Future<Output = Result<SuiteResult>> + Send>,
119                >,
120        ),
121        (
122            "plugin",
123            run_plugin_suite
124                as fn() -> std::pin::Pin<
125                    Box<dyn std::future::Future<Output = Result<SuiteResult>> + Send>,
126                >,
127        ),
128        (
129            "cli",
130            run_cli_suite
131                as fn() -> std::pin::Pin<
132                    Box<dyn std::future::Future<Output = Result<SuiteResult>> + Send>,
133                >,
134        ),
135        (
136            "otel",
137            run_otel_suite
138                as fn() -> std::pin::Pin<
139                    Box<dyn std::future::Future<Output = Result<SuiteResult>> + Send>,
140                >,
141        ),
142    ];
143
144    for (suite_name, suite_fn) in suites {
145        // Skip suite if filter specified and doesn't match
146        if let Some(filter) = suite_filter {
147            if suite_name != filter {
148                continue;
149            }
150        }
151
152        match suite_fn().await {
153            Ok(suite_result) => {
154                all_results.total_tests += suite_result.test_count;
155                if suite_result.passed {
156                    all_results.passed_tests += suite_result.test_count;
157                } else {
158                    all_results.failed_tests +=
159                        suite_result.tests.iter().filter(|t| !t.passed).count() as u32;
160                    all_results.passed_tests +=
161                        suite_result.tests.iter().filter(|t| t.passed).count() as u32;
162                }
163                all_results.test_results.extend(suite_result.tests);
164            }
165            Err(e) => {
166                // Suite failed to run - mark all as failed
167                all_results.total_tests += 1;
168                all_results.failed_tests += 1;
169                all_results.test_results.push(TestResult {
170                    name: format!("{} (suite error)", suite_name),
171                    passed: false,
172                    duration_ms: 0,
173                    error: Some(e.to_string()),
174                });
175            }
176        }
177    }
178
179    all_results.total_duration_ms = start_time.elapsed().as_millis() as u64;
180    Ok(all_results)
181}
182
183// ============================================================================
184// Test Suites
185// ============================================================================
186
187/// Framework suite: TOML parsing, validation, configuration
188fn run_framework_suite(
189) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<SuiteResult>> + Send>> {
190    Box::pin(async {
191        let start = std::time::Instant::now();
192        let mut tests = Vec::new();
193
194        // Test 1: TOML parsing
195        tests.push(run_test("TOML Config Parsing", test_toml_parsing).await);
196
197        // Test 2: Configuration validation
198        tests.push(run_test("Config Validation", test_config_validation).await);
199
200        // Test 3: Template rendering
201        tests.push(run_test("Template Rendering", test_template_rendering).await);
202
203        // Test 4: Service configuration
204        tests.push(run_test("Service Config", test_service_configuration).await);
205
206        // Test 5: Error handling
207        tests.push(run_test("Error Handling", test_error_handling).await);
208
209        let passed = tests.iter().all(|t| t.passed);
210        Ok(SuiteResult {
211            name: "framework".to_string(),
212            test_count: tests.len() as u32,
213            passed,
214            duration_ms: start.elapsed().as_millis() as u64,
215            tests,
216        })
217    })
218}
219
220/// Container suite: Container creation and execution
221fn run_container_suite(
222) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<SuiteResult>> + Send>> {
223    Box::pin(async {
224        let start = std::time::Instant::now();
225        let mut tests = Vec::new();
226
227        // Test 1: Container creation
228        tests.push(run_test("Container Creation", test_container_creation).await);
229
230        // Test 2: Command execution
231        tests.push(run_test("Command Execution", test_container_execution).await);
232
233        // Test 3: Container cleanup
234        tests.push(run_test("Container Cleanup", test_container_cleanup).await);
235
236        let passed = tests.iter().all(|t| t.passed);
237        Ok(SuiteResult {
238            name: "container".to_string(),
239            test_count: tests.len() as u32,
240            passed,
241            duration_ms: start.elapsed().as_millis() as u64,
242            tests,
243        })
244    })
245}
246
247/// Plugin suite: Service plugin lifecycle
248fn run_plugin_suite(
249) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<SuiteResult>> + Send>> {
250    Box::pin(async {
251        let start = std::time::Instant::now();
252        let mut tests = Vec::new();
253
254        // Test 1: Plugin registration
255        tests.push(run_test("Plugin Registration", test_plugin_registration).await);
256
257        // Test 2: Plugin lifecycle
258        tests.push(run_test("Plugin Lifecycle", test_plugin_system).await);
259
260        // Test 3: Plugin coordination
261        tests.push(run_test("Plugin Coordination", test_plugin_coordination).await);
262
263        // Test 4: GenericContainerPlugin
264        tests.push(run_test("GenericContainer Plugin", test_generic_container_plugin).await);
265
266        // Test 5: SurrealDB plugin
267        tests.push(run_test("SurrealDB Plugin", test_surrealdb_plugin).await);
268
269        // Test 6: Plugin health checks
270        tests.push(run_test("Plugin Health Checks", test_plugin_health_checks).await);
271
272        // Test 7: Plugin error handling
273        tests.push(run_test("Plugin Error Handling", test_plugin_error_handling).await);
274
275        // Test 8: Multi-plugin coordination
276        tests.push(run_test("Multi-Plugin Coordination", test_multi_plugin_coordination).await);
277
278        let passed = tests.iter().all(|t| t.passed);
279        Ok(SuiteResult {
280            name: "plugin".to_string(),
281            test_count: tests.len() as u32,
282            passed,
283            duration_ms: start.elapsed().as_millis() as u64,
284            tests,
285        })
286    })
287}
288
289/// CLI suite: Command-line interface
290fn run_cli_suite(
291) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<SuiteResult>> + Send>> {
292    Box::pin(async {
293        let start = std::time::Instant::now();
294        let mut tests = Vec::new();
295
296        // Test 1: CLI argument parsing
297        tests.push(run_test("CLI Argument Parsing", test_cli_parsing).await);
298
299        // Test 2: Config validation command
300        tests.push(run_test("Config Validation Command", test_cli_validation).await);
301
302        // Test 3: Report generation
303        tests.push(run_test("Report Generation", test_cli_report_generation).await);
304
305        // Test 4: Format command
306        tests.push(run_test("Format Command", test_cli_format).await);
307
308        // Test 5: Init command
309        tests.push(run_test("Init Command", test_cli_init).await);
310
311        // Test 6: Run command
312        tests.push(run_test("Run Command", test_cli_run).await);
313
314        // Test 7: Dry-run command
315        tests.push(run_test("Dry-Run Command", test_cli_dry_run).await);
316
317        // Test 8: Error messages
318        tests.push(run_test("Error Message Quality", test_cli_error_messages).await);
319
320        // Test 9: Help text
321        tests.push(run_test("Help Text", test_cli_help).await);
322
323        // Test 10: Version command
324        tests.push(run_test("Version Command", test_cli_version).await);
325
326        // Test 11: Multiple config files
327        tests.push(run_test("Multiple Config Files", test_cli_multiple_configs).await);
328
329        // Test 12: Output formats
330        tests.push(run_test("Output Formats", test_cli_output_formats).await);
331
332        let passed = tests.iter().all(|t| t.passed);
333        Ok(SuiteResult {
334            name: "cli".to_string(),
335            test_count: tests.len() as u32,
336            passed,
337            duration_ms: start.elapsed().as_millis() as u64,
338            tests,
339        })
340    })
341}
342
343/// OTEL suite: OpenTelemetry integration
344fn run_otel_suite(
345) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<SuiteResult>> + Send>> {
346    Box::pin(async {
347        let start = std::time::Instant::now();
348        let mut tests = Vec::new();
349
350        // Test 1: OTEL initialization
351        tests.push(run_test("OTEL Initialization", test_otel_init).await);
352
353        // Test 2: Span creation
354        tests.push(run_test("Span Creation", test_otel_span_creation).await);
355
356        // Test 3: Trace context
357        tests.push(run_test("Trace Context", test_otel_trace_context).await);
358
359        // Test 4: Exporters
360        tests.push(run_test("OTEL Exporters", test_otel_exporters).await);
361
362        let passed = tests.iter().all(|t| t.passed);
363        Ok(SuiteResult {
364            name: "otel".to_string(),
365            test_count: tests.len() as u32,
366            passed,
367            duration_ms: start.elapsed().as_millis() as u64,
368            tests,
369        })
370    })
371}
372
373// ============================================================================
374// Test Execution Helper
375// ============================================================================
376
377/// Run a single test and capture results
378async fn run_test<F, Fut>(name: &str, test_fn: F) -> TestResult
379where
380    F: FnOnce() -> Fut,
381    Fut: std::future::Future<Output = Result<()>>,
382{
383    let start = std::time::Instant::now();
384    match test_fn().await {
385        Ok(_) => TestResult {
386            name: name.to_string(),
387            passed: true,
388            duration_ms: start.elapsed().as_millis() as u64,
389            error: None,
390        },
391        Err(e) => TestResult {
392            name: name.to_string(),
393            passed: false,
394            duration_ms: start.elapsed().as_millis() as u64,
395            error: Some(e.to_string()),
396        },
397    }
398}
399
400// ============================================================================
401// Individual Test Functions
402// ============================================================================
403
404async fn test_toml_parsing() -> Result<()> {
405    use crate::config::parse_toml_config;
406
407    let toml = r#"
408[meta]
409name = "test"
410version = "1.0.0"
411
412[[scenario]]
413name = "test_scenario"
414
415[[scenario.steps]]
416name = "test_step"
417command = ["echo", "hello"]
418"#;
419
420    let config = parse_toml_config(toml).map_err(|e| {
421        CleanroomError::internal_error("TOML parsing failed")
422            .with_context("Failed to parse valid TOML configuration")
423            .with_source(e.to_string())
424    })?;
425
426    if let Some(meta) = &config.meta {
427        if meta.name != "test" {
428            return Err(CleanroomError::validation_error("Config name mismatch"));
429        }
430    } else {
431        return Err(CleanroomError::validation_error("Meta section not parsed"));
432    }
433
434    Ok(())
435}
436
437async fn test_config_validation() -> Result<()> {
438    use crate::validation::shape::ShapeValidator;
439    use std::fs;
440    use tempfile::TempDir;
441
442    let temp_dir = TempDir::new().map_err(|e| {
443        CleanroomError::internal_error("Failed to create temp dir").with_source(e.to_string())
444    })?;
445
446    let config_path = temp_dir.path().join("test.toml");
447    let config = r#"
448[meta]
449name = "validation_test"
450version = "1.0.0"
451
452[[scenario]]
453name = "s1"
454
455[[scenario.steps]]
456name = "step1"
457command = ["echo"]
458"#;
459
460    fs::write(&config_path, config).map_err(|e| {
461        CleanroomError::internal_error("Failed to write config file").with_source(e.to_string())
462    })?;
463
464    let mut validator = ShapeValidator::new();
465    let result = validator.validate_file(&config_path)?;
466
467    if !result.passed {
468        return Err(CleanroomError::validation_error("Config validation failed")
469            .with_source(format!("{:?}", result.errors)));
470    }
471
472    Ok(())
473}
474
/// Verify that a `{{ name }}` template renders to the expected string.
async fn test_template_rendering() -> Result<()> {
    use crate::{TemplateContext, TemplateRenderer};

    let mut renderer = TemplateRenderer::new()?;
    // NOTE(review): `context` is populated below but never passed to
    // `render_str` — the second argument there is the string "test", not
    // this context. Confirm whether the renderer resolves `{{ name }}` from
    // some implicit/global context or whether `context` is dead code here.
    let mut context = TemplateContext::new();
    context.vars.insert(
        "name".to_string(),
        serde_json::Value::String("test".to_string()),
    );

    let template = "Hello {{ name }}!";
    let rendered = renderer.render_str(template, "test").map_err(|e| {
        CleanroomError::internal_error("Template rendering failed").with_source(e.to_string())
    })?;

    if rendered != "Hello test!" {
        return Err(CleanroomError::validation_error("Template output mismatch"));
    }

    Ok(())
}
496
497async fn test_service_configuration() -> Result<()> {
498    use crate::config::parse_toml_config;
499
500    let toml = r#"
501[meta]
502name = "service_test"
503version = "1.0.0"
504
505[services.db]
506type = "generic_container"
507image = "postgres:14"
508
509[[scenario]]
510name = "test"
511
512[[scenario.steps]]
513name = "step1"
514command = ["echo"]
515service = "db"
516"#;
517
518    let config = parse_toml_config(toml)?;
519
520    let services = config
521        .services
522        .ok_or_else(|| CleanroomError::validation_error("Services not parsed"))?;
523
524    if !services.contains_key("db") {
525        return Err(CleanroomError::validation_error("Service 'db' not found"));
526    }
527
528    Ok(())
529}
530
531async fn test_error_handling() -> Result<()> {
532    use crate::error::CleanroomError;
533
534    // Test error creation and context
535    let error = CleanroomError::validation_error("Test error")
536        .with_context("Test context")
537        .with_source("Test source");
538
539    if !error.message.contains("Test error") {
540        return Err(CleanroomError::validation_error(
541            "Error message not preserved",
542        ));
543    }
544
545    if !error.context.iter().any(|c| c.contains("Test context")) {
546        return Err(CleanroomError::validation_error(
547            "Error context not preserved",
548        ));
549    }
550
551    Ok(())
552}
553
554async fn test_container_creation() -> Result<()> {
555    let environment = crate::cleanroom::CleanroomEnvironment::new().await?;
556    let plugin = crate::services::generic::GenericContainerPlugin::new("test", "alpine:latest");
557    environment.register_service(Box::new(plugin)).await?;
558    Ok(())
559}
560
/// End-to-end container execution check: register an Alpine container
/// plugin, start it, run `echo test` inside it, verify exit status and
/// stdout, then stop the service.
///
/// Each fallible step wraps its error with step-specific context so a
/// failure in the report pinpoints which lifecycle stage broke.
async fn test_container_execution() -> Result<()> {
    // Create a CleanroomEnvironment instance
    let environment = crate::cleanroom::CleanroomEnvironment::new()
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to create CleanroomEnvironment")
                .with_context("Container execution test setup failed")
                .with_source(e.to_string())
        })?;

    // Register a GenericContainerPlugin with a simple image (alpine:latest)
    let plugin =
        crate::services::generic::GenericContainerPlugin::new("test_container", "alpine:latest");
    environment
        .register_service(Box::new(plugin))
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to register test container plugin")
                .with_context("Plugin registration failed during container execution test")
                .with_source(e.to_string())
        })?;

    // Start the service; the handle's id is needed later for cleanup.
    let handle = environment
        .start_service("test_container")
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to start test container service")
                .with_context("Service startup failed during container execution test")
                .with_source(e.to_string())
        })?;

    // Execute a command (echo "test")
    let command = vec!["echo".to_string(), "test".to_string()];
    let execution_result = environment
        .execute_in_container("test_container", &command, None, None)
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to execute command in test container")
                .with_context("Command execution failed during container execution test")
                .with_source(e.to_string())
        })?;

    // Verify the command exited successfully.
    if !execution_result.succeeded() {
        return Err(CleanroomError::validation_error("Test command failed")
            .with_context(format!(
                "Command '{}' exited with code {}",
                command.join(" "),
                execution_result.exit_code
            ))
            .with_source(format!("stderr: {}", execution_result.stderr)));
    }

    // Verify the command produced the expected output.
    if !execution_result.stdout.trim().contains("test") {
        return Err(
            CleanroomError::validation_error("Test command output validation failed")
                .with_context(format!(
                    "Expected output to contain 'test', got: '{}'",
                    execution_result.stdout.trim()
                ))
                .with_source("Command output did not match expected pattern"),
        );
    }

    // Stop and cleanup the service
    environment.stop_service(&handle.id).await.map_err(|e| {
        CleanroomError::internal_error("Failed to stop test container service")
            .with_context("Service cleanup failed during container execution test")
            .with_source(e.to_string())
    })?;

    Ok(())
}
635
/// Full plugin-system lifecycle check: register a real container plugin
/// and a mock database plugin, start both, verify health reporting shows
/// exactly two services, exercise cross-plugin coordination by running a
/// command in the container, then stop both services and verify the
/// health report is empty again.
///
/// NOTE(review): the string "mock_database" passed to `start_service` is
/// presumably the name `MockDatabasePlugin` registers under — confirm
/// against its implementation.
async fn test_plugin_system() -> Result<()> {
    // Create a CleanroomEnvironment instance
    let environment = crate::cleanroom::CleanroomEnvironment::new()
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to create CleanroomEnvironment")
                .with_context("Plugin system test setup failed")
                .with_source(e.to_string())
        })?;

    // Register multiple plugins (GenericContainerPlugin, mock plugins)
    let container_plugin =
        crate::services::generic::GenericContainerPlugin::new("test_container", "alpine:latest");
    environment
        .register_service(Box::new(container_plugin))
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to register container plugin")
                .with_context("Container plugin registration failed during plugin system test")
                .with_source(e.to_string())
        })?;

    let mock_plugin = crate::cleanroom::MockDatabasePlugin::new();
    environment
        .register_service(Box::new(mock_plugin))
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to register mock plugin")
                .with_context("Mock plugin registration failed during plugin system test")
                .with_source(e.to_string())
        })?;

    // Precondition: registration alone must not activate any service.
    let services = environment.services().await;
    if !services.active_services().is_empty() {
        return Err(
            CleanroomError::validation_error("Services should be empty before starting")
                .with_context("Plugin system test precondition failed")
                .with_source("Services were already active before test started"),
        );
    }

    // Test plugin communication and coordination
    let container_handle = environment
        .start_service("test_container")
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to start container service")
                .with_context("Container service startup failed during plugin system test")
                .with_source(e.to_string())
        })?;

    let mock_handle = environment
        .start_service("mock_database")
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to start mock service")
                .with_context("Mock service startup failed during plugin system test")
                .with_source(e.to_string())
        })?;

    // Verify both services are running (one health entry per service).
    let health_status = environment.check_health().await;
    if health_status.len() != 2 {
        return Err(
            CleanroomError::validation_error("Expected 2 active services")
                .with_context("Plugin system test health check failed")
                .with_source(format!(
                    "Expected 2 services, found {}",
                    health_status.len()
                )),
        );
    }

    // Test service coordination by executing a command in the container
    let command = vec!["echo".to_string(), "plugin_coordination_test".to_string()];
    let execution_result = environment
        .execute_in_container("test_container", &command, None, None)
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to execute coordination test command")
                .with_context("Plugin coordination test failed")
                .with_source(e.to_string())
        })?;

    if !execution_result.succeeded() {
        return Err(
            CleanroomError::validation_error("Plugin coordination test command failed")
                .with_context("Command execution failed during plugin coordination test")
                .with_source(format!(
                    "Exit code: {}, stderr: {}",
                    execution_result.exit_code, execution_result.stderr
                )),
        );
    }

    // Tear down both services explicitly (stop order: container, then mock).
    environment
        .stop_service(&container_handle.id)
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to stop container service")
                .with_context("Container service cleanup failed during plugin system test")
                .with_source(e.to_string())
        })?;

    environment
        .stop_service(&mock_handle.id)
        .await
        .map_err(|e| {
            CleanroomError::internal_error("Failed to stop mock service")
                .with_context("Mock service cleanup failed during plugin system test")
                .with_source(e.to_string())
        })?;

    // Verify all services are stopped
    let final_health_status = environment.check_health().await;
    if !final_health_status.is_empty() {
        return Err(
            CleanroomError::validation_error("Services should be stopped after cleanup")
                .with_context("Plugin system test cleanup verification failed")
                .with_source(format!(
                    "Expected 0 active services, found {}",
                    final_health_status.len()
                )),
        );
    }

    Ok(())
}
766
767async fn test_container_cleanup() -> Result<()> {
768    let environment = crate::cleanroom::CleanroomEnvironment::new().await?;
769    let plugin =
770        crate::services::generic::GenericContainerPlugin::new("cleanup_test", "alpine:latest");
771    environment.register_service(Box::new(plugin)).await?;
772    let handle = environment.start_service("cleanup_test").await?;
773    environment.stop_service(&handle.id).await?;
774
775    // Verify cleanup
776    let health = environment.check_health().await;
777    if !health.is_empty() {
778        return Err(CleanroomError::validation_error("Container not cleaned up"));
779    }
780    Ok(())
781}
782
783async fn test_plugin_registration() -> Result<()> {
784    let environment = crate::cleanroom::CleanroomEnvironment::new().await?;
785    let plugin = crate::services::generic::GenericContainerPlugin::new("reg_test", "alpine:latest");
786    environment.register_service(Box::new(plugin)).await?;
787    Ok(())
788}
789
790async fn test_plugin_coordination() -> Result<()> {
791    let environment = crate::cleanroom::CleanroomEnvironment::new().await?;
792    let plugin1 = crate::services::generic::GenericContainerPlugin::new("svc1", "alpine:latest");
793    let plugin2 = crate::services::generic::GenericContainerPlugin::new("svc2", "alpine:latest");
794    environment.register_service(Box::new(plugin1)).await?;
795    environment.register_service(Box::new(plugin2)).await?;
796    Ok(())
797}
798
799async fn test_generic_container_plugin() -> Result<()> {
800    use crate::cleanroom::ServicePlugin;
801    let plugin = crate::services::generic::GenericContainerPlugin::new("test", "alpine:latest");
802    if plugin.name() != "test" {
803        return Err(CleanroomError::validation_error("Plugin name mismatch"));
804    }
805    Ok(())
806}
807
808async fn test_surrealdb_plugin() -> Result<()> {
809    use crate::cleanroom::ServicePlugin;
810    let plugin = crate::services::surrealdb::SurrealDbPlugin::new();
811    if plugin.name() != "db" {
812        return Err(CleanroomError::validation_error(
813            "SurrealDB plugin name mismatch",
814        ));
815    }
816    Ok(())
817}
818
819async fn test_plugin_health_checks() -> Result<()> {
820    let environment = crate::cleanroom::CleanroomEnvironment::new().await?;
821    let health = environment.check_health().await;
822    if !health.is_empty() {
823        return Err(CleanroomError::validation_error(
824            "Unexpected active services",
825        ));
826    }
827    Ok(())
828}
829
830async fn test_plugin_error_handling() -> Result<()> {
831    let environment = crate::cleanroom::CleanroomEnvironment::new().await?;
832    // Try to start non-existent service
833    let result = environment.start_service("nonexistent").await;
834    if result.is_ok() {
835        return Err(CleanroomError::validation_error(
836            "Should fail for nonexistent service",
837        ));
838    }
839    Ok(())
840}
841
842async fn test_multi_plugin_coordination() -> Result<()> {
843    let environment = crate::cleanroom::CleanroomEnvironment::new().await?;
844    let plugin1 = crate::services::generic::GenericContainerPlugin::new("multi1", "alpine:latest");
845    let plugin2 = crate::services::generic::GenericContainerPlugin::new("multi2", "alpine:latest");
846    environment.register_service(Box::new(plugin1)).await?;
847    environment.register_service(Box::new(plugin2)).await?;
848    let _h1 = environment.start_service("multi1").await?;
849    let _h2 = environment.start_service("multi2").await?;
850    Ok(())
851}
852
853// CLI test implementations (80/20 critical tests)
854async fn test_cli_parsing() -> Result<()> {
855    use crate::config::parse_toml_config;
856
857    // Test that CLI can parse TOML configurations
858    let toml = r#"
859[meta]
860name = "cli_test"
861version = "1.0.0"
862
863[[scenario]]
864name = "test"
865
866[[scenario.steps]]
867name = "step1"
868command = ["echo", "test"]
869"#;
870
871    let config = parse_toml_config(toml).map_err(|e| {
872        CleanroomError::internal_error("CLI parsing failed")
873            .with_context("Failed to parse TOML configuration in CLI test")
874            .with_source(e.to_string())
875    })?;
876
877    if let Some(meta) = &config.meta {
878        if meta.name != "cli_test" {
879            return Err(CleanroomError::validation_error(
880                "CLI parsing: name mismatch",
881            ));
882        }
883    } else {
884        return Err(CleanroomError::validation_error(
885            "CLI parsing: meta not found",
886        ));
887    }
888
889    Ok(())
890}
891
892async fn test_cli_validation() -> Result<()> {
893    use crate::validation::shape::ShapeValidator;
894    use std::fs;
895    use tempfile::TempDir;
896
897    // Create temp directory and test file
898    let temp_dir = TempDir::new().map_err(|e| {
899        CleanroomError::internal_error("Failed to create temp dir for CLI validation test")
900            .with_source(e.to_string())
901    })?;
902
903    let test_file = temp_dir.path().join("test.toml");
904    let valid_toml = r#"
905[meta]
906name = "validation_test"
907version = "1.0.0"
908
909[[scenario]]
910name = "test_scenario"
911
912[[scenario.steps]]
913name = "test_step"
914command = ["echo", "test"]
915"#;
916
917    fs::write(&test_file, valid_toml).map_err(|e| {
918        CleanroomError::internal_error("Failed to write test file for CLI validation")
919            .with_source(e.to_string())
920    })?;
921
922    // Test validation
923    let mut validator = ShapeValidator::new();
924    let result = validator.validate_file(&test_file)?;
925
926    if !result.passed {
927        return Err(CleanroomError::validation_error("CLI validation failed")
928            .with_source(format!("Errors: {:?}", result.errors)));
929    }
930
931    Ok(())
932}
933
934async fn test_cli_report_generation() -> Result<()> {
935    use crate::reporting::{generate_reports, ReportConfig};
936    use tempfile::TempDir;
937
938    // Create temp directory for reports
939    let temp_dir = TempDir::new().map_err(|e| {
940        CleanroomError::internal_error("Failed to create temp dir for report test")
941            .with_source(e.to_string())
942    })?;
943
944    // Create test results
945    let test_results = FrameworkTestResults {
946        total_tests: 1,
947        passed_tests: 1,
948        failed_tests: 0,
949        total_duration_ms: 100,
950        test_results: vec![TestResult {
951            name: "test".to_string(),
952            passed: true,
953            duration_ms: 100,
954            error: None,
955        }],
956    };
957
958    // Test report generation - create a ValidationReport from test results
959    use crate::validation::ValidationReport;
960
961    let report_dir = temp_dir.path().join("reports");
962    std::fs::create_dir_all(&report_dir).map_err(|e| {
963        CleanroomError::internal_error("Failed to create report directory")
964            .with_source(e.to_string())
965    })?;
966
967    // Create minimal validation report for testing
968    let mut validation_report = ValidationReport::new();
969    if test_results.failed_tests == 0 {
970        for _ in 0..test_results.total_tests {
971            validation_report.add_pass("test_passed");
972        }
973    } else {
974        for _ in 0..test_results.failed_tests {
975            validation_report.add_fail("test_failed", "Test failed".to_string());
976        }
977    }
978
979    let config = ReportConfig {
980        json_path: Some(
981            report_dir
982                .join("results.json")
983                .to_string_lossy()
984                .to_string(),
985        ),
986        junit_path: None,
987        digest_path: None,
988    };
989
990    // Use empty spans JSON for testing
991    let spans_json = "[]";
992
993    generate_reports(&config, &validation_report, spans_json).map_err(|e| {
994        CleanroomError::internal_error("Report generation failed")
995            .with_context("Failed to generate test reports in CLI test")
996            .with_source(e.to_string())
997    })?;
998
999    // Verify report was created
1000    if !report_dir.exists() {
1001        return Err(CleanroomError::validation_error(
1002            "Report directory not created",
1003        ));
1004    }
1005
1006    Ok(())
1007}
1008
1009async fn test_cli_format() -> Result<()> {
1010    use crate::formatting::format_toml_content;
1011
1012    // Test TOML formatting
1013    let unformatted = "[meta]\nname=\"test\"\nversion=\"1.0.0\"";
1014    let formatted = format_toml_content(unformatted).map_err(|e| {
1015        CleanroomError::internal_error("TOML formatting failed")
1016            .with_context("Failed to format TOML content in CLI test")
1017            .with_source(e.to_string())
1018    })?;
1019
1020    // Verify formatted output is valid
1021    if formatted.is_empty() {
1022        return Err(CleanroomError::validation_error("Formatted TOML is empty"));
1023    }
1024
1025    // Verify it contains key elements
1026    if !formatted.contains("[meta]") || !formatted.contains("name") {
1027        return Err(CleanroomError::validation_error(
1028            "Formatted TOML missing key elements",
1029        ));
1030    }
1031
1032    Ok(())
1033}
1034
/// Placeholder for `init` command coverage.
/// NOTE(review): currently a no-op that always passes — TODO: exercise the
/// init command once a testable entry point exists.
async fn test_cli_init() -> Result<()> {
    // Test init command functionality
    Ok(())
}

/// Placeholder for `run` command coverage (no-op, always passes).
async fn test_cli_run() -> Result<()> {
    // Test run command
    Ok(())
}

/// Placeholder for `dry-run` command coverage (no-op, always passes).
async fn test_cli_dry_run() -> Result<()> {
    // Test dry-run command
    Ok(())
}
1049
1050async fn test_cli_error_messages() -> Result<()> {
1051    let error = CleanroomError::validation_error("Test");
1052    if error.message.is_empty() {
1053        return Err(CleanroomError::validation_error("Error message empty"));
1054    }
1055    Ok(())
1056}
1057
/// Placeholder for help-text coverage (no-op, always passes).
async fn test_cli_help() -> Result<()> {
    // Test help text generation
    Ok(())
}

/// Placeholder for version-command coverage (no-op, always passes).
async fn test_cli_version() -> Result<()> {
    // Test version command
    Ok(())
}

/// Placeholder for multi-config handling coverage (no-op, always passes).
async fn test_cli_multiple_configs() -> Result<()> {
    // Test handling multiple config files
    Ok(())
}

/// Placeholder for output-format coverage (no-op, always passes).
async fn test_cli_output_formats() -> Result<()> {
    // Test different output formats
    Ok(())
}
1077
1078// OTEL test implementations (80/20 critical tests)
1079async fn test_otel_init() -> Result<()> {
1080    use crate::telemetry::{init_otel, Export, OtelConfig};
1081
1082    // Test OTEL initialization with stdout exporter
1083    let config = OtelConfig {
1084        service_name: "test-service",
1085        deployment_env: "test",
1086        sample_ratio: 1.0,
1087        export: Export::Stdout,
1088        enable_fmt_layer: false,
1089        headers: None,
1090    };
1091
1092    let guard = init_otel(config).map_err(|e| {
1093        CleanroomError::internal_error("OTEL initialization failed")
1094            .with_context("Failed to initialize OTEL with stdout exporter")
1095            .with_source(e.to_string())
1096    })?;
1097
1098    // Verify guard exists and can be dropped
1099    drop(guard);
1100
1101    Ok(())
1102}
1103
1104async fn test_otel_span_creation() -> Result<()> {
1105    use opentelemetry::global;
1106    use opentelemetry::trace::{Tracer, TracerProvider};
1107
1108    // Get global tracer
1109    let tracer_provider = global::tracer_provider();
1110    let span = tracer_provider.tracer("test-tracer").start("test-span");
1111
1112    // Verify span can be created and ended (span.end() consumes self)
1113    drop(span); // Span automatically ends when dropped
1114
1115    Ok(())
1116}
1117
1118async fn test_otel_trace_context() -> Result<()> {
1119    use opentelemetry::global;
1120    use opentelemetry::trace::{Span, Tracer, TracerProvider};
1121    use opentelemetry::KeyValue;
1122
1123    // Create a span with attributes
1124    let tracer_provider = global::tracer_provider();
1125    let mut span = tracer_provider.tracer("test-tracer").start("context-test");
1126
1127    // Set attributes (takes &mut self)
1128    span.set_attributes(vec![
1129        KeyValue::new("test.key", "test.value"),
1130        KeyValue::new("test.number", 42),
1131    ]);
1132
1133    // End span (takes self by value)
1134    span.end();
1135
1136    Ok(())
1137}
1138
1139async fn test_otel_exporters() -> Result<()> {
1140    use crate::telemetry::Export;
1141
1142    // Test that different export types can be created
1143    let _stdout = Export::Stdout;
1144    let _otlp_http = Export::OtlpHttp {
1145        endpoint: "http://localhost:4318",
1146    };
1147    let _otlp_grpc = Export::OtlpGrpc {
1148        endpoint: "http://localhost:4317",
1149    };
1150
1151    // Verify types can be matched
1152    match _stdout {
1153        Export::Stdout => Ok(()),
1154        _ => Err(CleanroomError::validation_error("Export type mismatch")),
1155    }
1156}