1use crate::error::{CleanroomError, Result};
7use crate::testing::FrameworkTestResults;
8use crate::config::{load_cleanroom_config, CleanroomConfig};
9use crate::cleanroom::{CleanroomEnvironment, ServicePlugin, ServiceHandle, HealthStatus};
10use clap::{Parser, Subcommand, ValueEnum, ArgAction};
11use std::path::PathBuf;
12use std::collections::HashMap;
13use serde::Deserialize;
14use tracing::{info, debug, warn, error};
15use walkdir::WalkDir;
16use std::future::Future;
17use std::pin::Pin;
18
19static mut CLEANROOM_CONFIG: Option<CleanroomConfig> = None;
21
22fn get_cleanroom_config() -> &'static CleanroomConfig {
24 unsafe {
25 CLEANROOM_CONFIG.as_ref().expect("Cleanroom config not initialized")
26 }
27}
28
29fn init_cleanroom_config() -> Result<()> {
31 let config = load_cleanroom_config()?;
32 unsafe {
33 CLEANROOM_CONFIG = Some(config);
34 }
35 Ok(())
36}
37
// Top-level CLI definition for the `clnrm` binary.
// (Plain `//` comments are used on clap items so generated --help text is
// unchanged; `///` doc comments would become help strings.)
#[derive(Parser)]
#[command(name = "clnrm")]
#[command(about = "Hermetic integration testing platform")]
#[command(version, long_about = None)]
#[command(styles = clap::builder::styling::Styles::styled()
    .header(clap::builder::styling::AnsiColor::Green.on_default().bold())
    .usage(clap::builder::styling::AnsiColor::Blue.on_default().bold())
    .literal(clap::builder::styling::AnsiColor::Cyan.on_default().bold())
    .placeholder(clap::builder::styling::AnsiColor::Yellow.on_default()))]
struct Cli {
    // Verbosity; each `-v` increments (0 = info, 1 = debug, 2+ = trace).
    #[arg(short, long, action = ArgAction::Count)]
    verbose: u8,

    // Optional config file path.
    // NOTE(review): parsed but never read anywhere in this file, and
    // `init_cleanroom_config` is never called — confirm intended wiring.
    #[arg(short, long, value_name = "FILE")]
    config: Option<PathBuf>,

    // Output format for test results.
    #[arg(short, long, default_value = "auto")]
    format: OutputFormat,

    // Subcommand to execute.
    #[command(subcommand)]
    command: Commands,
}
64
// Top-level subcommands for `clnrm`.
// (Plain `//` comments so clap-generated help text is unchanged.)
#[derive(Subcommand)]
enum Commands {
    // Discover and execute test files.
    Run {
        // Files or directories containing .toml/.clnrm.toml tests.
        #[arg(required = true)]
        paths: Vec<PathBuf>,

        // Run test files concurrently.
        #[arg(short, long)]
        parallel: bool,

        // Maximum number of parallel jobs.
        #[arg(short = 'j', long, default_value = "4")]
        jobs: usize,

        // Stop after the first failing test.
        #[arg(short, long)]
        fail_fast: bool,

        // Re-run on file changes (run_tests currently rejects this flag).
        #[arg(short, long)]
        watch: bool,

        // Interactive mode (logged as not yet fully implemented).
        #[arg(short, long)]
        interactive: bool,
    },

    // Scaffold a new test project directory.
    Init {
        // Project directory name (defaults to "cleanroom-test").
        #[arg(value_name = "NAME")]
        name: Option<String>,

        // Template to use.
        // NOTE(review): init_project only logs this value — confirm intent.
        #[arg(short, long, default_value = "default")]
        template: String,
    },

    // Validate test configuration files without running them.
    Validate {
        // Files or directories to validate.
        #[arg(required = true)]
        files: Vec<PathBuf>,
    },

    // List available service plugins.
    Plugins,

    // Service management (status/logs/restart).
    Services {
        #[command(subcommand)]
        command: ServiceCommands,
    },

    // Generate a test report (currently a stub — see generate_report).
    Report {
        // Input results file.
        #[arg(short, long)]
        input: Option<PathBuf>,

        // Output destination.
        #[arg(short, long)]
        output: Option<PathBuf>,

        // Report format.
        #[arg(short, long, default_value = "html")]
        format: ReportFormat,
    },

    // Run the framework's built-in self-tests.
    SelfTest {
        // Restrict to a named suite.
        // NOTE(review): validated but not applied — see run_self_tests.
        #[arg(short, long)]
        suite: Option<String>,

        // Also write a JSON report file.
        #[arg(short, long)]
        report: bool,
    },
}
147
// Subcommands under `clnrm services`.
// (Plain `//` comments so clap-generated help text is unchanged.)
#[derive(Subcommand)]
enum ServiceCommands {
    // Show currently registered services.
    Status,

    // Show logs for one service.
    Logs {
        // Service name as registered in the environment.
        service: String,

        // Number of trailing log lines to show.
        #[arg(short, long, default_value = "50")]
        lines: usize,
    },

    // Restart one service.
    Restart {
        // Service name as registered in the environment.
        service: String,
    },
}
169
// Output formats selectable via `--format`.
// NOTE(review): in this file only Junit has dedicated handling (run_tests);
// all other variants share the default human-readable summary path — confirm
// whether Json/Tap output is implemented elsewhere.
#[derive(Clone, Debug, ValueEnum)]
pub enum OutputFormat {
    // Choose a format based on context.
    Auto,
    // Human-readable log output.
    Human,
    // Intended machine-readable JSON (no dedicated handling in this file).
    Json,
    // JUnit XML, printed to stdout by run_tests.
    Junit,
    // Intended Test Anything Protocol (no dedicated handling in this file).
    Tap,
}
183
// Report formats for the `report` subcommand; mapped to lowercase strings in
// run_cli before being passed to generate_report.
#[derive(Clone, Debug, ValueEnum)]
enum ReportFormat {
    Html,
    Markdown,
    Json,
    Pdf,
}
195
/// Effective run configuration assembled in run_cli from global flags and the
/// `run` subcommand's options.
#[derive(Debug, Clone)]
pub struct CliConfig {
    /// Execute test files concurrently.
    pub parallel: bool,
    /// Upper bound on concurrently running tests.
    pub jobs: usize,
    /// Output format for results.
    pub format: OutputFormat,
    /// Stop after the first failure.
    pub fail_fast: bool,
    /// Re-run on file changes (run_tests rejects this as unimplemented).
    pub watch: bool,
    /// Interactive mode (logged as not yet fully implemented).
    pub interactive: bool,
    /// Verbosity level carried over from the CLI.
    pub verbose: u8,
}
214
215impl Default for CliConfig {
216 fn default() -> Self {
217 Self {
218 parallel: false,
219 jobs: 4,
220 format: OutputFormat::Auto,
221 fail_fast: false,
222 watch: false,
223 interactive: false,
224 verbose: 0,
225 }
226 }
227}
228
/// Aggregate outcome of one CLI test run.
#[derive(Debug, Clone)]
struct CliTestResults {
    /// Per-file results in completion order.
    tests: Vec<CliTestResult>,
    /// Wall-clock duration of the entire run, in milliseconds.
    total_duration_ms: u64,
}
235
/// Outcome of a single test file.
#[derive(Debug, Clone)]
struct CliTestResult {
    /// File name of the test (or "unknown" if unrepresentable).
    name: String,
    /// Whether the test completed without error.
    passed: bool,
    /// Wall-clock duration in milliseconds.
    duration_ms: u64,
    /// Error message when `passed` is false.
    error: Option<String>,
}
244
/// File-local schema for a `.clnrm.toml` test file.
///
/// NOTE(review): distinct from `crate::config::TestConfig` (returned by
/// `parse_toml_test`) — confirm the two schemas are intentionally separate.
#[derive(Debug, Deserialize)]
struct TestConfig {
    /// The `[test]` table: name and description.
    #[serde(rename = "test")]
    metadata: TestMetadata,
    /// Optional `[[services]]` entries.
    #[serde(default)]
    services: Option<Vec<ServiceConfig>>,
    /// `[[steps]]` entries; defaults to empty when absent.
    #[serde(default)]
    steps: Vec<TestStep>,
    /// Optional free-form `[assertions]` table, evaluated after the steps.
    assertions: Option<HashMap<String, toml::Value>>,
}
256
/// The `[test]` table of a test file.
#[derive(Debug, Deserialize)]
struct TestMetadata {
    /// Human-readable test name; must be non-empty (enforced at validation).
    name: String,
    /// Free-form description, logged at debug level during execution.
    description: String,
}
263
/// One `[[services]]` entry in a test file.
#[derive(Debug, Deserialize)]
struct ServiceConfig {
    /// Service name used in log messages.
    name: String,
    /// The TOML `type` key (renamed because `type` is a Rust keyword).
    #[serde(rename = "type")]
    service_type: String,
    /// Plugin expected to back this service.
    /// NOTE(review): parsed but not read anywhere in this file — confirm use.
    plugin: String,
    /// Container image for the service.
    /// NOTE(review): parsed but not read anywhere in this file — confirm use.
    image: String,
}
273
/// One `[[steps]]` entry: a named command plus optional output validation.
#[derive(Debug, Deserialize)]
struct TestStep {
    /// Step name shown in logs and error messages.
    name: String,
    /// argv-style command executed in the test container.
    command: Vec<String>,
    /// Optional regex that the step's stdout must match.
    expected_output_regex: Option<String>,
}
281
/// Plain TOML extension accepted for test files.
const TOML_FILE_EXTENSION: &str = ".toml";
/// Preferred, more specific cleanroom test extension.
const CLNRM_TOML_EXTENSION: &str = ".clnrm.toml";
/// All extensions recognized as test files.
///
/// Consistency fix: built from the two named constants above instead of
/// repeating the string literals, so the values cannot drift apart.
const ACCEPTED_EXTENSIONS: &[&str] = &[TOML_FILE_EXTENSION, CLNRM_TOML_EXTENSION];
286
287fn discover_test_files(path: &PathBuf) -> Result<Vec<PathBuf>> {
294 let mut test_files = Vec::new();
295
296 if path.is_file() {
297 let path_str = path.to_str().unwrap_or("");
299 if ACCEPTED_EXTENSIONS.iter().any(|ext| path_str.ends_with(ext)) {
300 test_files.push(path.clone());
301 } else {
302 return Err(CleanroomError::validation_error(&format!(
303 "File must have .toml or .clnrm.toml extension: {}",
304 path.display()
305 )));
306 }
307 } else if path.is_dir() {
308 info!("Discovering test files in: {}", path.display());
310
311 for entry in WalkDir::new(path)
312 .follow_links(true)
313 .into_iter()
314 .filter_map(|e| e.ok())
315 {
316 let entry_path = entry.path();
317 let path_str = entry_path.to_str().unwrap_or("");
318
319 if ACCEPTED_EXTENSIONS.iter().any(|ext| path_str.ends_with(ext)) && entry_path.is_file() {
321 test_files.push(entry_path.to_path_buf());
322 debug!("Found test file: {}", entry_path.display());
323 }
324 }
325
326 if test_files.is_empty() {
327 return Err(CleanroomError::validation_error(&format!(
328 "No test files (.toml or .clnrm.toml) found in directory: {}",
329 path.display()
330 )));
331 }
332
333 info!("Discovered {} test file(s)", test_files.len());
334 } else {
335 return Err(CleanroomError::validation_error(&format!(
336 "Path is neither a file nor a directory: {}",
337 path.display()
338 )));
339 }
340
341 Ok(test_files)
342}
343
/// Parses a TOML test file via the shared config loader.
///
/// NOTE(review): returns `crate::config::TestConfig`, a different type from
/// the file-local `TestConfig` used by `run_single_test` and validation —
/// confirm the two schemas are meant to coexist. This helper appears unused
/// in this file.
fn parse_toml_test(path: &PathBuf) -> Result<crate::config::TestConfig> {
    crate::config::load_config_from_file(path)
}
348
/// CLI entry point: parses arguments, configures logging, and dispatches to
/// the selected subcommand. Exits the process with status 1 when the
/// dispatched command reports an error.
///
/// NOTE(review): `cli.config` is never consulted and `init_cleanroom_config`
/// is never called — confirm intended wiring. Also note the `?` operators
/// inside the non-Run arms return early from this function, bypassing the
/// `exit(1)` path below; error handling differs between commands.
pub async fn run_cli() -> Result<()> {
    let cli = Cli::parse();

    setup_logging(cli.verbose)?;

    let result = match cli.command {
        Commands::Run {
            paths,
            parallel,
            jobs,
            fail_fast,
            watch,
            interactive,
        } => {
            // Bundle global flags with run-specific options for the runner.
            let config = CliConfig {
                parallel,
                jobs,
                format: cli.format.clone(),
                fail_fast,
                watch,
                interactive,
                verbose: cli.verbose,
            };
            run_tests(&paths, &config).await
        }

        Commands::Validate { files } => {
            for file in files {
                validate_config(&file)?;
            }
            Ok(())
        }

        Commands::Init { name, template } => {
            init_project(name.as_deref(), &template)?;
            Ok(())
        }

        Commands::Plugins => {
            list_plugins()?;
            Ok(())
        }

        Commands::Services { command } => match command {
            ServiceCommands::Status => {
                show_service_status().await?;
                Ok(())
            }
            ServiceCommands::Logs { service, lines } => {
                show_service_logs(&service, lines).await?;
                Ok(())
            }
            ServiceCommands::Restart { service } => {
                restart_service(&service).await?;
                Ok(())
            }
        },

        Commands::Report { input, output, format } => {
            // Map the enum to the string key generate_report expects.
            let format_str = match format {
                ReportFormat::Html => "html",
                ReportFormat::Markdown => "markdown",
                ReportFormat::Json => "json",
                ReportFormat::Pdf => "pdf",
            };
            generate_report(input.as_ref(), output.as_ref(), format_str).await?;
            Ok(())
        }

        Commands::SelfTest { suite, report } => {
            run_self_tests(suite, report).await?;
            Ok(())
        }
    };

    // Convert a command error into a non-zero exit code for shells/CI.
    if let Err(e) = result {
        error!("Command failed: {}", e);
        std::process::exit(1);
    }

    Ok(())
}
433
434fn setup_logging(verbosity: u8) -> Result<()> {
436 use tracing_subscriber::{fmt, EnvFilter};
437
438 let filter_level = match verbosity {
439 0 => "info",
440 1 => "debug",
441 _ => "trace",
442 };
443
444 let filter = EnvFilter::try_from_default_env()
445 .unwrap_or_else(|_| EnvFilter::new(filter_level));
446
447 fmt()
448 .with_env_filter(filter)
449 .with_target(false)
450 .with_thread_ids(false)
451 .with_file(false)
452 .with_line_number(false)
453 .init();
454
455 Ok(())
456}
457
458pub async fn run_tests(paths: &[PathBuf], config: &CliConfig) -> Result<()> {
460 info!("Running cleanroom tests (framework self-testing)");
461 debug!("Test paths: {:?}", paths);
462 debug!("Config: parallel={}, jobs={}", config.parallel, config.jobs);
463
464 if config.watch {
466 return Err(CleanroomError::validation_error("Watch mode not yet implemented"));
467 }
468
469 if config.interactive {
471 warn!("Interactive mode requested but not yet fully implemented");
472 info!("Tests will run normally - interactive mode coming in v0.4.0");
473 }
474
475 let mut all_test_files = Vec::new();
477 for path in paths {
478 let discovered = discover_test_files(path)?;
479 all_test_files.extend(discovered);
480 }
481
482 info!("Found {} test file(s) to execute", all_test_files.len());
483
484 let start_time = std::time::Instant::now();
485 let results = if config.parallel {
486 run_tests_parallel_with_results(&all_test_files, config).await?
487 } else {
488 run_tests_sequential_with_results(&all_test_files, config).await?
489 };
490
491 let total_duration = start_time.elapsed().as_millis() as u64;
492 let cli_results = CliTestResults {
493 tests: results,
494 total_duration_ms: total_duration,
495 };
496
497 match config.format {
499 OutputFormat::Junit => {
500 let junit_xml = generate_junit_xml(&cli_results)?;
501 println!("{}", junit_xml);
502 }
503 _ => {
504 let passed = cli_results.tests.iter().filter(|t| t.passed).count();
506 let failed = cli_results.tests.iter().filter(|t| !t.passed).count();
507 info!("Test Results: {} passed, {} failed", passed, failed);
508
509 if failed > 0 {
510 return Err(CleanroomError::validation_error(&format!(
511 "{} test(s) failed", failed
512 )));
513 }
514 }
515 }
516
517 Ok(())
518}
519
520async fn run_tests_sequential_with_results(paths: &[PathBuf], config: &CliConfig) -> Result<Vec<CliTestResult>> {
522 let mut results = Vec::new();
523
524 for path in paths {
525 debug!("Processing test file: {}", path.display());
526 let test_name = path.file_name()
527 .and_then(|n| n.to_str())
528 .unwrap_or("unknown")
529 .to_string();
530
531 let start_time = std::time::Instant::now();
532 match run_single_test(path, config).await {
533 Ok(_) => {
534 let duration = start_time.elapsed().as_millis() as u64;
535 info!("Test passed: {}", path.display());
536 results.push(CliTestResult {
537 name: test_name,
538 passed: true,
539 duration_ms: duration,
540 error: None,
541 });
542 }
543 Err(e) => {
544 let duration = start_time.elapsed().as_millis() as u64;
545 error!("Test failed: {} - {}", path.display(), e);
546 results.push(CliTestResult {
547 name: test_name,
548 passed: false,
549 duration_ms: duration,
550 error: Some(e.to_string()),
551 });
552 if config.fail_fast {
553 break;
554 }
555 }
556 }
557 }
558
559 Ok(results)
560}
561
562async fn run_tests_sequential(paths: &[PathBuf], config: &CliConfig) -> Result<()> {
564 let results = run_tests_sequential_with_results(paths, config).await?;
565 let tests_passed = results.iter().filter(|r| r.passed).count();
566 let tests_failed = results.iter().filter(|r| !r.passed).count();
567
568 info!("Test Results: {} passed, {} failed", tests_passed, tests_failed);
569
570 if tests_failed > 0 {
571 Err(CleanroomError::validation_error(&format!(
572 "{} test(s) failed", tests_failed
573 )))
574 } else {
575 info!("All tests passed! Framework self-testing successful.");
576 Ok(())
577 }
578}
579
580async fn run_tests_parallel_with_results(paths: &[PathBuf], config: &CliConfig) -> Result<Vec<CliTestResult>> {
582 use tokio::task::JoinSet;
583
584 let mut join_set = JoinSet::new();
585 let mut results = Vec::new();
586
587 for path in paths {
589 let path_clone = path.clone();
590 let config_clone = config.clone();
591 let test_name = path.file_name()
592 .and_then(|n| n.to_str())
593 .unwrap_or("unknown")
594 .to_string();
595
596 join_set.spawn(async move {
597 let start_time = std::time::Instant::now();
598 let result = run_single_test(&path_clone, &config_clone).await;
599 let duration = start_time.elapsed().as_millis() as u64;
600 (test_name, result, duration)
601 });
602 }
603
604 while let Some(result) = join_set.join_next().await {
606 match result {
607 Ok((test_name, Ok(_), duration)) => {
608 results.push(CliTestResult {
609 name: test_name,
610 passed: true,
611 duration_ms: duration,
612 error: None,
613 });
614 }
615 Ok((test_name, Err(e), duration)) => {
616 error!("Test failed: {}", e);
617 results.push(CliTestResult {
618 name: test_name,
619 passed: false,
620 duration_ms: duration,
621 error: Some(e.to_string()),
622 });
623 if config.fail_fast {
624 join_set.abort_all();
625 break;
626 }
627 }
628 Err(e) => {
629 error!("Task failed: {}", e);
630 results.push(CliTestResult {
631 name: "unknown".to_string(),
632 passed: false,
633 duration_ms: 0,
634 error: Some(e.to_string()),
635 });
636 }
637 }
638 }
639
640 Ok(results)
641}
642
643async fn run_tests_parallel(paths: &[PathBuf], config: &CliConfig) -> Result<()> {
645 let results = run_tests_parallel_with_results(paths, config).await?;
646 let tests_passed = results.iter().filter(|r| r.passed).count();
647 let tests_failed = results.iter().filter(|r| !r.passed).count();
648
649 info!("Test Results: {} passed, {} failed", tests_passed, tests_failed);
650
651 if tests_failed > 0 {
652 Err(CleanroomError::validation_error(&format!(
653 "{} test(s) failed", tests_failed
654 )))
655 } else {
656 info!("All tests passed! Framework self-testing successful.");
657 Ok(())
658 }
659}
660
661async fn run_single_test(path: &PathBuf, _config: &CliConfig) -> Result<()> {
669 use tracing::{info, debug, warn, error};
670
671 let content = std::fs::read_to_string(path)
673 .map_err(|e| CleanroomError::config_error(format!("Failed to read config file: {}", e)))?;
674
675 let test_config: TestConfig = toml::from_str(&content)
676 .map_err(|e| CleanroomError::config_error(format!("TOML parse error: {}", e)))?;
677
678 info!("🚀 Executing test: {}", test_config.metadata.name);
679 debug!("Test description: {}", test_config.metadata.description);
680
681 let environment = CleanroomEnvironment::new().await
683 .map_err(|e| CleanroomError::internal_error("Failed to create test environment")
684 .with_context("Test execution requires cleanroom environment")
685 .with_source(e.to_string()))?;
686
687 if let Some(services) = &test_config.services {
689 for service in services {
690 debug!("Registering service: {} ({})", service.service_type, service.service_type);
691
692 info!("Service '{}' will be handled through container execution", service.service_type);
695 }
696 }
697
698 for (i, step) in test_config.steps.iter().enumerate() {
700 info!("📋 Step {}: {}", i + 1, step.name);
701
702 let execution_result = environment.execute_in_container(
704 &step.name,
705 &step.command,
706 ).await.map_err(|e| {
707 error!("Step '{}' failed: {}", step.name, e);
708 CleanroomError::deterministic_error(format!("Step '{}' execution failed", step.name))
709 .with_context(format!("Command: {}", step.command.join(" ")))
710 .with_source(e.to_string())
711 })?;
712
713 if let Some(expected_regex) = &step.expected_output_regex {
715 let regex_matches = execution_result.matches_regex(expected_regex)
716 .map_err(|e| CleanroomError::validation_error(format!("Invalid regex pattern '{}': {}", expected_regex, e)))?;
717
718 if !regex_matches {
719 error!("Step '{}' regex validation failed", step.name);
720 error!("Expected pattern: {}", expected_regex);
721 error!("Actual output: {}", execution_result.stdout);
722
723 return Err(CleanroomError::deterministic_error(format!("Step '{}' output does not match expected pattern", step.name))
724 .with_context(format!("Expected regex: {}", expected_regex)));
725 }
726
727 info!("✅ Regex validation passed for step '{}'", step.name);
728 }
729
730 let expected_exit_code = 0;
732 if execution_result.exit_code != expected_exit_code {
733 error!("Step '{}' exit code mismatch", step.name);
734 error!("Expected exit code: {}", expected_exit_code);
735 error!("Actual exit code: {}", execution_result.exit_code);
736
737 return Err(CleanroomError::deterministic_error(format!("Step '{}' failed with exit code {}", step.name, execution_result.exit_code))
738 .with_context(format!("Expected exit code: {}", expected_exit_code)));
739 }
740
741 info!("✅ Step '{}' completed successfully (exit code: {})", step.name, execution_result.exit_code);
742 }
743
744 info!("🎉 Test '{}' completed successfully", test_config.metadata.name);
745
746 if let Some(assertions) = &test_config.assertions {
748 validate_test_assertions(&environment, assertions).await?;
749 info!("✅ All assertions passed");
750 }
751
752 Ok(())
753}
754
/// Evaluates the `[assertions]` table from a test file against observed
/// environment state.
///
/// Supported keys:
/// - `container_should_have_executed_commands` (integer): total commands
///   (created + reused containers) must be at least this lower bound.
/// - `execution_should_be_hermetic` (bool `true`): the environment's session
///   ID must not be nil.
///
/// Unknown keys are logged as warnings and otherwise ignored.
async fn validate_test_assertions(
    environment: &CleanroomEnvironment,
    assertions: &HashMap<String, toml::Value>,
) -> Result<()> {
    use tracing::{info, warn};

    for (assertion_key, assertion_value) in assertions {
        match assertion_key.as_str() {
            "container_should_have_executed_commands" => {
                // Non-integer values are silently skipped.
                if let Some(expected_count) = assertion_value.as_integer() {
                    let (created, reused) = environment.get_container_reuse_stats().await;
                    let total_commands = created + reused;

                    // `expected_count` is a lower bound, not an exact match.
                    // NOTE(review): the `as u32` cast truncates large/negative
                    // TOML integers — confirm the accepted value range.
                    if total_commands < expected_count as u32 {
                        return Err(CleanroomError::deterministic_error(format!(
                            "Assertion failed: expected at least {} commands executed, got {}",
                            expected_count, total_commands
                        )));
                    }

                    info!("✅ Container command execution assertion passed ({} commands)", total_commands);
                }
            }
            "execution_should_be_hermetic" => {
                // Only `true` triggers the check; `false`/non-bool are no-ops.
                if let Some(true) = assertion_value.as_bool() {
                    let session_id = environment.session_id();
                    if session_id.is_nil() {
                        return Err(CleanroomError::deterministic_error("Hermetic isolation assertion failed: invalid session ID"));
                    }

                    info!("✅ Hermetic isolation assertion passed (session: {})", session_id);
                }
            }
            _ => {
                warn!("Unknown assertion type: {}", assertion_key);
            }
        }
    }

    Ok(())
}
801
/// Watches `paths` for modify/create events and re-runs the test suite on
/// each change. Loops until Ctrl+C or a watcher error.
///
/// NOTE(review): this function appears unreachable — `run_tests` rejects
/// `config.watch` before anything could dispatch here, and no caller is
/// visible in this file. Confirm intended wiring.
async fn watch_and_run(paths: &[PathBuf], config: &CliConfig) -> Result<()> {
    use notify::{Watcher, RecursiveMode, Event, event::EventKind};
    use std::sync::mpsc::channel;
    use std::time::Duration;

    info!("Watch mode enabled - monitoring test files for changes");
    info!("Press Ctrl+C to stop watching");

    info!("Running initial test suite...");
    // Clear the watch flag so the nested run_tests call does not reject it.
    let mut watch_config = config.clone();
    watch_config.watch = false;
    if let Err(e) = run_tests(paths, &watch_config).await {
        warn!("Initial test run failed: {}", e);
    }

    // The watcher callback forwards only modify/create events; everything
    // else (and watcher-side errors) is dropped.
    let (tx, rx) = channel();
    let mut watcher = notify::recommended_watcher(move |res: std::result::Result<Event, notify::Error>| {
        if let Ok(event) = res {
            if matches!(event.kind, EventKind::Modify(_) | EventKind::Create(_)) {
                let _ = tx.send(event);
            }
        }
    })
    .map_err(|e| CleanroomError::internal_error("Failed to create file watcher")
        .with_context("Watch mode initialization failed")
        .with_source(e.to_string()))?;

    for path in paths {
        watcher.watch(path.as_ref(), RecursiveMode::Recursive)
            .map_err(|e| CleanroomError::internal_error("Failed to watch path")
                .with_context(format!("Path: {}", path.display()))
                .with_source(e.to_string()))?;
        info!("Watching: {}", path.display());
    }

    loop {
        // NOTE(review): recv_timeout blocks the async runtime thread for up
        // to 1s per iteration — consider spawn_blocking or an async channel.
        match rx.recv_timeout(Duration::from_secs(1)) {
            Ok(event) => {
                info!("File change detected: {:?}", event.paths);
                info!("Rerunning tests...");

                // Small debounce so rapid successive writes trigger one run.
                tokio::time::sleep(Duration::from_millis(100)).await;

                if let Err(e) = run_tests(paths, &watch_config).await {
                    error!("Test run failed: {}", e);
                } else {
                    info!("All tests passed!");
                }
            }
            Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {
                // No events this second; poll again.
                continue;
            }
            Err(e) => {
                // Sender disconnected: the watcher is gone, so bail out.
                return Err(CleanroomError::internal_error("File watcher error")
                    .with_context("Watch mode encountered an error")
                    .with_source(e.to_string()));
            }
        }
    }
}
870
871pub fn validate_config(path: &PathBuf) -> Result<()> {
873 debug!("Validating test configuration: {}", path.display());
874
875 if !path.exists() {
877 return Err(CleanroomError::validation_error(&format!(
878 "Path does not exist: {}", path.display()
879 )));
880 }
881
882 debug!("Checking path: {}, is_file: {}, is_dir: {}", path.display(), path.is_file(), path.is_dir());
883 if path.is_file() {
884 debug!("Validating single file: {}", path.display());
886 validate_single_config(path)?;
887 println!("✅ Configuration valid: {}", path.display());
888 } else if path.is_dir() {
889 let test_files = discover_test_files(path)?;
891
892 info!("Validating {} test file(s)", test_files.len());
893
894 for test_file in &test_files {
895 debug!("Validating: {}", test_file.display());
896 validate_single_config(test_file)?;
897 }
898
899 println!("✅ All configurations valid");
900 } else {
901 return Err(CleanroomError::validation_error(&format!(
902 "Path is neither a file nor a directory: {}",
903 path.display()
904 )));
905 }
906
907 Ok(())
908}
909
910fn validate_single_config(path: &PathBuf) -> Result<()> {
912 if !path.exists() {
914 return Err(CleanroomError::validation_error(&format!(
915 "Test file does not exist: {}", path.display()
916 )));
917 }
918
919 let path_str = path.to_str().unwrap_or("");
921 if !ACCEPTED_EXTENSIONS.iter().any(|ext| path_str.ends_with(ext)) {
922 return Err(CleanroomError::validation_error(&format!(
923 "File must have .toml or .clnrm.toml extension: {}",
924 path.display()
925 )));
926 }
927
928 let content = std::fs::read_to_string(path)
930 .map_err(|e| CleanroomError::config_error(format!("Failed to read config file: {}", e)))?;
931
932 let test_config: TestConfig = toml::from_str(&content)
933 .map_err(|e| CleanroomError::config_error(format!("TOML parse error: {}", e)))?;
934
935 if test_config.metadata.name.is_empty() {
937 return Err(CleanroomError::validation_error("Test name cannot be empty"));
938 }
939
940 if test_config.steps.is_empty() {
941 return Err(CleanroomError::validation_error("At least one step is required"));
942 }
943
944 info!("✅ Configuration valid: {} ({} steps)",
946 test_config.metadata.name, test_config.steps.len());
947
948 Ok(())
949}
950
951pub fn init_project(name: Option<&str>, template: &str) -> Result<()> {
953 let project_name = name.unwrap_or("cleanroom-test");
954
955 info!("Initializing new cleanroom test project: {}", project_name);
956 debug!("Template: {}", template);
957
958 let project_dir = std::path::Path::new(project_name);
960
961 if project_dir.exists() {
962 return Err(CleanroomError::validation_error("Project directory already exists")
963 .with_context(format!("Directory: {}", project_name)));
964 }
965
966 std::fs::create_dir_all(project_dir)?;
968 std::fs::create_dir_all(project_dir.join("tests"))?;
969 std::fs::create_dir_all(project_dir.join("scenarios"))?;
970
971 let test_content = format!(
973 r#"# Cleanroom Test Configuration
974# Generated by clnrm init
975
976name = "{}"
977
978[[scenarios]]
979name = "basic_test"
980steps = [
981 {{ name = "setup", cmd = ["echo", "Setting up test environment"] }},
982 {{ name = "test", cmd = ["echo", "Running test"] }},
983 {{ name = "cleanup", cmd = ["echo", "Cleaning up"] }}
984]
985
986[policy]
987security_level = "medium"
988max_execution_time = 300
989
990# Optional: Add services
991# [[services]]
992# name = "database"
993# service_type = "database"
994# image = "postgres:15"
995# env = {{ POSTGRES_PASSWORD = "testpass" }}
996"#,
997 project_name
998 );
999
1000 std::fs::write(project_dir.join("tests").join("basic.toml"), test_content)?;
1001
1002 let readme_content = format!(
1004 r#"# {} - Cleanroom Test Project
1005
1006This project uses the cleanroom testing framework for hermetic integration testing.
1007
1008## Quick Start
1009
1010```bash
1011# Run tests
1012clnrm run tests/
1013
1014# Validate configuration
1015clnrm validate tests/
1016
1017# Show available plugins
1018clnrm plugins
1019```
1020
1021## Project Structure
1022
1023- `tests/` - Test configuration files
1024- `scenarios/` - Test scenario definitions
1025- `README.md` - This file
1026
1027## Framework Self-Testing
1028
1029This project demonstrates the cleanroom framework testing itself through the "eat your own dog food" principle.
1030"#,
1031 project_name
1032 );
1033
1034 std::fs::write(project_dir.join("README.md"), readme_content)?;
1035
1036 info!("Project initialized successfully: {}", project_dir.display());
1037 debug!("Created test file: tests/basic.toml, Documentation: README.md");
1038
1039 Ok(())
1040}
1041
/// Prints the catalog of built-in service plugins.
///
/// NOTE(review): this listing is hard-coded; confirm it stays in sync with
/// the plugins actually registered elsewhere in the crate.
pub fn list_plugins() -> Result<()> {
    use tracing::info;

    info!("Available Service Plugins:");
    info!("");
    info!("🔧 GenericContainerPlugin");
    info!("   Description: Generic container service that can run any Docker image");
    info!("   Features: Environment variables, port mapping, custom configuration");
    info!("   Usage: service_type = \"generic\"");
    info!("");
    info!("🗄️ SurrealDbPlugin");
    info!("   Description: SurrealDB database service with WebSocket support");
    info!("   Features: Persistent storage, WebSocket API, authentication");
    info!("   Usage: service_type = \"surrealdb\"");
    info!("");
    info!("📦 Total: 2 plugins available");

    Ok(())
}
1062
/// Prints the services registered in a cleanroom environment.
///
/// NOTE(review): this constructs a fresh `CleanroomEnvironment::default()`,
/// so it can only observe services of that new instance (likely none) —
/// confirm whether it should attach to a persistent environment instead.
pub async fn show_service_status() -> Result<()> {
    use crate::cleanroom::CleanroomEnvironment;

    info!("Service Status:");

    let environment = CleanroomEnvironment::default();
    let services = environment.services().await;

    if services.active_services().is_empty() {
        info!("No services currently running");
        debug!("Run 'clnrm run <test_file>' to start services");
    } else {
        info!("Active Services: {}", services.active_services().len());
        for (_handle_id, handle) in services.active_services() {
            debug!("Service: {} (ID: {})", handle.service_name, handle.id);
            // Dump any plugin-provided metadata key/value pairs.
            if !handle.metadata.is_empty() {
                for (key, value) in &handle.metadata {
                    debug!("  {}: {}", key, value);
                }
            }
        }
    }

    Ok(())
}
1090
1091pub async fn show_service_logs(service: &str, lines: usize) -> Result<()> {
1093 use crate::cleanroom::CleanroomEnvironment;
1094
1095 info!("Service Logs for '{}':", service);
1096
1097 let environment = CleanroomEnvironment::default();
1099 let services = environment.services().await;
1100
1101 let service_handle = services.active_services()
1103 .values()
1104 .find(|handle| handle.service_name == service);
1105
1106 match service_handle {
1107 Some(handle) => {
1108 info!("Service found: {} (ID: {})", handle.service_name, handle.id);
1109
1110 unimplemented!("Service log retrieval: Cannot retrieve logs for service '{}' because log retrieval from container backend is not implemented", service);
1112
1113 if !handle.metadata.is_empty() {
1114 debug!("Metadata:");
1115 for (key, value) in &handle.metadata {
1116 debug!(" {}: {}", key, value);
1117 }
1118 }
1119 }
1120 None => {
1121 warn!("Service '{}' not found in active services", service);
1122 debug!("Available services:");
1123 for (_, handle) in services.active_services() {
1124 debug!(" - {}", handle.service_name);
1125 }
1126 if services.active_services().is_empty() {
1127 debug!("No services currently running");
1128 debug!("Run 'clnrm run <test_file>' to start services");
1129 }
1130 }
1131 }
1132
1133 Ok(())
1134}
1135
/// "Restarts" a named service.
///
/// NOTE(review): no stop/start actually happens — the Some arm only emits
/// debug logs, so the "restarted successfully" message is misleading.
/// Confirm whether this is a stub awaiting container-backend support.
pub async fn restart_service(service: &str) -> Result<()> {
    use crate::cleanroom::CleanroomEnvironment;

    info!("Restarting service '{}':", service);

    // NOTE(review): fresh default environment — see show_service_status.
    let environment = CleanroomEnvironment::default();
    let services = environment.services().await;

    let service_handle = services.active_services()
        .values()
        .find(|handle| handle.service_name == service);

    match service_handle {
        Some(handle) => {
            info!("Service found: {} (ID: {})", handle.service_name, handle.id);

            // Placeholder sequence: nothing below touches the backend.
            debug!("Stopping service...");
            debug!("Service stopped");

            debug!("Starting service...");
            debug!("Service restarted");
            debug!("New service ID: {}", handle.id);
            info!("Service '{}' restarted successfully", service);
        }
        None => {
            warn!("Service '{}' not found in active services", service);
            debug!("Available services:");
            for (_, handle) in services.active_services() {
                debug!("  - {}", handle.service_name);
            }
            if services.active_services().is_empty() {
                debug!("No services currently running");
                debug!("Run 'clnrm run <test_file>' to start services");
            }
        }
    }

    Ok(())
}
1186
/// Placeholder for the `report` subcommand.
///
/// NOTE(review): all parameters are ignored and nothing is generated; the
/// command currently logs a notice and succeeds — confirm intent.
pub async fn generate_report(_input: Option<&PathBuf>, _output: Option<&PathBuf>, _format: &str) -> Result<()> {
    info!("Report generation not implemented - framework self-testing required");
    Ok(())
}
1192
/// Runs the framework's built-in self-test suite, optionally writing a JSON
/// report, and fails if any test failed.
///
/// NOTE(review): `suite` is validated against a whitelist but never passed
/// to `run_framework_tests`, so every suite always runs — confirm intent.
pub async fn run_self_tests(suite: Option<String>, report: bool) -> Result<()> {
    use crate::testing::run_framework_tests;
    use tracing::info;

    info!("Starting framework self-tests");

    if let Some(ref suite_name) = suite {
        // Reject unknown suite names early with a helpful message.
        const VALID_SUITES: &[&str] = &["framework", "container", "plugin", "cli", "otel"];
        if !VALID_SUITES.contains(&suite_name.as_str()) {
            return Err(CleanroomError::validation_error(&format!(
                "Invalid test suite '{}'. Valid suites: {}",
                suite_name, VALID_SUITES.join(", ")
            )));
        }
    }

    let results = run_framework_tests().await
        .map_err(|e| CleanroomError::internal_error("Framework self-tests failed")
            .with_context("Failed to execute framework test suite")
            .with_source(e.to_string()))?;

    display_test_results(&results);

    if report {
        generate_framework_report(&results).await
            .map_err(|e| CleanroomError::internal_error("Report generation failed")
                .with_context("Failed to generate test report")
                .with_source(e.to_string()))?;
    }

    // A non-zero failure count maps to an error (and thus exit code 1).
    if results.failed_tests > 0 {
        Err(CleanroomError::validation_error(&format!(
            "{} test(s) failed out of {}",
            results.failed_tests, results.total_tests
        )))
    } else {
        Ok(())
    }
}
1245
1246fn display_test_results(results: &FrameworkTestResults) {
1253 info!("Framework Self-Test Results:");
1255 info!("Total Tests: {}", results.total_tests);
1256 info!("Passed: {}", results.passed_tests);
1257 info!("Failed: {}", results.failed_tests);
1258 info!("Duration: {}ms", results.total_duration_ms);
1259
1260 for test in &results.test_results {
1262 if test.passed {
1263 info!("✅ {} ({}ms)", test.name, test.duration_ms);
1264 } else {
1265 error!("❌ {} ({}ms)", test.name, test.duration_ms);
1266 if let Some(error) = &test.error {
1267 error!(" Error: {}", error);
1268 }
1269 }
1270 }
1271}
1272
1273async fn generate_framework_report(results: &FrameworkTestResults) -> Result<()> {
1280 use tokio::fs;
1281 use serde_json;
1282
1283 let json_report = serde_json::to_string_pretty(results)
1285 .map_err(|e| CleanroomError::internal_error("JSON serialization failed")
1286 .with_context("Failed to serialize test results to JSON")
1287 .with_source(e.to_string()))?;
1288
1289 let report_path = "framework-test-report.json";
1290 fs::write(report_path, json_report).await
1291 .map_err(|e| CleanroomError::internal_error("File write failed")
1292 .with_context("Failed to write test report file")
1293 .with_source(e.to_string()))?;
1294
1295 info!("Report generated: {}", report_path);
1296 Ok(())
1297}
1298
1299fn generate_junit_xml(results: &CliTestResults) -> Result<String> {
1301 use junit_report::{Report, TestSuite, TestCase, Duration, OffsetDateTime};
1302
1303 let mut test_suite = TestSuite::new("cleanroom_tests");
1304 test_suite.set_timestamp(OffsetDateTime::now_utc());
1305
1306 for test in &results.tests {
1307 let duration_secs = test.duration_ms as f64 / 1000.0;
1308 let test_case = if !test.passed {
1309 if let Some(error) = &test.error {
1310 TestCase::failure(&test.name, Duration::seconds(duration_secs as i64), "test_failure", error)
1311 } else {
1312 TestCase::failure(&test.name, Duration::seconds(duration_secs as i64), "test_failure", "Test failed without error message")
1313 }
1314 } else {
1315 TestCase::success(&test.name, Duration::seconds(duration_secs as i64))
1316 };
1317
1318 test_suite.add_testcase(test_case);
1319 }
1320
1321 let mut report = Report::new();
1322 report.add_testsuite(test_suite);
1323
1324 let mut xml_output = Vec::new();
1325 report.write_xml(&mut xml_output)
1326 .map_err(|e| CleanroomError::internal_error("JUnit XML generation failed")
1327 .with_context("Failed to serialize test results to JUnit XML")
1328 .with_source(e.to_string()))?;
1329
1330 String::from_utf8(xml_output)
1331 .map_err(|e| CleanroomError::internal_error("JUnit XML encoding failed")
1332 .with_context("Failed to convert JUnit XML to UTF-8 string")
1333 .with_source(e.to_string()))
1334}
1335
#[cfg(test)]
mod tests {
    //! Unit tests for the CLI module: config defaults, plugin/service
    //! commands, framework self-tests, report generation, and TOML
    //! parsing/validation. File-based tests use `TempDir` fixtures.
    use super::*;
    use std::fs;
    use tempfile::TempDir;

    // Defaults documented on the CLI: 4 jobs, sequential, no fail-fast.
    #[test]
    fn test_cli_config_default() -> Result<()> {
        let config = CliConfig::default();

        assert_eq!(config.jobs, 4);
        assert!(!config.parallel);
        assert!(!config.fail_fast);

        Ok(())
    }

    // Smoke test: listing plugins should never error.
    #[test]
    fn test_list_plugins() -> Result<()> {
        let result = list_plugins();

        assert!(result.is_ok());

        Ok(())
    }

    // Smoke test: status display succeeds even with no services running.
    #[tokio::test]
    async fn test_show_service_status() -> Result<()> {
        let result = show_service_status().await;

        assert!(result.is_ok());

        Ok(())
    }

    // Full self-test run (no suite filter, no report) should pass.
    #[tokio::test]
    async fn test_run_self_tests_succeeds() -> Result<()> {
        let suite = None;
        let report = false;

        let result = run_self_tests(suite, report).await;

        assert!(result.is_ok(), "Framework self-tests should succeed: {:?}", result.err());
        Ok(())
    }

    // An unknown suite name must be rejected up front with a validation error.
    // NOTE: asserts on the error's `message` field directly, unlike
    // test_validate_config_missing_name below which uses to_string().
    #[tokio::test]
    async fn test_run_self_tests_with_invalid_suite_fails() -> Result<()> {
        let suite = Some("invalid_suite".to_string());
        let report = false;

        let result = run_self_tests(suite, report).await;

        assert!(result.is_err(), "Invalid suite should cause validation error");
        assert!(result.unwrap_err().message.contains("Invalid test suite"));
        Ok(())
    }

    // "framework" is a recognized suite name and should run cleanly.
    #[tokio::test]
    async fn test_run_self_tests_with_valid_suite_succeeds() -> Result<()> {
        let suite = Some("framework".to_string());
        let report = false;

        let result = run_self_tests(suite, report).await;

        assert!(result.is_ok(), "Valid suite should succeed: {:?}", result.err());
        Ok(())
    }

    // Exercises the logging path with a mixed pass/fail result set.
    // No assertions: display_test_results returns (), so this only
    // verifies it does not panic on failed entries with error text.
    #[tokio::test]
    async fn test_display_test_results_formats_correctly() {
        use crate::testing::{FrameworkTestResults, TestResult};

        let results = FrameworkTestResults {
            total_tests: 3,
            passed_tests: 2,
            failed_tests: 1,
            total_duration_ms: 1500,
            test_results: vec![
                TestResult {
                    name: "test1".to_string(),
                    passed: true,
                    duration_ms: 500,
                    error: None,
                },
                TestResult {
                    name: "test2".to_string(),
                    passed: true,
                    duration_ms: 300,
                    error: None,
                },
                TestResult {
                    name: "test3".to_string(),
                    passed: false,
                    duration_ms: 700,
                    error: Some("Test failed".to_string()),
                },
            ],
        };

        display_test_results(&results);

    }

    // Report generation should create framework-test-report.json in cwd.
    // NOTE(review): the report path is fixed, so this test can race with
    // any other test (or a concurrent self-test run) that writes or
    // deletes the same file — consider a unique temp path. TODO confirm.
    #[tokio::test]
    async fn test_generate_framework_report_creates_file() -> Result<()> {
        use crate::testing::{FrameworkTestResults, TestResult};
        use std::fs;

        let results = FrameworkTestResults {
            total_tests: 1,
            passed_tests: 1,
            failed_tests: 0,
            total_duration_ms: 1000,
            test_results: vec![
                TestResult {
                    name: "test1".to_string(),
                    passed: true,
                    duration_ms: 1000,
                    error: None,
                },
            ],
        };

        let result = generate_framework_report(&results).await;

        assert!(result.is_ok(), "Report generation should succeed: {:?}", result.err());

        let report_exists = fs::metadata("framework-test-report.json").is_ok();
        assert!(report_exists, "Report file should be created");

        // Best-effort cleanup; ignore failure so the test result is not masked.
        let _ = fs::remove_file("framework-test-report.json");

        Ok(())
    }

    // Well-formed TOML with one scenario/step should parse into the
    // expected structure, field by field.
    #[test]
    fn test_parse_toml_test_valid() -> Result<()> {
        let temp_dir = TempDir::new()
            .map_err(|e| CleanroomError::internal_error("Failed to create temp dir")
                .with_source(e.to_string()))?;
        let test_file = temp_dir.path().join("test.toml");

        let toml_content = r#"
name = "test_example"

[[scenarios]]
name = "basic_test"
steps = [
    { name = "test_step", cmd = ["echo", "hello world"] }
]
"#;

        fs::write(&test_file, toml_content)
            .map_err(|e| CleanroomError::internal_error("Failed to write test file")
                .with_source(e.to_string()))?;

        let config = parse_toml_test(&test_file)?;

        assert_eq!(config.name, "test_example");
        assert_eq!(config.scenarios.len(), 1);
        assert_eq!(config.scenarios[0].name, "basic_test");
        assert_eq!(config.scenarios[0].steps.len(), 1);
        assert_eq!(config.scenarios[0].steps[0].name, "test_step");
        assert_eq!(config.scenarios[0].steps[0].cmd, vec!["echo", "hello world"]);

        Ok(())
    }

    // Syntactically broken TOML (unterminated table header) must error.
    #[test]
    fn test_parse_toml_test_invalid_toml() -> Result<()> {
        let temp_dir = TempDir::new()
            .map_err(|e| CleanroomError::internal_error("Failed to create temp dir")
                .with_source(e.to_string()))?;
        let test_file = temp_dir.path().join("invalid.toml");

        let invalid_toml = r#"
[test
name = "invalid"
"#;

        fs::write(&test_file, invalid_toml)
            .map_err(|e| CleanroomError::internal_error("Failed to write test file")
                .with_source(e.to_string()))?;

        let result = parse_toml_test(&test_file);
        assert!(result.is_err());

        Ok(())
    }

    // A missing file path must surface as an error, not a panic.
    #[test]
    fn test_parse_toml_test_file_not_found() -> Result<()> {
        let non_existent_file = PathBuf::from("non_existent.toml");

        let result = parse_toml_test(&non_existent_file);
        assert!(result.is_err());

        Ok(())
    }

    // A config with a name, one service, and one step validates cleanly.
    #[test]
    fn test_validate_config_valid() -> Result<()> {
        let temp_dir = TempDir::new()
            .map_err(|e| CleanroomError::internal_error("Failed to create temp dir")
                .with_source(e.to_string()))?;
        let test_file = temp_dir.path().join("valid.toml");

        let toml_content = r#"
[test]
name = "valid_test"
description = "A valid test configuration"

# Test container
[[services]]
name = "test_container"
type = "generic_container"
plugin = "alpine"
image = "alpine:latest"

# Test steps
[[steps]]
name = "test_step"
command = ["echo", "test"]
"#;

        fs::write(&test_file, toml_content)
            .map_err(|e| CleanroomError::internal_error("Failed to write test file")
                .with_source(e.to_string()))?;

        let result = validate_config(&test_file);

        assert!(result.is_ok());

        Ok(())
    }

    // Empty test name must be rejected with a specific validation message.
    #[test]
    fn test_validate_config_missing_name() -> Result<()> {
        let temp_dir = TempDir::new()
            .map_err(|e| CleanroomError::internal_error("Failed to create temp dir")
                .with_source(e.to_string()))?;
        let test_file = temp_dir.path().join("missing_name.toml");

        let toml_content = r#"
[test]
name = ""
description = "Test with empty name"

[[steps]]
name = "test_step"
command = ["echo", "test"]
"#;

        fs::write(&test_file, toml_content)
            .map_err(|e| CleanroomError::internal_error("Failed to write test file")
                .with_source(e.to_string()))?;

        let result = validate_config(&test_file);

        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("Test name cannot be empty"));

        Ok(())
    }

    // NOTE(review): despite its name, this fixture contains no regex at
    // all and the test asserts success — it does not exercise invalid
    // regex handling. Either the fixture lost its regex field or the
    // test should be renamed. TODO confirm intended coverage.
    #[test]
    fn test_validate_config_invalid_regex() -> Result<()> {
        let temp_dir = TempDir::new()
            .map_err(|e| CleanroomError::internal_error("Failed to create temp dir")
                .with_source(e.to_string()))?;
        let test_file = temp_dir.path().join("invalid_regex.toml");

        let toml_content = r#"
[test]
name = "regex_test"
description = "Test with regex validation"

[[steps]]
name = "test_step"
command = ["echo", "test"]
"#;

        fs::write(&test_file, toml_content)
            .map_err(|e| CleanroomError::internal_error("Failed to write test file")
                .with_source(e.to_string()))?;

        let result = validate_config(&test_file);

        assert!(result.is_ok());

        Ok(())
    }

    // A config without any [[steps]] must fail with a specific message.
    #[test]
    fn test_validate_config_missing_steps() -> Result<()> {
        let temp_dir = TempDir::new()
            .map_err(|e| CleanroomError::internal_error("Failed to create temp dir")
                .with_source(e.to_string()))?;
        let test_file = temp_dir.path().join("missing_steps.toml");

        let toml_content = r#"
[test]
name = "missing_steps_test"
description = "Test with missing steps"
"#;

        fs::write(&test_file, toml_content)
            .map_err(|e| CleanroomError::internal_error("Failed to write test file")
                .with_source(e.to_string()))?;

        let result = validate_config(&test_file);

        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("At least one step is required"));

        Ok(())
    }

    // Smoke test: fetching logs for a (possibly absent) service succeeds.
    // presumably show_service_logs treats unknown services as a no-op —
    // verify against its implementation.
    #[tokio::test]
    async fn test_show_service_logs() -> Result<()> {
        let result = show_service_logs("test_service", 10).await;

        assert!(result.is_ok());

        Ok(())
    }

    // Smoke test: restarting a (possibly absent) service succeeds.
    #[tokio::test]
    async fn test_restart_service() -> Result<()> {
        let result = restart_service("test_service").await;

        assert!(result.is_ok());

        Ok(())
    }
}