// clnrm_core/cleanroom.rs

//! Cleanroom Environment - Framework Self-Testing Implementation
//!
//! Core cleanroom environment that tests itself through the "eat your own dog food"
//! principle. Every feature of this framework is validated by using the framework
//! to test its own functionality.
use crate::backend::{Backend, Cmd, TestcontainerBackend};
use crate::error::{CleanroomError, Result};
#[cfg(feature = "otel-traces")]
use opentelemetry::global;
// `global` and `KeyValue` are also used by `otel-metrics`-gated code paths
// (meter construction and metric attributes); import them when only that
// feature is enabled so every feature combination compiles.
#[cfg(all(feature = "otel-metrics", not(feature = "otel-traces")))]
use opentelemetry::global;
#[cfg(feature = "otel-traces")]
use opentelemetry::trace::{Span, Tracer, TracerProvider};
#[cfg(feature = "otel-traces")]
use opentelemetry::KeyValue;
#[cfg(all(feature = "otel-metrics", not(feature = "otel-traces")))]
use opentelemetry::KeyValue;
use std::any::Any;
use std::collections::HashMap;
use std::sync::Arc;
use testcontainers::runners::AsyncRunner;
use testcontainers_modules::surrealdb::{SurrealDb, SURREALDB_PORT};
use tokio::sync::RwLock;
use uuid::Uuid;

/// Plugin-based service registry (no hardcoded postgres/redis)
///
/// Implementors provide the lifecycle for one service type. The trait is
/// object-safe (used as `Box<dyn ServicePlugin>` / `Arc<dyn ServicePlugin>`),
/// and `Send + Sync + Debug` so plugins can be stored in a shared registry.
pub trait ServicePlugin: Send + Sync + std::fmt::Debug {
    /// Get service name (used as the registry key for this plugin)
    fn name(&self) -> &str;

    /// Start the service, returning a handle identifying the new instance
    fn start(&self) -> Result<ServiceHandle>;

    /// Stop the service instance identified by `handle` (consumes the handle)
    fn stop(&self, handle: ServiceHandle) -> Result<()>;

    /// Check service health for a running instance
    fn health_check(&self, handle: &ServiceHandle) -> HealthStatus;
}
37
/// Service handle for managing service instances
///
/// Returned by [`ServicePlugin::start`]; used later to stop or health-check
/// the instance. Cloneable so it can live both in the registry and with the
/// caller.
#[derive(Debug, Clone)]
pub struct ServiceHandle {
    /// Unique service instance ID (e.g. a UUID string)
    pub id: String,
    /// Service name (matches the plugin's `name()`)
    pub service_name: String,
    /// Service metadata (e.g. host/port/credentials as string key-value pairs)
    pub metadata: HashMap<String, String>,
}
48
/// Service health status
///
/// Returned by [`ServicePlugin::health_check`]; derives `PartialEq` so it can
/// be compared directly in assertions.
#[derive(Debug, Clone, PartialEq)]
pub enum HealthStatus {
    /// Service is healthy and running
    Healthy,
    /// Service is unhealthy or not responding
    Unhealthy,
    /// Service status is unknown (e.g. the owning plugin is no longer registered)
    Unknown,
}
59
/// Plugin-based service registry
///
/// Owns the registered [`ServicePlugin`]s (keyed by plugin name) and tracks
/// running instances (keyed by handle ID).
#[derive(Debug, Default)]
pub struct ServiceRegistry {
    /// Registered service plugins, keyed by plugin name
    plugins: HashMap<String, Box<dyn ServicePlugin>>,
    /// Active service instances, keyed by handle ID
    active_services: HashMap<String, ServiceHandle>,
}
68
69impl ServiceRegistry {
70    /// Create a new service registry
71    pub fn new() -> Self {
72        Self::default()
73    }
74
75    /// Initialize default plugins
76    pub fn with_default_plugins(mut self) -> Self {
77        use crate::services::{
78            generic::GenericContainerPlugin, ollama::OllamaPlugin, tgi::TgiPlugin, vllm::VllmPlugin,
79        };
80
81        // Register core plugins
82        let generic_plugin = Box::new(GenericContainerPlugin::new(
83            "generic_container",
84            "alpine:latest",
85        ));
86        self.register_plugin(generic_plugin);
87
88        // Register AI/LLM proxy plugins for automated rollout testing
89        let ollama_config = crate::services::ollama::OllamaConfig {
90            endpoint: "http://localhost:11434".to_string(),
91            default_model: "qwen3-coder:30b".to_string(),
92            timeout_seconds: 60,
93        };
94        let ollama_plugin = Box::new(OllamaPlugin::new("ollama", ollama_config));
95        self.register_plugin(ollama_plugin);
96
97        let vllm_config = crate::services::vllm::VllmConfig {
98            endpoint: "http://localhost:8000".to_string(),
99            model: "microsoft/DialoGPT-medium".to_string(),
100            max_num_seqs: Some(100),
101            max_model_len: Some(2048),
102            tensor_parallel_size: Some(1),
103            gpu_memory_utilization: Some(0.9),
104            enable_prefix_caching: Some(true),
105            timeout_seconds: 60,
106        };
107        let vllm_plugin = Box::new(VllmPlugin::new("vllm", vllm_config));
108        self.register_plugin(vllm_plugin);
109
110        let tgi_config = crate::services::tgi::TgiConfig {
111            endpoint: "http://localhost:8080".to_string(),
112            model_id: "microsoft/DialoGPT-medium".to_string(),
113            max_total_tokens: Some(2048),
114            max_input_length: Some(1024),
115            max_batch_prefill_tokens: Some(4096),
116            max_concurrent_requests: Some(32),
117            max_batch_total_tokens: Some(8192),
118            timeout_seconds: 60,
119        };
120        let tgi_plugin = Box::new(TgiPlugin::new("tgi", tgi_config));
121        self.register_plugin(tgi_plugin);
122
123        self
124    }
125
126    /// Register a service plugin
127    pub fn register_plugin(&mut self, plugin: Box<dyn ServicePlugin>) {
128        let name = plugin.name().to_string();
129        self.plugins.insert(name, plugin);
130    }
131
132    /// Start a service by name
133    pub async fn start_service(&mut self, service_name: &str) -> Result<ServiceHandle> {
134        let plugin = self.plugins.get(service_name).ok_or_else(|| {
135            CleanroomError::internal_error(format!("Service plugin '{}' not found", service_name))
136        })?;
137
138        let handle = plugin.start()?;
139        self.active_services
140            .insert(handle.id.clone(), handle.clone());
141
142        Ok(handle)
143    }
144
145    /// Stop a service by handle ID
146    pub async fn stop_service(&mut self, handle_id: &str) -> Result<()> {
147        if let Some(handle) = self.active_services.remove(handle_id) {
148            let plugin = self.plugins.get(&handle.service_name).ok_or_else(|| {
149                CleanroomError::internal_error(format!(
150                    "Service plugin '{}' not found for handle '{}'",
151                    handle.service_name, handle_id
152                ))
153            })?;
154
155            plugin.stop(handle)?;
156        }
157
158        Ok(())
159    }
160
161    /// Check health of all services
162    pub async fn check_all_health(&self) -> HashMap<String, HealthStatus> {
163        let mut health_status = HashMap::new();
164
165        for (handle_id, handle) in &self.active_services {
166            if let Some(plugin) = self.plugins.get(&handle.service_name) {
167                health_status.insert(handle_id.clone(), plugin.health_check(handle));
168            } else {
169                health_status.insert(handle_id.clone(), HealthStatus::Unknown);
170            }
171        }
172
173        health_status
174    }
175
176    /// Get all active service handles
177    pub fn active_services(&self) -> &HashMap<String, ServiceHandle> {
178        &self.active_services
179    }
180
181    /// Check if service is running
182    pub fn is_service_running(&self, service_name: &str) -> bool {
183        self.active_services
184            .values()
185            .any(|handle| handle.service_name == service_name)
186    }
187
188    /// Get service logs
189    pub async fn get_service_logs(&self, service_id: &str, lines: usize) -> Result<Vec<String>> {
190        let handle = self.active_services.get(service_id).ok_or_else(|| {
191            CleanroomError::internal_error(format!("Service with ID '{}' not found", service_id))
192        })?;
193
194        let _plugin = self.plugins.get(&handle.service_name).ok_or_else(|| {
195            CleanroomError::internal_error(format!(
196                "Service plugin '{}' not found",
197                handle.service_name
198            ))
199        })?;
200
201        // For now, return mock logs since actual log retrieval depends on the service implementation
202        // In a real implementation, this would call plugin.get_logs(handle, lines)
203        let mock_logs = vec![
204            format!(
205                "[{}] Service '{}' started",
206                chrono::Utc::now().format("%Y-%m-%d %H:%M:%S"),
207                handle.service_name
208            ),
209            format!(
210                "[{}] Service '{}' is running",
211                chrono::Utc::now().format("%Y-%m-%d %H:%M:%S"),
212                handle.service_name
213            ),
214        ];
215
216        // Return only the requested number of lines
217        Ok(mock_logs.into_iter().take(lines).collect())
218    }
219}
220
/// Simple metrics for quick access
///
/// A plain, cloneable snapshot of per-session counters; shared behind an
/// `Arc<RwLock<_>>` by [`CleanroomEnvironment`].
#[derive(Debug, Clone)]
pub struct SimpleMetrics {
    /// Session ID this metrics record belongs to
    pub session_id: Uuid,
    /// Start time (monotonic `Instant`, for elapsed-time measurement)
    pub start_time: std::time::Instant,
    /// Tests executed
    pub tests_executed: u32,
    /// Tests passed
    pub tests_passed: u32,
    /// Tests failed
    pub tests_failed: u32,
    /// Total duration of all executed tests, in milliseconds
    pub total_duration_ms: u64,
    /// Active containers
    pub active_containers: u32,
    /// Active services
    pub active_services: u32,
    /// Containers created in this session
    pub containers_created: u32,
    /// Containers reused in this session
    pub containers_reused: u32,
}
245
246impl SimpleMetrics {
247    pub fn new() -> Self {
248        Self {
249            session_id: Uuid::new_v4(),
250            start_time: std::time::Instant::now(),
251            tests_executed: 0,
252            tests_passed: 0,
253            tests_failed: 0,
254            total_duration_ms: 0,
255            active_containers: 0,
256            active_services: 0,
257            containers_created: 0,
258            containers_reused: 0,
259        }
260    }
261}
262
impl Default for SimpleMetrics {
    /// Equivalent to [`SimpleMetrics::new`]: fresh session ID, zeroed counters.
    fn default() -> Self {
        Self::new()
    }
}
268
/// Execution result for container command execution
///
/// Returned by [`CleanroomEnvironment::execute_in_container`]; captures the
/// outcome plus enough context (command, container) for diagnostics.
#[derive(Debug, Clone)]
pub struct ExecutionResult {
    /// Exit code of the executed command (0 = success)
    pub exit_code: i32,
    /// Standard output from the command
    pub stdout: String,
    /// Standard error from the command
    pub stderr: String,
    /// Duration of command execution
    pub duration: std::time::Duration,
    /// Command that was executed (argv-style)
    pub command: Vec<String>,
    /// Container name where command was executed
    pub container_name: String,
}
285
286impl ExecutionResult {
287    /// Check if command output matches a regex pattern
288    pub fn matches_regex(&self, pattern: &str) -> Result<bool> {
289        use regex::Regex;
290        let regex = Regex::new(pattern).map_err(|e| {
291            CleanroomError::validation_error(format!("Invalid regex pattern '{}': {}", pattern, e))
292        })?;
293        Ok(regex.is_match(&self.stdout))
294    }
295
296    /// Check if command output does NOT match a regex pattern
297    pub fn does_not_match_regex(&self, pattern: &str) -> Result<bool> {
298        Ok(!self.matches_regex(pattern)?)
299    }
300
301    /// Check if command succeeded (exit code 0)
302    pub fn succeeded(&self) -> bool {
303        self.exit_code == 0
304    }
305
306    /// Check if command failed (non-zero exit code)
307    pub fn failed(&self) -> bool {
308        !self.succeeded()
309    }
310}
311
/// Simple environment wrapper around existing infrastructure
///
/// All shared state lives behind `Arc<RwLock<_>>` so the environment can be
/// used concurrently from multiple tasks.
#[allow(dead_code)]
#[derive(Debug)]
pub struct CleanroomEnvironment {
    /// Session ID (unique per environment instance)
    session_id: Uuid,
    /// Backend for container execution (shared, trait object)
    backend: Arc<dyn Backend>,
    /// Plugin-based service registry
    services: Arc<RwLock<ServiceRegistry>>,
    /// Simple metrics for quick access
    metrics: Arc<RwLock<SimpleMetrics>>,
    /// Container registry for reuse - stores actual container instances
    /// (type-erased via `Any`; downcast on retrieval)
    container_registry: Arc<RwLock<HashMap<String, Box<dyn Any + Send + Sync>>>>,
    /// OpenTelemetry meter for metrics
    #[cfg(feature = "otel-metrics")]
    meter: opentelemetry::metrics::Meter,
    /// Telemetry configuration and state
    telemetry: Arc<RwLock<TelemetryState>>,
}
332
333impl Default for CleanroomEnvironment {
334    fn default() -> Self {
335        // Use a simple synchronous approach for Default
336        // This is acceptable since Default is typically used in tests
337        let backend = Arc::new(
338            TestcontainerBackend::new("alpine:latest").unwrap_or_else(|_| {
339                panic!("Failed to create default backend - check Docker availability")
340            }),
341        );
342
343        Self {
344            session_id: Uuid::new_v4(),
345            backend,
346            services: Arc::new(RwLock::new(ServiceRegistry::new())),
347            metrics: Arc::new(RwLock::new(SimpleMetrics::new())),
348            container_registry: Arc::new(RwLock::new(HashMap::new())),
349            #[cfg(feature = "otel-metrics")]
350            meter: opentelemetry::global::meter("clnrm"),
351            telemetry: Arc::new(RwLock::new(TelemetryState {
352                tracing_enabled: false,
353                metrics_enabled: false,
354                traces: Vec::new(),
355            })),
356        }
357    }
358}
359
/// Telemetry state for the cleanroom environment
///
/// Tracks which telemetry features were requested and buffers trace strings
/// for inspection in tests.
#[derive(Debug)]
pub struct TelemetryState {
    /// Whether tracing is enabled
    pub tracing_enabled: bool,
    /// Whether metrics are enabled
    pub metrics_enabled: bool,
    /// Collected traces (for testing/debugging)
    pub traces: Vec<String>,
}
370
371impl TelemetryState {
372    /// Enable tracing
373    pub fn enable_tracing(&mut self) {
374        self.tracing_enabled = true;
375    }
376
377    /// Enable metrics collection
378    pub fn enable_metrics(&mut self) {
379        self.metrics_enabled = true;
380    }
381
382    /// Get collected traces
383    pub fn get_traces(&self) -> Vec<String> {
384        self.traces.clone()
385    }
386}
387
388impl CleanroomEnvironment {
389    /// Create a new cleanroom environment
390    pub async fn new() -> Result<Self> {
391        Ok(Self {
392            session_id: Uuid::new_v4(),
393            #[cfg(feature = "otel-metrics")]
394            meter: {
395                let meter_provider = global::meter_provider();
396                meter_provider.meter("clnrm-cleanroom")
397            },
398            backend: Arc::new(TestcontainerBackend::new("alpine:latest")?),
399            services: Arc::new(RwLock::new(ServiceRegistry::new().with_default_plugins())),
400            metrics: Arc::new(RwLock::new(SimpleMetrics::default())),
401            container_registry: Arc::new(RwLock::new(HashMap::new())),
402            telemetry: Arc::new(RwLock::new(TelemetryState {
403                tracing_enabled: false,
404                metrics_enabled: false,
405                traces: Vec::new(),
406            })),
407        })
408    }
409
410    /// Execute a test with OTel tracing
411    pub async fn execute_test<F, T>(&self, _test_name: &str, test_fn: F) -> Result<T>
412    where
413        F: FnOnce() -> Result<T>,
414    {
415        #[cfg(feature = "otel-traces")]
416        let tracer_provider = global::tracer_provider();
417        #[cfg(feature = "otel-traces")]
418        let mut span = tracer_provider
419            .tracer("clnrm-cleanroom")
420            .start(format!("test.{}", _test_name));
421        #[cfg(feature = "otel-traces")]
422        span.set_attributes(vec![
423            KeyValue::new("test.name", _test_name.to_string()),
424            KeyValue::new("session.id", self.session_id.to_string()),
425        ]);
426
427        let start_time = std::time::Instant::now();
428
429        // Update metrics
430        {
431            let mut metrics = self.metrics.write().await;
432            metrics.tests_executed += 1;
433        }
434
435        let result = test_fn();
436
437        let duration = start_time.elapsed();
438
439        // Record OTel metrics
440        let success = result.is_ok();
441        if success {
442            let mut metrics = self.metrics.write().await;
443            metrics.tests_passed += 1;
444        } else {
445            let mut metrics = self.metrics.write().await;
446            metrics.tests_failed += 1;
447        }
448
449        let mut metrics = self.metrics.write().await;
450        metrics.total_duration_ms += duration.as_millis() as u64;
451
452        #[cfg(feature = "otel-metrics")]
453        {
454            // OTel metrics
455            let attributes = vec![
456                KeyValue::new("test.name", _test_name.to_string()),
457                KeyValue::new("session.id", self.session_id.to_string()),
458            ];
459
460            let counter = self
461                .meter
462                .u64_counter("test.executions")
463                .with_description("Number of test executions")
464                .build();
465            counter.add(1, &attributes);
466
467            let histogram = self
468                .meter
469                .f64_histogram("test.duration")
470                .with_description("Test execution duration")
471                .build();
472            histogram.record(duration.as_secs_f64(), &attributes);
473        }
474
475        #[cfg(feature = "otel-traces")]
476        if !success {
477            span.set_status(opentelemetry::trace::Status::error("Test failed"));
478        }
479
480        #[cfg(feature = "otel-traces")]
481        span.end();
482
483        result
484    }
485
486    /// Get current metrics
487    pub async fn get_metrics(&self) -> Result<SimpleMetrics> {
488        Ok(self.metrics.read().await.clone())
489    }
490
491    /// Enable tracing for this environment
492    pub async fn enable_tracing(&self) -> Result<()> {
493        #[cfg(feature = "otel-traces")]
494        {
495            let mut telemetry = self.telemetry.write().await;
496            telemetry.enable_tracing();
497        }
498        Ok(())
499    }
500
501    /// Enable metrics collection for this environment
502    pub async fn enable_metrics(&self) -> Result<()> {
503        #[cfg(feature = "otel-traces")]
504        {
505            let mut telemetry = self.telemetry.write().await;
506            telemetry.enable_metrics();
507        }
508        Ok(())
509    }
510
511    /// Get traces from this environment
512    pub async fn get_traces(&self) -> Result<Vec<String>> {
513        #[cfg(feature = "otel-traces")]
514        {
515            let telemetry = self.telemetry.read().await;
516            Ok(telemetry.get_traces())
517        }
518        #[cfg(not(feature = "otel-traces"))]
519        {
520            Ok(Vec::new())
521        }
522    }
523
524    /// Get container reuse statistics
525    pub async fn get_container_reuse_stats(&self) -> (u32, u32) {
526        let metrics = self.metrics.read().await;
527        (metrics.containers_created, metrics.containers_reused)
528    }
529
530    /// Check if a container with the given name has been created in this session
531    pub async fn has_container(&self, name: &str) -> bool {
532        let registry = self.container_registry.read().await;
533        registry.contains_key(name)
534    }
535
536    /// Register a service plugin
537    pub async fn register_service(&self, plugin: Box<dyn ServicePlugin>) -> Result<()> {
538        let mut services = self.services.write().await;
539        services.register_plugin(plugin);
540        Ok(())
541    }
542
543    /// Start a service by name
544    pub async fn start_service(&self, service_name: &str) -> Result<ServiceHandle> {
545        let mut services = self.services.write().await;
546        services.start_service(service_name).await
547    }
548
549    /// Stop a service by handle ID
550    pub async fn stop_service(&self, handle_id: &str) -> Result<()> {
551        let mut services = self.services.write().await;
552        services.stop_service(handle_id).await
553    }
554
555    /// Get service registry (read-only access)
556    pub async fn services(&self) -> tokio::sync::RwLockReadGuard<'_, ServiceRegistry> {
557        self.services.read().await
558    }
559
560    /// Register a container for reuse
561    pub async fn register_container<T: Send + Sync + 'static>(
562        &self,
563        name: String,
564        container: T,
565    ) -> Result<()> {
566        let mut registry = self.container_registry.write().await;
567        registry.insert(name, Box::new(container));
568        Ok(())
569    }
570
571    /// Get or create container with reuse pattern
572    ///
573    /// This method implements true container reuse by storing and returning
574    /// the actual container instances, providing the promised 10-50x performance improvement.
575    pub async fn get_or_create_container<F, T>(&self, name: &str, factory: F) -> Result<T>
576    where
577        F: FnOnce() -> Result<T>,
578        T: Send + Sync + Clone + 'static,
579    {
580        // Check if we've already created a container with this name in this session
581        let existing_container = {
582            let registry = self.container_registry.read().await;
583            if let Some(existing_container) = registry.get(name) {
584                // Try to downcast to the requested type
585                existing_container
586                    .downcast_ref::<T>()
587                    .map(|typed_container| typed_container.clone())
588            } else {
589                None
590            }
591        };
592
593        if let Some(container) = existing_container {
594            // Update metrics to track actual reuse
595            {
596                let mut metrics = self.metrics.write().await;
597                metrics.containers_reused += 1;
598            }
599
600            return Ok(container);
601        }
602
603        // First time creating this container
604        let container = factory()?;
605
606        // Register the actual container for future reuse
607        let mut registry = self.container_registry.write().await;
608        registry.insert(name.to_string(), Box::new(container.clone()));
609
610        // Update metrics
611        {
612            let mut metrics = self.metrics.write().await;
613            metrics.containers_created += 1;
614        }
615
616        Ok(container)
617    }
618
619    /// Check health of all services
620    pub async fn check_health(&self) -> HashMap<String, HealthStatus> {
621        self.services.read().await.check_all_health().await
622    }
623
624    /// Get service logs
625    pub async fn get_service_logs(&self, service_id: &str, lines: usize) -> Result<Vec<String>> {
626        let services = self.services.read().await;
627        services.get_service_logs(service_id, lines).await
628    }
629
630    /// Get session ID
631    pub fn session_id(&self) -> Uuid {
632        self.session_id
633    }
634
635    /// Get backend
636    pub fn backend(&self) -> &dyn Backend {
637        self.backend.as_ref() as &dyn Backend
638    }
639
640    /// Execute a command in a container with proper error handling and observability
641    /// Core Team Compliance: Async for I/O operations, proper error handling, no unwrap/expect
642    ///
643    /// This method creates a fresh container for each command execution, which is appropriate
644    /// for testing scenarios where isolation is more important than performance.
645    pub async fn execute_in_container(
646        &self,
647        container_name: &str,
648        command: &[String],
649    ) -> Result<ExecutionResult> {
650        #[cfg(feature = "otel-traces")]
651        let tracer_provider = global::tracer_provider();
652        #[cfg(feature = "otel-traces")]
653        let mut span = tracer_provider
654            .tracer("clnrm-cleanroom")
655            .start(format!("container.exec.{}", container_name));
656        #[cfg(feature = "otel-traces")]
657        span.set_attributes(vec![
658            KeyValue::new("container.name", container_name.to_string()),
659            KeyValue::new("command", command.join(" ")),
660            KeyValue::new("session.id", self.session_id.to_string()),
661        ]);
662
663        let start_time = std::time::Instant::now();
664
665        // Execute command using backend - this creates a fresh container for each command
666        // This provides maximum isolation and is appropriate for testing scenarios
667        let cmd = Cmd::new("sh")
668            .arg("-c")
669            .arg(command.join(" "))
670            .env("CONTAINER_NAME", container_name);
671
672        // Use spawn_blocking to avoid runtime conflicts with testcontainers
673        // Clone the backend to move it into the blocking task
674        let backend = self.backend.clone();
675        let execution_result = tokio::task::spawn_blocking(move || backend.run_cmd(cmd))
676            .await
677            .map_err(|e| {
678                #[cfg(feature = "otel-traces")]
679                {
680                    span.set_status(opentelemetry::trace::Status::error("Task join failed"));
681                    span.end();
682                }
683                CleanroomError::internal_error("Failed to execute command in blocking task")
684                    .with_context("Command execution task failed")
685                    .with_source(e.to_string())
686            })?
687            .map_err(|e| {
688                #[cfg(feature = "otel-traces")]
689                {
690                    span.set_status(opentelemetry::trace::Status::error(
691                        "Command execution failed",
692                    ));
693                    span.end();
694                }
695                CleanroomError::container_error("Failed to execute command in container")
696                    .with_context(format!(
697                        "Container: {}, Command: {}",
698                        container_name,
699                        command.join(" ")
700                    ))
701                    .with_source(e.to_string())
702            })?;
703
704        let duration = start_time.elapsed();
705
706        // Record metrics
707        #[cfg(feature = "otel-metrics")]
708        {
709            let histogram = self
710                .meter
711                .f64_histogram("container.command.duration")
712                .with_description("Container command execution duration")
713                .build();
714            histogram.record(
715                duration.as_secs_f64(),
716                &[
717                    KeyValue::new("container.name", container_name.to_string()),
718                    KeyValue::new("command", command.join(" ")),
719                ],
720            );
721        }
722
723        #[cfg(feature = "otel-traces")]
724        span.set_attributes(vec![
725            KeyValue::new(
726                "execution.exit_code",
727                execution_result.exit_code.to_string(),
728            ),
729            KeyValue::new("execution.duration_ms", duration.as_millis().to_string()),
730        ]);
731
732        #[cfg(feature = "otel-traces")]
733        if execution_result.exit_code != 0 {
734            span.set_status(opentelemetry::trace::Status::error("Command failed"));
735        }
736
737        #[cfg(feature = "otel-traces")]
738        span.end();
739
740        Ok(ExecutionResult {
741            exit_code: execution_result.exit_code,
742            stdout: execution_result.stdout,
743            stderr: execution_result.stderr,
744            duration,
745            command: command.to_vec(),
746            container_name: container_name.to_string(),
747        })
748    }
749}
750
// NOTE: a `Default` implementation exists above for test convenience; it
// panics when the backend cannot be created, so production code should
// prefer CleanroomEnvironment::new() for proper error handling.
753
/// Example custom service plugin implementation
///
/// This demonstrates how to create custom services without hardcoded dependencies
#[derive(Debug)]
pub struct MockDatabasePlugin {
    // Plugin name reported via ServicePlugin::name ("mock_database")
    name: String,
    // Placeholder for a container ID; never populated by this mock impl
    container_id: Arc<RwLock<Option<String>>>,
}
762
impl Default for MockDatabasePlugin {
    /// Equivalent to [`MockDatabasePlugin::new`].
    fn default() -> Self {
        Self::new()
    }
}
768
769impl MockDatabasePlugin {
770    pub fn new() -> Self {
771        Self {
772            name: "mock_database".to_string(),
773            container_id: Arc::new(RwLock::new(None)),
774        }
775    }
776}
777
778impl ServicePlugin for MockDatabasePlugin {
779    fn name(&self) -> &str {
780        &self.name
781    }
782
783    fn start(&self) -> Result<ServiceHandle> {
784        // For testing, create a simple mock handle without actual container
785        // In production, this would use proper async container startup
786
787        // Build metadata with mock connection details
788        let mut metadata = HashMap::new();
789        metadata.insert("host".to_string(), "127.0.0.1".to_string());
790        metadata.insert("port".to_string(), "8000".to_string());
791        metadata.insert("username".to_string(), "root".to_string());
792        metadata.insert("password".to_string(), "root".to_string());
793
794        Ok(ServiceHandle {
795            id: Uuid::new_v4().to_string(),
796            service_name: "mock_database".to_string(),
797            metadata,
798        })
799    }
800
801    fn stop(&self, _handle: ServiceHandle) -> Result<()> {
802        // For testing, just return success without actual container cleanup
803        // In production, this would properly stop the container
804        Ok(())
805    }
806
807    fn health_check(&self, handle: &ServiceHandle) -> HealthStatus {
808        // Quick check if we have port information
809        if handle.metadata.contains_key("port") {
810            HealthStatus::Healthy
811        } else {
812            HealthStatus::Unknown
813        }
814    }
815}
816
// Unit tests: several of these construct a CleanroomEnvironment and thus
// require a working container backend (Docker) on the host.
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_cleanroom_creation() {
        let result = CleanroomEnvironment::new().await;
        assert!(result.is_ok()); // Should succeed with default implementation
    }

    #[test]
    fn test_service_plugin_dyn_compatibility() {
        // This test verifies that ServicePlugin is dyn compatible
        let plugin: Arc<dyn ServicePlugin> = Arc::new(MockDatabasePlugin::new());

        // Test that we can call methods on the trait object
        assert_eq!(plugin.name(), "mock_database");

        // Test that we can store multiple plugins in a collection
        let mut plugins: Vec<Arc<dyn ServicePlugin>> = Vec::new();
        plugins.push(plugin);

        // Test that we can iterate over them
        for plugin in &plugins {
            assert_eq!(plugin.name(), "mock_database");
        }

        println!("✅ ServicePlugin trait is dyn compatible!");
    }

    #[tokio::test]
    async fn test_cleanroom_session_id() -> Result<()> {
        // A fresh environment must get a non-nil (random v4) session UUID.
        let env = CleanroomEnvironment::new().await?;
        assert!(!env.session_id().is_nil());
        Ok(())
    }

    #[tokio::test]
    async fn test_cleanroom_execute_test() -> Result<()> {
        // execute_test should pass the closure's Ok value straight through.
        let env = CleanroomEnvironment::new().await?;
        let result = env
            .execute_test("test", || Ok::<i32, CleanroomError>(42))
            .await?;
        assert_eq!(result, 42);
        Ok(())
    }

    #[tokio::test]
    async fn test_service_registry() -> Result<()> {
        // Plugins are registered by default, but no services are active yet.
        let env = CleanroomEnvironment::new().await?;
        let services = env.services().await;
        assert!(services.active_services().is_empty());
        Ok(())
    }

    #[tokio::test]
    async fn test_service_plugin_registration() -> Result<()> {
        let env = CleanroomEnvironment::new().await?;
        let plugin = Box::new(MockDatabasePlugin::new());
        env.register_service(plugin).await?;
        Ok(())
    }

    #[tokio::test]
    async fn test_service_start_stop() -> Result<()> {
        // Full lifecycle: register, start (handle carries the service name),
        // then stop by handle ID.
        let env = CleanroomEnvironment::new().await?;
        let plugin = Box::new(MockDatabasePlugin::new());
        env.register_service(plugin).await?;

        let handle = env.start_service("mock_database").await?;
        assert_eq!(handle.service_name, "mock_database");

        env.stop_service(&handle.id).await?;
        Ok(())
    }

    #[tokio::test]
    async fn test_register_container() {
        let env = CleanroomEnvironment::default();
        let result = env
            .register_container("test-container".to_string(), "container-123".to_string())
            .await;
        assert!(result.is_ok());

        // Verify container was registered
        assert!(env.has_container("test-container").await);
    }

    #[tokio::test]
    async fn test_get_or_create_container() -> Result<()> {
        let env = CleanroomEnvironment::new().await?;

        // First call should create and register container
        let result1 = env
            .get_or_create_container("test-container", || {
                Ok::<String, CleanroomError>("container-instance".to_string())
            })
            .await?;
        assert_eq!(result1, "container-instance");

        // Verify container was registered
        assert!(env.has_container("test-container").await);
        let (created, reused) = env.get_container_reuse_stats().await;
        assert_eq!(created, 1);
        assert_eq!(reused, 0);

        // Second call should return the SAME container instance (true reuse!)
        let result2 = env
            .get_or_create_container("test-container", || {
                Ok::<String, CleanroomError>("container-instance-2".to_string())
            })
            .await?;
        // This should be the SAME instance, not a new one
        assert_eq!(result2, "container-instance");

        // Verify reuse was tracked
        let (created, reused) = env.get_container_reuse_stats().await;
        assert_eq!(created, 1);
        assert_eq!(reused, 1);
        Ok(())
    }

    #[tokio::test]
    async fn test_check_health_delegates_to_service_registry() -> Result<()> {
        let env = CleanroomEnvironment::new().await?;
        let health_status = env.check_health().await;
        // Should return empty HashMap since no services are registered
        assert!(health_status.is_empty());
        Ok(())
    }
}