// mockforge_ui/handlers.rs

1//! Request handlers for the admin UI
2//!
//! This module has been refactored into sub-modules for better organization.
//! Key sub-modules include:
//! - assets: Static asset serving
//! - admin: Admin dashboard and server management
//! - workspaces: Workspace management operations
//! - plugin: Plugin management operations
//! - health: Health check endpoints
//! - analytics / analytics_stream / analytics_v2: Analytics endpoints
//!
//! See the `pub mod` declarations below for the full list.
11
12use axum::{
13    extract::{Path, Query, State},
14    http::{self, StatusCode},
15    response::{
16        sse::{Event, Sse},
17        Html, IntoResponse, Json,
18    },
19};
20use chrono::Utc;
21use futures_util::stream::{self, Stream};
22use mockforge_core::{Error, Result};
23use mockforge_plugin_loader::PluginRegistry;
24use serde::{Deserialize, Serialize};
25use serde_json::json;
26use std::collections::HashMap;
27use std::convert::Infallible;
28use std::process::Command;
29use std::process::Stdio;
30use std::sync::Arc;
31use std::time::Duration;
32use sysinfo::System;
33use tokio::sync::RwLock;
34
35// Import all types from models
36use crate::models::{
37    ApiResponse, ConfigUpdate, DashboardData, DashboardSystemInfo, FaultConfig, HealthCheck,
38    LatencyProfile, LogFilter, MetricsData, ProxyConfig, RequestLog, RouteInfo, ServerInfo,
39    ServerStatus, SimpleMetricsData, SystemInfo, ValidationSettings, ValidationUpdate,
40};
41
42// Import import types from core
43use mockforge_core::workspace_import::{ImportResponse, ImportRoute};
44
45// Handler sub-modules
46pub mod admin;
47pub mod ai_studio;
48pub mod analytics;
49pub mod analytics_stream;
50pub mod analytics_v2;
51pub mod assets;
52pub mod behavioral_cloning;
53pub mod chains;
54pub mod community;
55pub mod contract_diff;
56pub mod coverage_metrics;
57pub mod failure_analysis;
58pub mod graph;
59pub mod health;
60pub mod migration;
61pub mod pillar_analytics;
62pub mod playground;
63pub mod plugin;
64pub mod promotions;
65pub mod protocol_contracts;
66pub mod verification;
67pub mod voice;
68pub mod workspaces;
69
70// Re-export commonly used types
71pub use assets::*;
72pub use chains::*;
73pub use graph::*;
74pub use migration::*;
75pub use plugin::*;
76
77// Import workspace persistence
78use mockforge_core::workspace_import::WorkspaceImportConfig;
79use mockforge_core::workspace_persistence::WorkspacePersistence;
80
/// Request metrics for tracking
///
/// Aggregated counters and rolling samples for requests served by the mock
/// servers. Lives behind `Arc<RwLock<..>>` in `AdminState` and is updated by
/// `AdminState::record_request`.
#[derive(Debug, Clone, Default)]
pub struct RequestMetrics {
    /// Total requests served
    pub total_requests: u64,
    /// Active connections
    pub active_connections: u64,
    /// Requests by endpoint, keyed by "METHOD path"
    pub requests_by_endpoint: HashMap<String, u64>,
    /// Response times in milliseconds (last N measurements; the recorder keeps the most recent 100)
    pub response_times: Vec<u64>,
    /// Response times by endpoint (last N measurements per endpoint; the recorder keeps the most recent 50)
    pub response_times_by_endpoint: HashMap<String, Vec<u64>>,
    /// Error count by endpoint (responses with status code >= 400)
    pub errors_by_endpoint: HashMap<String, u64>,
    /// Last request timestamp by endpoint
    pub last_request_by_endpoint: HashMap<String, chrono::DateTime<chrono::Utc>>,
}
99
/// System metrics
///
/// Snapshot of host-level resource usage sampled by the background
/// monitoring task. Derives `Default` (all-zero snapshot) for consistency
/// with `RequestMetrics` and `TimeSeriesData`, which matches the explicit
/// zero initialization previously done by hand in `AdminState::new`.
#[derive(Debug, Clone, Default)]
pub struct SystemMetrics {
    /// Memory usage in MB
    pub memory_usage_mb: u64,
    /// CPU usage percentage
    pub cpu_usage_percent: f64,
    /// Active threads
    pub active_threads: u32,
}
110
/// Time series data point
///
/// A single timestamped sample; the unit of `value` depends on the series
/// this point belongs to (MB, percent, count, or milliseconds).
#[derive(Debug, Clone)]
pub struct TimeSeriesPoint {
    /// Timestamp (UTC) at which the sample was taken
    pub timestamp: chrono::DateTime<chrono::Utc>,
    /// Sampled value
    pub value: f64,
}
119
/// Time series data for tracking metrics over time
///
/// Each series is capped at 100 points by the updaters in `AdminState` to
/// bound memory usage.
#[derive(Debug, Clone, Default)]
pub struct TimeSeriesData {
    /// Memory usage over time (MB)
    pub memory_usage: Vec<TimeSeriesPoint>,
    /// CPU usage over time (percent)
    pub cpu_usage: Vec<TimeSeriesPoint>,
    /// Cumulative request count over time
    pub request_count: Vec<TimeSeriesPoint>,
    /// Response time over time (milliseconds)
    pub response_time: Vec<TimeSeriesPoint>,
}
132
133/// Restart status tracking
134#[derive(Debug, Clone, Serialize, Deserialize)]
135pub struct RestartStatus {
136    /// Whether a restart is currently in progress
137    pub in_progress: bool,
138    /// Timestamp when restart was initiated
139    pub initiated_at: Option<chrono::DateTime<chrono::Utc>>,
140    /// Restart reason/message
141    pub reason: Option<String>,
142    /// Whether restart was successful
143    pub success: Option<bool>,
144}
145
/// Fixture metadata
///
/// Describes a recorded fixture file on disk as surfaced in the admin UI.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureInfo {
    /// Unique identifier for the fixture
    pub id: String,
    /// Protocol type (http, websocket, grpc)
    pub protocol: String,
    /// HTTP method or operation type
    pub method: String,
    /// Request path
    pub path: String,
    /// When the fixture was saved
    pub saved_at: chrono::DateTime<chrono::Utc>,
    /// File size in bytes
    pub file_size: u64,
    /// File path relative to fixtures directory
    pub file_path: String,
    /// Request fingerprint hash
    pub fingerprint: String,
    /// Additional metadata from the fixture file (free-form JSON)
    pub metadata: serde_json::Value,
}
168
/// Smoke test result
///
/// Outcome of a single smoke test run, stored in `AdminState` (most recent
/// 100 results are retained).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SmokeTestResult {
    /// Test ID
    pub id: String,
    /// Test name
    pub name: String,
    /// HTTP method
    pub method: String,
    /// Request path
    pub path: String,
    /// Test description
    pub description: String,
    /// When the test was last run (`None` if never run)
    pub last_run: Option<chrono::DateTime<chrono::Utc>>,
    /// Test status (passed, failed, running, pending)
    pub status: String,
    /// Response time in milliseconds
    pub response_time_ms: Option<u64>,
    /// Error message if test failed
    pub error_message: Option<String>,
    /// HTTP status code received
    pub status_code: Option<u16>,
    /// Test duration in seconds
    pub duration_seconds: Option<f64>,
}
195
/// Smoke test execution context
///
/// Settings that control how a batch of smoke tests is executed.
#[derive(Debug, Clone)]
pub struct SmokeTestContext {
    /// Base URL for the service being tested
    pub base_url: String,
    /// Timeout for individual tests, in seconds
    pub timeout_seconds: u64,
    /// Whether to run tests in parallel
    pub parallel: bool,
}
206
/// Configuration state
///
/// Runtime-tunable configuration exposed through the admin API. Held behind
/// `Arc<RwLock<..>>` in `AdminState` and mutated by its `update_*_config`
/// methods. Serialize-only: it is sent to the UI, not read back as a whole.
#[derive(Debug, Clone, Serialize)]
pub struct ConfigurationState {
    /// Latency profile
    pub latency_profile: LatencyProfile,
    /// Fault configuration
    pub fault_config: FaultConfig,
    /// Proxy configuration
    pub proxy_config: ProxyConfig,
    /// Validation settings
    pub validation_settings: ValidationSettings,
}
219
/// Import history entry
///
/// One record of a past data-import operation, stored in `AdminState`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportHistoryEntry {
    /// Unique ID for the import
    pub id: String,
    /// Import format (postman, insomnia, curl)
    pub format: String,
    /// Timestamp of the import
    pub timestamp: chrono::DateTime<chrono::Utc>,
    /// Number of routes imported
    pub routes_count: usize,
    /// Number of variables imported
    pub variables_count: usize,
    /// Number of warnings
    pub warnings_count: usize,
    /// Whether the import was successful
    pub success: bool,
    /// Filename of the imported file
    pub filename: Option<String>,
    /// Environment used
    pub environment: Option<String>,
    /// Base URL used
    pub base_url: Option<String>,
    /// Error message if failed
    pub error_message: Option<String>,
}
246
/// Shared state for the admin UI
///
/// Cloning is cheap: all mutable state is behind `Arc`, so clones share the
/// same underlying data (the monitoring task relies on this).
#[derive(Clone)]
pub struct AdminState {
    /// HTTP server address
    pub http_server_addr: Option<std::net::SocketAddr>,
    /// WebSocket server address
    pub ws_server_addr: Option<std::net::SocketAddr>,
    /// gRPC server address
    pub grpc_server_addr: Option<std::net::SocketAddr>,
    /// GraphQL server address
    pub graphql_server_addr: Option<std::net::SocketAddr>,
    /// Whether API endpoints are enabled
    pub api_enabled: bool,
    /// Admin server port
    pub admin_port: u16,
    /// Start time (used to compute uptime)
    pub start_time: chrono::DateTime<chrono::Utc>,
    /// Request metrics (protected by RwLock)
    pub metrics: Arc<RwLock<RequestMetrics>>,
    /// System metrics (protected by RwLock)
    pub system_metrics: Arc<RwLock<SystemMetrics>>,
    /// Configuration (protected by RwLock)
    pub config: Arc<RwLock<ConfigurationState>>,
    /// Request logs (protected by RwLock)
    pub logs: Arc<RwLock<Vec<RequestLog>>>,
    /// Time series data (protected by RwLock)
    pub time_series: Arc<RwLock<TimeSeriesData>>,
    /// Restart status (protected by RwLock)
    pub restart_status: Arc<RwLock<RestartStatus>>,
    /// Smoke test results (protected by RwLock)
    pub smoke_test_results: Arc<RwLock<Vec<SmokeTestResult>>>,
    /// Import history (protected by RwLock)
    pub import_history: Arc<RwLock<Vec<ImportHistoryEntry>>>,
    /// Workspace persistence
    pub workspace_persistence: Arc<WorkspacePersistence>,
    /// Plugin registry (protected by RwLock)
    pub plugin_registry: Arc<RwLock<PluginRegistry>>,
    /// Reality engine for managing realism levels
    pub reality_engine: Arc<RwLock<mockforge_core::RealityEngine>>,
    /// Reality Continuum engine for blending mock and real data sources
    pub continuum_engine: Arc<RwLock<mockforge_core::RealityContinuumEngine>>,
    /// Chaos API state for hot-reload support (optional)
    /// Contains config that can be updated at runtime
    pub chaos_api_state: Option<std::sync::Arc<mockforge_chaos::api::ChaosApiState>>,
    /// Latency injector for HTTP middleware (optional)
    /// Allows updating latency profile at runtime
    pub latency_injector:
        Option<std::sync::Arc<tokio::sync::RwLock<mockforge_core::latency::LatencyInjector>>>,
    /// MockAI instance (optional)
    /// Allows updating MockAI configuration at runtime
    pub mockai:
        Option<std::sync::Arc<tokio::sync::RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
}
300
301impl AdminState {
302    /// Start system monitoring background task
303    pub async fn start_system_monitoring(&self) {
304        let state_clone = self.clone();
305        tokio::spawn(async move {
306            let mut sys = System::new_all();
307            let mut refresh_count = 0u64;
308
309            tracing::info!("Starting system monitoring background task");
310
311            loop {
312                // Refresh system information
313                sys.refresh_all();
314
315                // Get CPU usage
316                let cpu_usage = sys.global_cpu_usage();
317
318                // Get memory usage
319                let _total_memory = sys.total_memory() as f64;
320                let used_memory = sys.used_memory() as f64;
321                let memory_usage_mb = used_memory / 1024.0 / 1024.0;
322
323                // Get thread count (use available CPU cores as approximate measure)
324                let active_threads = sys.cpus().len() as u32;
325
326                // Update system metrics
327                let memory_mb_u64 = memory_usage_mb as u64;
328
329                // Only log every 10 refreshes to avoid spam
330                if refresh_count.is_multiple_of(10) {
331                    tracing::debug!(
332                        "System metrics updated: CPU={:.1}%, Mem={}MB, Threads={}",
333                        cpu_usage,
334                        memory_mb_u64,
335                        active_threads
336                    );
337                }
338
339                state_clone
340                    .update_system_metrics(memory_mb_u64, cpu_usage as f64, active_threads)
341                    .await;
342
343                refresh_count += 1;
344
345                // Sleep for 10 seconds between updates
346                tokio::time::sleep(Duration::from_secs(10)).await;
347            }
348        });
349    }
350
351    /// Create new admin state
352    ///
353    /// # Arguments
354    /// * `http_server_addr` - HTTP server address
355    /// * `ws_server_addr` - WebSocket server address
356    /// * `grpc_server_addr` - gRPC server address
357    /// * `graphql_server_addr` - GraphQL server address
358    /// * `api_enabled` - Whether API endpoints are enabled
359    /// * `admin_port` - Admin server port
360    /// * `chaos_api_state` - Optional chaos API state for hot-reload support
361    /// * `latency_injector` - Optional latency injector for hot-reload support
362    /// * `mockai` - Optional MockAI instance for hot-reload support
363    /// * `continuum_config` - Optional Reality Continuum configuration
364    /// * `virtual_clock` - Optional virtual clock for time-based progression
365    pub fn new(
366        http_server_addr: Option<std::net::SocketAddr>,
367        ws_server_addr: Option<std::net::SocketAddr>,
368        grpc_server_addr: Option<std::net::SocketAddr>,
369        graphql_server_addr: Option<std::net::SocketAddr>,
370        api_enabled: bool,
371        admin_port: u16,
372        chaos_api_state: Option<std::sync::Arc<mockforge_chaos::api::ChaosApiState>>,
373        latency_injector: Option<
374            std::sync::Arc<tokio::sync::RwLock<mockforge_core::latency::LatencyInjector>>,
375        >,
376        mockai: Option<
377            std::sync::Arc<tokio::sync::RwLock<mockforge_core::intelligent_behavior::MockAI>>,
378        >,
379        continuum_config: Option<mockforge_core::ContinuumConfig>,
380        virtual_clock: Option<std::sync::Arc<mockforge_core::VirtualClock>>,
381    ) -> Self {
382        let start_time = chrono::Utc::now();
383
384        Self {
385            http_server_addr,
386            ws_server_addr,
387            grpc_server_addr,
388            graphql_server_addr,
389            api_enabled,
390            admin_port,
391            start_time,
392            metrics: Arc::new(RwLock::new(RequestMetrics::default())),
393            system_metrics: Arc::new(RwLock::new(SystemMetrics {
394                memory_usage_mb: 0,
395                cpu_usage_percent: 0.0,
396                active_threads: 0,
397            })),
398            config: Arc::new(RwLock::new(ConfigurationState {
399                latency_profile: LatencyProfile {
400                    name: "default".to_string(),
401                    base_ms: 50,
402                    jitter_ms: 20,
403                    tag_overrides: HashMap::new(),
404                },
405                fault_config: FaultConfig {
406                    enabled: false,
407                    failure_rate: 0.0,
408                    status_codes: vec![500, 502, 503],
409                    active_failures: 0,
410                },
411                proxy_config: ProxyConfig {
412                    enabled: false,
413                    upstream_url: None,
414                    timeout_seconds: 30,
415                    requests_proxied: 0,
416                },
417                validation_settings: ValidationSettings {
418                    mode: "enforce".to_string(),
419                    aggregate_errors: true,
420                    validate_responses: false,
421                    overrides: HashMap::new(),
422                },
423            })),
424            logs: Arc::new(RwLock::new(Vec::new())),
425            time_series: Arc::new(RwLock::new(TimeSeriesData::default())),
426            restart_status: Arc::new(RwLock::new(RestartStatus {
427                in_progress: false,
428                initiated_at: None,
429                reason: None,
430                success: None,
431            })),
432            smoke_test_results: Arc::new(RwLock::new(Vec::new())),
433            import_history: Arc::new(RwLock::new(Vec::new())),
434            workspace_persistence: Arc::new(WorkspacePersistence::new("./workspaces")),
435            plugin_registry: Arc::new(RwLock::new(PluginRegistry::new())),
436            reality_engine: Arc::new(RwLock::new(mockforge_core::RealityEngine::new())),
437            continuum_engine: Arc::new(RwLock::new({
438                let config = continuum_config.unwrap_or_default();
439                if let Some(clock) = virtual_clock {
440                    mockforge_core::RealityContinuumEngine::with_virtual_clock(config, clock)
441                } else {
442                    mockforge_core::RealityContinuumEngine::new(config)
443                }
444            })),
445            chaos_api_state,
446            latency_injector,
447            mockai,
448        }
449    }
450
451    /// Record a request
452    pub async fn record_request(
453        &self,
454        method: &str,
455        path: &str,
456        status_code: u16,
457        response_time_ms: u64,
458        error: Option<String>,
459    ) {
460        let mut metrics = self.metrics.write().await;
461
462        metrics.total_requests += 1;
463        let endpoint = format!("{} {}", method, path);
464        *metrics.requests_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
465
466        if status_code >= 400 {
467            *metrics.errors_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
468        }
469
470        // Keep only last 100 response times globally
471        metrics.response_times.push(response_time_ms);
472        if metrics.response_times.len() > 100 {
473            metrics.response_times.remove(0);
474        }
475
476        // Keep only last 50 response times per endpoint
477        let endpoint_times = metrics
478            .response_times_by_endpoint
479            .entry(endpoint.clone())
480            .or_insert_with(Vec::new);
481        endpoint_times.push(response_time_ms);
482        if endpoint_times.len() > 50 {
483            endpoint_times.remove(0);
484        }
485
486        // Update last request timestamp for this endpoint
487        metrics.last_request_by_endpoint.insert(endpoint, chrono::Utc::now());
488
489        // Capture total_requests before releasing the lock
490        let total_requests = metrics.total_requests;
491
492        // Release metrics lock before acquiring other locks
493        drop(metrics);
494
495        // Update time series data for request count and response time
496        self.update_time_series_on_request(response_time_ms, total_requests).await;
497
498        // Record the log
499        let mut logs = self.logs.write().await;
500        let log_entry = RequestLog {
501            id: format!("req_{}", total_requests),
502            timestamp: Utc::now(),
503            method: method.to_string(),
504            path: path.to_string(),
505            status_code,
506            response_time_ms,
507            client_ip: None,
508            user_agent: None,
509            headers: HashMap::new(),
510            response_size_bytes: 0,
511            error_message: error,
512        };
513
514        logs.push(log_entry);
515
516        // Keep only last 1000 logs
517        if logs.len() > 1000 {
518            logs.remove(0);
519        }
520    }
521
522    /// Get current metrics
523    pub async fn get_metrics(&self) -> RequestMetrics {
524        self.metrics.read().await.clone()
525    }
526
527    /// Update system metrics
528    pub async fn update_system_metrics(&self, memory_mb: u64, cpu_percent: f64, threads: u32) {
529        let mut system_metrics = self.system_metrics.write().await;
530        system_metrics.memory_usage_mb = memory_mb;
531        system_metrics.cpu_usage_percent = cpu_percent;
532        system_metrics.active_threads = threads;
533
534        // Update time series data
535        self.update_time_series_data(memory_mb as f64, cpu_percent).await;
536    }
537
538    /// Update time series data with new metrics
539    async fn update_time_series_data(&self, memory_mb: f64, cpu_percent: f64) {
540        let now = chrono::Utc::now();
541        let mut time_series = self.time_series.write().await;
542
543        // Add memory usage data point
544        time_series.memory_usage.push(TimeSeriesPoint {
545            timestamp: now,
546            value: memory_mb,
547        });
548
549        // Add CPU usage data point
550        time_series.cpu_usage.push(TimeSeriesPoint {
551            timestamp: now,
552            value: cpu_percent,
553        });
554
555        // Add request count data point (from current metrics)
556        let metrics = self.metrics.read().await;
557        time_series.request_count.push(TimeSeriesPoint {
558            timestamp: now,
559            value: metrics.total_requests as f64,
560        });
561
562        // Add average response time data point
563        let avg_response_time = if !metrics.response_times.is_empty() {
564            metrics.response_times.iter().sum::<u64>() as f64 / metrics.response_times.len() as f64
565        } else {
566            0.0
567        };
568        time_series.response_time.push(TimeSeriesPoint {
569            timestamp: now,
570            value: avg_response_time,
571        });
572
573        // Keep only last 100 data points for each metric to prevent memory bloat
574        const MAX_POINTS: usize = 100;
575        if time_series.memory_usage.len() > MAX_POINTS {
576            time_series.memory_usage.remove(0);
577        }
578        if time_series.cpu_usage.len() > MAX_POINTS {
579            time_series.cpu_usage.remove(0);
580        }
581        if time_series.request_count.len() > MAX_POINTS {
582            time_series.request_count.remove(0);
583        }
584        if time_series.response_time.len() > MAX_POINTS {
585            time_series.response_time.remove(0);
586        }
587    }
588
589    /// Get system metrics
590    pub async fn get_system_metrics(&self) -> SystemMetrics {
591        self.system_metrics.read().await.clone()
592    }
593
594    /// Get time series data
595    pub async fn get_time_series_data(&self) -> TimeSeriesData {
596        self.time_series.read().await.clone()
597    }
598
599    /// Get restart status
600    pub async fn get_restart_status(&self) -> RestartStatus {
601        self.restart_status.read().await.clone()
602    }
603
604    /// Initiate server restart
605    pub async fn initiate_restart(&self, reason: String) -> Result<()> {
606        let mut status = self.restart_status.write().await;
607
608        if status.in_progress {
609            return Err(Error::generic("Restart already in progress".to_string()));
610        }
611
612        status.in_progress = true;
613        status.initiated_at = Some(chrono::Utc::now());
614        status.reason = Some(reason);
615        status.success = None;
616
617        Ok(())
618    }
619
620    /// Complete restart (success or failure)
621    pub async fn complete_restart(&self, success: bool) {
622        let mut status = self.restart_status.write().await;
623        status.in_progress = false;
624        status.success = Some(success);
625    }
626
627    /// Get smoke test results
628    pub async fn get_smoke_test_results(&self) -> Vec<SmokeTestResult> {
629        self.smoke_test_results.read().await.clone()
630    }
631
632    /// Update smoke test result
633    pub async fn update_smoke_test_result(&self, result: SmokeTestResult) {
634        let mut results = self.smoke_test_results.write().await;
635
636        // Find existing result by ID and update, or add new one
637        if let Some(existing) = results.iter_mut().find(|r| r.id == result.id) {
638            *existing = result;
639        } else {
640            results.push(result);
641        }
642
643        // Keep only last 100 test results
644        if results.len() > 100 {
645            results.remove(0);
646        }
647    }
648
649    /// Clear all smoke test results
650    pub async fn clear_smoke_test_results(&self) {
651        let mut results = self.smoke_test_results.write().await;
652        results.clear();
653    }
654
655    /// Update time series data when a request is recorded
656    async fn update_time_series_on_request(&self, response_time_ms: u64, total_requests: u64) {
657        let now = chrono::Utc::now();
658        let mut time_series = self.time_series.write().await;
659
660        // Add request count data point
661        time_series.request_count.push(TimeSeriesPoint {
662            timestamp: now,
663            value: total_requests as f64,
664        });
665
666        // Add response time data point
667        time_series.response_time.push(TimeSeriesPoint {
668            timestamp: now,
669            value: response_time_ms as f64,
670        });
671
672        // Keep only last 100 data points for each metric to prevent memory bloat
673        const MAX_POINTS: usize = 100;
674        if time_series.request_count.len() > MAX_POINTS {
675            time_series.request_count.remove(0);
676        }
677        if time_series.response_time.len() > MAX_POINTS {
678            time_series.response_time.remove(0);
679        }
680    }
681
682    /// Get current configuration
683    pub async fn get_config(&self) -> ConfigurationState {
684        self.config.read().await.clone()
685    }
686
687    /// Update latency configuration
688    pub async fn update_latency_config(
689        &self,
690        base_ms: u64,
691        jitter_ms: u64,
692        tag_overrides: HashMap<String, u64>,
693    ) {
694        let mut config = self.config.write().await;
695        config.latency_profile.base_ms = base_ms;
696        config.latency_profile.jitter_ms = jitter_ms;
697        config.latency_profile.tag_overrides = tag_overrides;
698    }
699
700    /// Update fault configuration
701    pub async fn update_fault_config(
702        &self,
703        enabled: bool,
704        failure_rate: f64,
705        status_codes: Vec<u16>,
706    ) {
707        let mut config = self.config.write().await;
708        config.fault_config.enabled = enabled;
709        config.fault_config.failure_rate = failure_rate;
710        config.fault_config.status_codes = status_codes;
711    }
712
713    /// Update proxy configuration
714    pub async fn update_proxy_config(
715        &self,
716        enabled: bool,
717        upstream_url: Option<String>,
718        timeout_seconds: u64,
719    ) {
720        let mut config = self.config.write().await;
721        config.proxy_config.enabled = enabled;
722        config.proxy_config.upstream_url = upstream_url;
723        config.proxy_config.timeout_seconds = timeout_seconds;
724    }
725
726    /// Update validation settings
727    pub async fn update_validation_config(
728        &self,
729        mode: String,
730        aggregate_errors: bool,
731        validate_responses: bool,
732        overrides: HashMap<String, String>,
733    ) {
734        let mut config = self.config.write().await;
735        config.validation_settings.mode = mode;
736        config.validation_settings.aggregate_errors = aggregate_errors;
737        config.validation_settings.validate_responses = validate_responses;
738        config.validation_settings.overrides = overrides;
739    }
740
741    /// Get filtered logs
742    pub async fn get_logs_filtered(&self, filter: &LogFilter) -> Vec<RequestLog> {
743        let logs = self.logs.read().await;
744
745        logs.iter()
746            .rev() // Most recent first
747            .filter(|log| {
748                if let Some(ref method) = filter.method {
749                    if log.method != *method {
750                        return false;
751                    }
752                }
753                if let Some(ref path_pattern) = filter.path_pattern {
754                    if !log.path.contains(path_pattern) {
755                        return false;
756                    }
757                }
758                if let Some(status) = filter.status_code {
759                    if log.status_code != status {
760                        return false;
761                    }
762                }
763                true
764            })
765            .take(filter.limit.unwrap_or(100))
766            .cloned()
767            .collect()
768    }
769
770    /// Clear all logs
771    pub async fn clear_logs(&self) {
772        let mut logs = self.logs.write().await;
773        logs.clear();
774    }
775}
776
777/// Serve the main admin interface
778pub async fn serve_admin_html() -> Html<&'static str> {
779    Html(crate::get_admin_html())
780}
781
782/// Serve admin CSS
783pub async fn serve_admin_css() -> ([(http::HeaderName, &'static str); 1], &'static str) {
784    ([(http::header::CONTENT_TYPE, "text/css")], crate::get_admin_css())
785}
786
787/// Serve admin JavaScript
788pub async fn serve_admin_js() -> ([(http::HeaderName, &'static str); 1], &'static str) {
789    ([(http::header::CONTENT_TYPE, "application/javascript")], crate::get_admin_js())
790}
791
792/// Get dashboard data
793pub async fn get_dashboard(State(state): State<AdminState>) -> Json<ApiResponse<DashboardData>> {
794    let uptime = Utc::now().signed_duration_since(state.start_time).num_seconds() as u64;
795
796    // Get system metrics from state
797    let system_metrics = state.get_system_metrics().await;
798    let _config = state.get_config().await;
799
800    // Get recent logs and calculate metrics from centralized logger
801    let (recent_logs, calculated_metrics): (Vec<RequestLog>, RequestMetrics) =
802        if let Some(global_logger) = mockforge_core::get_global_logger() {
803            // Get all logs to calculate metrics
804            let all_logs = global_logger.get_recent_logs(None).await;
805            let recent_logs_subset = global_logger.get_recent_logs(Some(20)).await;
806
807            // Calculate metrics from logs
808            let total_requests = all_logs.len() as u64;
809            let mut requests_by_endpoint = HashMap::new();
810            let mut errors_by_endpoint = HashMap::new();
811            let mut response_times = Vec::new();
812            let mut last_request_by_endpoint = HashMap::new();
813
814            for log in &all_logs {
815                let endpoint_key = format!("{} {}", log.method, log.path);
816                *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
817
818                if log.status_code >= 400 {
819                    *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
820                }
821
822                response_times.push(log.response_time_ms);
823                last_request_by_endpoint.insert(endpoint_key, log.timestamp);
824            }
825
826            let calculated_metrics = RequestMetrics {
827                total_requests,
828                active_connections: 0, // We don't track this from logs
829                requests_by_endpoint,
830                response_times,
831                response_times_by_endpoint: HashMap::new(), // Simplified for now
832                errors_by_endpoint,
833                last_request_by_endpoint,
834            };
835
836            // Convert to RequestLog format for admin UI
837            let recent_logs = recent_logs_subset
838                .into_iter()
839                .map(|log| RequestLog {
840                    id: log.id,
841                    timestamp: log.timestamp,
842                    method: log.method,
843                    path: log.path,
844                    status_code: log.status_code,
845                    response_time_ms: log.response_time_ms,
846                    client_ip: log.client_ip,
847                    user_agent: log.user_agent,
848                    headers: log.headers,
849                    response_size_bytes: log.response_size_bytes,
850                    error_message: log.error_message,
851                })
852                .collect();
853
854            (recent_logs, calculated_metrics)
855        } else {
856            // Fallback to local logs if centralized logger not available
857            let logs = state.logs.read().await;
858            let recent_logs = logs.iter().rev().take(10).cloned().collect();
859            let metrics = state.get_metrics().await;
860            (recent_logs, metrics)
861        };
862
863    let metrics = calculated_metrics;
864
865    let system_info = SystemInfo {
866        version: env!("CARGO_PKG_VERSION").to_string(),
867        uptime_seconds: uptime,
868        memory_usage_mb: system_metrics.memory_usage_mb,
869        cpu_usage_percent: system_metrics.cpu_usage_percent,
870        active_threads: system_metrics.active_threads as usize,
871        total_routes: metrics.requests_by_endpoint.len(),
872        total_fixtures: count_fixtures().unwrap_or(0),
873    };
874
875    let servers = vec![
876        ServerStatus {
877            server_type: "HTTP".to_string(),
878            address: state.http_server_addr.map(|addr| addr.to_string()),
879            running: state.http_server_addr.is_some(),
880            start_time: Some(state.start_time),
881            uptime_seconds: Some(uptime),
882            active_connections: metrics.active_connections,
883            total_requests: count_requests_by_server_type(&metrics, "HTTP"),
884        },
885        ServerStatus {
886            server_type: "WebSocket".to_string(),
887            address: state.ws_server_addr.map(|addr| addr.to_string()),
888            running: state.ws_server_addr.is_some(),
889            start_time: Some(state.start_time),
890            uptime_seconds: Some(uptime),
891            active_connections: metrics.active_connections / 2, // Estimate
892            total_requests: count_requests_by_server_type(&metrics, "WebSocket"),
893        },
894        ServerStatus {
895            server_type: "gRPC".to_string(),
896            address: state.grpc_server_addr.map(|addr| addr.to_string()),
897            running: state.grpc_server_addr.is_some(),
898            start_time: Some(state.start_time),
899            uptime_seconds: Some(uptime),
900            active_connections: metrics.active_connections / 3, // Estimate
901            total_requests: count_requests_by_server_type(&metrics, "gRPC"),
902        },
903    ];
904
905    // Build routes info from actual request metrics
906    let mut routes = Vec::new();
907    for (endpoint, count) in &metrics.requests_by_endpoint {
908        let parts: Vec<&str> = endpoint.splitn(2, ' ').collect();
909        if parts.len() == 2 {
910            let method = parts[0].to_string();
911            let path = parts[1].to_string();
912            let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
913
914            routes.push(RouteInfo {
915                method: Some(method.clone()),
916                path: path.clone(),
917                priority: 0,
918                has_fixtures: route_has_fixtures(&method, &path),
919                latency_ms: calculate_endpoint_latency(&metrics, endpoint),
920                request_count: *count,
921                last_request: get_endpoint_last_request(&metrics, endpoint),
922                error_count,
923            });
924        }
925    }
926
927    let dashboard = DashboardData {
928        server_info: ServerInfo {
929            version: env!("CARGO_PKG_VERSION").to_string(),
930            build_time: option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("unknown").to_string(),
931            git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("unknown").to_string(),
932            http_server: state.http_server_addr.map(|addr| addr.to_string()),
933            ws_server: state.ws_server_addr.map(|addr| addr.to_string()),
934            grpc_server: state.grpc_server_addr.map(|addr| addr.to_string()),
935            graphql_server: state.graphql_server_addr.map(|addr| addr.to_string()),
936            api_enabled: state.api_enabled,
937            admin_port: state.admin_port,
938        },
939        system_info: DashboardSystemInfo {
940            os: std::env::consts::OS.to_string(),
941            arch: std::env::consts::ARCH.to_string(),
942            uptime,
943            memory_usage: system_metrics.memory_usage_mb * 1024 * 1024, // Convert MB to bytes
944        },
945        metrics: SimpleMetricsData {
946            total_requests: metrics.requests_by_endpoint.values().sum(),
947            active_requests: metrics.active_connections,
948            average_response_time: if metrics.response_times.is_empty() {
949                0.0
950            } else {
951                metrics.response_times.iter().sum::<u64>() as f64
952                    / metrics.response_times.len() as f64
953            },
954            error_rate: {
955                let total_requests = metrics.requests_by_endpoint.values().sum::<u64>();
956                let total_errors = metrics.errors_by_endpoint.values().sum::<u64>();
957                if total_requests == 0 {
958                    0.0
959                } else {
960                    total_errors as f64 / total_requests as f64
961                }
962            },
963        },
964        servers,
965        recent_logs,
966        system: system_info,
967    };
968
969    Json(ApiResponse::success(dashboard))
970}
971
972/// Get routes by proxying to HTTP server
973pub async fn get_routes(State(state): State<AdminState>) -> impl IntoResponse {
974    if let Some(http_addr) = state.http_server_addr {
975        // Try to fetch routes from the HTTP server
976        let url = format!("http://{}/__mockforge/routes", http_addr);
977        if let Ok(response) = reqwest::get(&url).await {
978            if response.status().is_success() {
979                if let Ok(body) = response.text().await {
980                    return (StatusCode::OK, [("content-type", "application/json")], body);
981                }
982            }
983        }
984    }
985
986    // Fallback: return empty routes
987    (
988        StatusCode::OK,
989        [("content-type", "application/json")],
990        r#"{"routes":[]}"#.to_string(),
991    )
992}
993
994/// Get server info (HTTP server address for API calls)
995pub async fn get_server_info(State(state): State<AdminState>) -> Json<serde_json::Value> {
996    Json(json!({
997        "http_server": state.http_server_addr.map(|addr| addr.to_string()),
998        "ws_server": state.ws_server_addr.map(|addr| addr.to_string()),
999        "grpc_server": state.grpc_server_addr.map(|addr| addr.to_string()),
1000        "admin_port": state.admin_port
1001    }))
1002}
1003
1004/// Get health check status
1005pub async fn get_health() -> Json<HealthCheck> {
1006    Json(
1007        HealthCheck::healthy()
1008            .with_service("http".to_string(), "healthy".to_string())
1009            .with_service("websocket".to_string(), "healthy".to_string())
1010            .with_service("grpc".to_string(), "healthy".to_string()),
1011    )
1012}
1013
1014/// Get request logs with optional filtering
1015pub async fn get_logs(
1016    State(state): State<AdminState>,
1017    Query(params): Query<HashMap<String, String>>,
1018) -> Json<ApiResponse<Vec<RequestLog>>> {
1019    let mut filter = LogFilter::default();
1020
1021    if let Some(method) = params.get("method") {
1022        filter.method = Some(method.clone());
1023    }
1024    if let Some(path) = params.get("path") {
1025        filter.path_pattern = Some(path.clone());
1026    }
1027    if let Some(status) = params.get("status").and_then(|s| s.parse().ok()) {
1028        filter.status_code = Some(status);
1029    }
1030    if let Some(limit) = params.get("limit").and_then(|s| s.parse().ok()) {
1031        filter.limit = Some(limit);
1032    }
1033
1034    // Get logs from centralized logger (same as dashboard)
1035    let logs = if let Some(global_logger) = mockforge_core::get_global_logger() {
1036        // Get logs from centralized logger
1037        let centralized_logs = global_logger.get_recent_logs(filter.limit).await;
1038
1039        // Convert to RequestLog format and apply filters
1040        centralized_logs
1041            .into_iter()
1042            .filter(|log| {
1043                if let Some(ref method) = filter.method {
1044                    if log.method != *method {
1045                        return false;
1046                    }
1047                }
1048                if let Some(ref path_pattern) = filter.path_pattern {
1049                    if !log.path.contains(path_pattern) {
1050                        return false;
1051                    }
1052                }
1053                if let Some(status) = filter.status_code {
1054                    if log.status_code != status {
1055                        return false;
1056                    }
1057                }
1058                true
1059            })
1060            .map(|log| RequestLog {
1061                id: log.id,
1062                timestamp: log.timestamp,
1063                method: log.method,
1064                path: log.path,
1065                status_code: log.status_code,
1066                response_time_ms: log.response_time_ms,
1067                client_ip: log.client_ip,
1068                user_agent: log.user_agent,
1069                headers: log.headers,
1070                response_size_bytes: log.response_size_bytes,
1071                error_message: log.error_message,
1072            })
1073            .collect()
1074    } else {
1075        // Fallback to local logs if centralized logger not available
1076        state.get_logs_filtered(&filter).await
1077    };
1078
1079    Json(ApiResponse::success(logs))
1080}
1081
1082/// Get reality trace metadata for a specific request
1083///
1084/// GET /__mockforge/api/reality/trace/:request_id
1085pub async fn get_reality_trace(
1086    Path(request_id): Path<String>,
1087) -> Json<ApiResponse<Option<mockforge_core::request_logger::RealityTraceMetadata>>> {
1088    if let Some(global_logger) = mockforge_core::get_global_logger() {
1089        let logs = global_logger.get_recent_logs(None).await;
1090        if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
1091            Json(ApiResponse::success(log_entry.reality_metadata))
1092        } else {
1093            Json(ApiResponse::error(format!("Request {} not found", request_id)))
1094        }
1095    } else {
1096        Json(ApiResponse::error("Request logger not initialized".to_string()))
1097    }
1098}
1099
1100/// Get response generation trace for a specific request
1101///
1102/// GET /__mockforge/api/reality/response-trace/:request_id
1103pub async fn get_response_trace(
1104    Path(request_id): Path<String>,
1105) -> Json<ApiResponse<Option<serde_json::Value>>> {
1106    if let Some(global_logger) = mockforge_core::get_global_logger() {
1107        let logs = global_logger.get_recent_logs(None).await;
1108        if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
1109            // Response generation trace would be stored in metadata
1110            // For now, return the metadata as JSON
1111            let trace = log_entry
1112                .metadata
1113                .get("response_generation_trace")
1114                .and_then(|s| serde_json::from_str::<serde_json::Value>(s).ok());
1115            Json(ApiResponse::success(trace))
1116        } else {
1117            Json(ApiResponse::error(format!("Request {} not found", request_id)))
1118        }
1119    } else {
1120        Json(ApiResponse::error("Request logger not initialized".to_string()))
1121    }
1122}
1123
// Configuration for recent logs display
// Maximum number of entries fetched from the centralized logger per SSE poll.
const RECENT_LOGS_LIMIT: usize = 20;
// Age cutoff in minutes: entries older than this are not streamed to SSE clients.
const RECENT_LOGS_TTL_MINUTES: i64 = 5;
1127
1128/// SSE endpoint for real-time log streaming
1129pub async fn logs_sse(
1130    State(_state): State<AdminState>,
1131) -> Sse<impl Stream<Item = std::result::Result<Event, Infallible>>> {
1132    tracing::info!("SSE endpoint /logs/sse accessed - starting real-time log streaming for recent requests only");
1133
1134    let stream = stream::unfold(std::collections::HashSet::new(), |mut seen_ids| async move {
1135        tokio::time::sleep(Duration::from_millis(500)).await;
1136
1137        // Get recent logs from centralized logger (limit to recent entries for dashboard)
1138        if let Some(global_logger) = mockforge_core::get_global_logger() {
1139            let centralized_logs = global_logger.get_recent_logs(Some(RECENT_LOGS_LIMIT)).await;
1140
1141            tracing::debug!(
1142                "SSE: Checking logs - total logs: {}, seen logs: {}",
1143                centralized_logs.len(),
1144                seen_ids.len()
1145            );
1146
1147            // Filter for recent logs within TTL
1148            let now = chrono::Utc::now();
1149            let ttl_cutoff = now - chrono::Duration::minutes(RECENT_LOGS_TTL_MINUTES);
1150
1151            // Find new logs that haven't been seen before
1152            let new_logs: Vec<RequestLog> = centralized_logs
1153                .into_iter()
1154                .filter(|log| {
1155                    // Only include logs from the last X minutes and not yet seen
1156                    log.timestamp > ttl_cutoff && !seen_ids.contains(&log.id)
1157                })
1158                .map(|log| RequestLog {
1159                    id: log.id,
1160                    timestamp: log.timestamp,
1161                    method: log.method,
1162                    path: log.path,
1163                    status_code: log.status_code,
1164                    response_time_ms: log.response_time_ms,
1165                    client_ip: log.client_ip,
1166                    user_agent: log.user_agent,
1167                    headers: log.headers,
1168                    response_size_bytes: log.response_size_bytes,
1169                    error_message: log.error_message,
1170                })
1171                .collect();
1172
1173            // Add new log IDs to the seen set
1174            for log in &new_logs {
1175                seen_ids.insert(log.id.clone());
1176            }
1177
1178            // Send new logs if any
1179            if !new_logs.is_empty() {
1180                tracing::info!("SSE: Sending {} new logs to client", new_logs.len());
1181
1182                let event_data = serde_json::to_string(&new_logs).unwrap_or_default();
1183                let event = Ok(Event::default().event("new_logs").data(event_data));
1184
1185                return Some((event, seen_ids));
1186            }
1187        }
1188
1189        // Send keep-alive
1190        let event = Ok(Event::default().event("keep_alive").data(""));
1191        Some((event, seen_ids))
1192    });
1193
1194    Sse::new(stream).keep_alive(
1195        axum::response::sse::KeepAlive::new()
1196            .interval(Duration::from_secs(15))
1197            .text("keep-alive-text"),
1198    )
1199}
1200
1201/// Get metrics data
1202pub async fn get_metrics(State(state): State<AdminState>) -> Json<ApiResponse<MetricsData>> {
1203    // Get metrics from global logger (same as get_dashboard)
1204    let metrics = if let Some(global_logger) = mockforge_core::get_global_logger() {
1205        let all_logs = global_logger.get_recent_logs(None).await;
1206
1207        let total_requests = all_logs.len() as u64;
1208        let mut requests_by_endpoint = HashMap::new();
1209        let mut errors_by_endpoint = HashMap::new();
1210        let mut response_times = Vec::new();
1211        let mut last_request_by_endpoint = HashMap::new();
1212
1213        for log in &all_logs {
1214            let endpoint_key = format!("{} {}", log.method, log.path);
1215            *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1216
1217            if log.status_code >= 400 {
1218                *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1219            }
1220
1221            response_times.push(log.response_time_ms);
1222            last_request_by_endpoint.insert(endpoint_key, log.timestamp);
1223        }
1224
1225        RequestMetrics {
1226            total_requests,
1227            active_connections: 0,
1228            requests_by_endpoint,
1229            response_times,
1230            response_times_by_endpoint: HashMap::new(),
1231            errors_by_endpoint,
1232            last_request_by_endpoint,
1233        }
1234    } else {
1235        state.get_metrics().await
1236    };
1237
1238    let system_metrics = state.get_system_metrics().await;
1239    let time_series = state.get_time_series_data().await;
1240
1241    // Helper function to calculate percentile from sorted array
1242    fn calculate_percentile(sorted_data: &[u64], percentile: f64) -> u64 {
1243        if sorted_data.is_empty() {
1244            return 0;
1245        }
1246        let idx = ((sorted_data.len() as f64) * percentile).ceil() as usize;
1247        let idx = idx.min(sorted_data.len().saturating_sub(1));
1248        sorted_data[idx]
1249    }
1250
1251    // Calculate percentiles from response times (p50, p75, p90, p95, p99, p99.9)
1252    let mut response_times = metrics.response_times.clone();
1253    response_times.sort();
1254
1255    let p50 = calculate_percentile(&response_times, 0.50);
1256    let p75 = calculate_percentile(&response_times, 0.75);
1257    let p90 = calculate_percentile(&response_times, 0.90);
1258    let p95 = calculate_percentile(&response_times, 0.95);
1259    let p99 = calculate_percentile(&response_times, 0.99);
1260    let p999 = calculate_percentile(&response_times, 0.999);
1261
1262    // Calculate per-endpoint percentiles for detailed analysis
1263    let mut response_times_by_endpoint: HashMap<String, Vec<u64>> = HashMap::new();
1264    if let Some(global_logger) = mockforge_core::get_global_logger() {
1265        let all_logs = global_logger.get_recent_logs(None).await;
1266        for log in &all_logs {
1267            let endpoint_key = format!("{} {}", log.method, log.path);
1268            response_times_by_endpoint
1269                .entry(endpoint_key)
1270                .or_default()
1271                .push(log.response_time_ms);
1272        }
1273    }
1274
1275    // Calculate percentiles for each endpoint
1276    let mut endpoint_percentiles: HashMap<String, HashMap<String, u64>> = HashMap::new();
1277    for (endpoint, times) in &mut response_times_by_endpoint {
1278        times.sort();
1279        if !times.is_empty() {
1280            endpoint_percentiles.insert(
1281                endpoint.clone(),
1282                HashMap::from([
1283                    ("p50".to_string(), calculate_percentile(times, 0.50)),
1284                    ("p75".to_string(), calculate_percentile(times, 0.75)),
1285                    ("p90".to_string(), calculate_percentile(times, 0.90)),
1286                    ("p95".to_string(), calculate_percentile(times, 0.95)),
1287                    ("p99".to_string(), calculate_percentile(times, 0.99)),
1288                    ("p999".to_string(), calculate_percentile(times, 0.999)),
1289                ]),
1290            );
1291        }
1292    }
1293
1294    // Calculate error rates
1295    let mut error_rate_by_endpoint = HashMap::new();
1296    for (endpoint, total_count) in &metrics.requests_by_endpoint {
1297        let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
1298        let error_rate = if *total_count > 0 {
1299            error_count as f64 / *total_count as f64
1300        } else {
1301            0.0
1302        };
1303        error_rate_by_endpoint.insert(endpoint.clone(), error_rate);
1304    }
1305
1306    // Convert time series data to the format expected by the frontend
1307    // If no time series data exists yet, use current system metrics as a fallback
1308    let memory_usage_over_time = if time_series.memory_usage.is_empty() {
1309        vec![(Utc::now(), system_metrics.memory_usage_mb)]
1310    } else {
1311        time_series
1312            .memory_usage
1313            .iter()
1314            .map(|point| (point.timestamp, point.value as u64))
1315            .collect()
1316    };
1317
1318    let cpu_usage_over_time = if time_series.cpu_usage.is_empty() {
1319        vec![(Utc::now(), system_metrics.cpu_usage_percent)]
1320    } else {
1321        time_series
1322            .cpu_usage
1323            .iter()
1324            .map(|point| (point.timestamp, point.value))
1325            .collect()
1326    };
1327
1328    // Build time-series latency data (last 100 data points)
1329    let latency_over_time: Vec<(chrono::DateTime<chrono::Utc>, u64)> =
1330        if let Some(global_logger) = mockforge_core::get_global_logger() {
1331            let all_logs = global_logger.get_recent_logs(Some(100)).await;
1332            all_logs.iter().map(|log| (log.timestamp, log.response_time_ms)).collect()
1333        } else {
1334            Vec::new()
1335        };
1336
1337    let metrics_data = MetricsData {
1338        requests_by_endpoint: metrics.requests_by_endpoint,
1339        response_time_percentiles: HashMap::from([
1340            ("p50".to_string(), p50),
1341            ("p75".to_string(), p75),
1342            ("p90".to_string(), p90),
1343            ("p95".to_string(), p95),
1344            ("p99".to_string(), p99),
1345            ("p999".to_string(), p999),
1346        ]),
1347        endpoint_percentiles: Some(endpoint_percentiles),
1348        latency_over_time: Some(latency_over_time),
1349        error_rate_by_endpoint,
1350        memory_usage_over_time,
1351        cpu_usage_over_time,
1352    };
1353
1354    Json(ApiResponse::success(metrics_data))
1355}
1356
1357/// Update latency profile
1358pub async fn update_latency(
1359    State(state): State<AdminState>,
1360    headers: axum::http::HeaderMap,
1361    Json(update): Json<ConfigUpdate>,
1362) -> Json<ApiResponse<String>> {
1363    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
1364    use crate::rbac::{extract_user_context, get_default_user_context};
1365
1366    if update.config_type != "latency" {
1367        return Json(ApiResponse::error("Invalid config type".to_string()));
1368    }
1369
1370    // Extract latency configuration from the update data
1371    let base_ms = update.data.get("base_ms").and_then(|v| v.as_u64()).unwrap_or(50);
1372    let jitter_ms = update.data.get("jitter_ms").and_then(|v| v.as_u64()).unwrap_or(20);
1373
1374    let tag_overrides: std::collections::HashMap<String, u64> = update
1375        .data
1376        .get("tag_overrides")
1377        .and_then(|v| v.as_object())
1378        .map(|obj| obj.iter().filter_map(|(k, v)| v.as_u64().map(|val| (k.clone(), val))).collect())
1379        .unwrap_or_default();
1380
1381    // Update the actual configuration
1382    state.update_latency_config(base_ms, jitter_ms, tag_overrides.clone()).await;
1383
1384    // Record audit log with user context
1385    if let Some(audit_store) = get_global_audit_store() {
1386        let metadata = serde_json::json!({
1387            "base_ms": base_ms,
1388            "jitter_ms": jitter_ms,
1389            "tag_overrides": tag_overrides,
1390        });
1391        let mut audit_log = create_audit_log(
1392            AdminActionType::ConfigLatencyUpdated,
1393            format!("Latency profile updated: base_ms={}, jitter_ms={}", base_ms, jitter_ms),
1394            None,
1395            true,
1396            None,
1397            Some(metadata),
1398        );
1399
1400        // Extract user context from headers
1401        if let Some(user_ctx) = extract_user_context(&headers).or_else(get_default_user_context) {
1402            audit_log.user_id = Some(user_ctx.user_id);
1403            audit_log.username = Some(user_ctx.username);
1404        }
1405
1406        // Extract IP address from headers
1407        if let Some(ip) = headers
1408            .get("x-forwarded-for")
1409            .or_else(|| headers.get("x-real-ip"))
1410            .and_then(|h| h.to_str().ok())
1411        {
1412            audit_log.ip_address = Some(ip.to_string());
1413        }
1414
1415        // Extract user agent
1416        if let Some(ua) = headers.get("user-agent").and_then(|h| h.to_str().ok()) {
1417            audit_log.user_agent = Some(ua.to_string());
1418        }
1419
1420        audit_store.record(audit_log).await;
1421    }
1422
1423    tracing::info!("Updated latency profile: base_ms={}, jitter_ms={}", base_ms, jitter_ms);
1424
1425    Json(ApiResponse::success("Latency profile updated".to_string()))
1426}
1427
1428/// Update fault injection configuration
1429pub async fn update_faults(
1430    State(state): State<AdminState>,
1431    Json(update): Json<ConfigUpdate>,
1432) -> Json<ApiResponse<String>> {
1433    if update.config_type != "faults" {
1434        return Json(ApiResponse::error("Invalid config type".to_string()));
1435    }
1436
1437    // Extract fault configuration from the update data
1438    let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1439
1440    let failure_rate = update.data.get("failure_rate").and_then(|v| v.as_f64()).unwrap_or(0.0);
1441
1442    let status_codes = update
1443        .data
1444        .get("status_codes")
1445        .and_then(|v| v.as_array())
1446        .map(|arr| arr.iter().filter_map(|v| v.as_u64().map(|n| n as u16)).collect())
1447        .unwrap_or_else(|| vec![500, 502, 503]);
1448
1449    // Update the actual configuration
1450    state.update_fault_config(enabled, failure_rate, status_codes).await;
1451
1452    tracing::info!(
1453        "Updated fault configuration: enabled={}, failure_rate={}",
1454        enabled,
1455        failure_rate
1456    );
1457
1458    Json(ApiResponse::success("Fault configuration updated".to_string()))
1459}
1460
1461/// Update proxy configuration
1462pub async fn update_proxy(
1463    State(state): State<AdminState>,
1464    Json(update): Json<ConfigUpdate>,
1465) -> Json<ApiResponse<String>> {
1466    if update.config_type != "proxy" {
1467        return Json(ApiResponse::error("Invalid config type".to_string()));
1468    }
1469
1470    // Extract proxy configuration from the update data
1471    let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1472
1473    let upstream_url =
1474        update.data.get("upstream_url").and_then(|v| v.as_str()).map(|s| s.to_string());
1475
1476    let timeout_seconds = update.data.get("timeout_seconds").and_then(|v| v.as_u64()).unwrap_or(30);
1477
1478    // Update the actual configuration
1479    state.update_proxy_config(enabled, upstream_url.clone(), timeout_seconds).await;
1480
1481    tracing::info!(
1482        "Updated proxy configuration: enabled={}, upstream_url={:?}",
1483        enabled,
1484        upstream_url
1485    );
1486
1487    Json(ApiResponse::success("Proxy configuration updated".to_string()))
1488}
1489
/// Clear request logs
///
/// Removes all request logs held in the admin state's local buffer.
///
/// NOTE(review): this clears only `state.logs`; the dashboard, `get_logs`,
/// and the SSE stream read from the centralized logger
/// (`mockforge_core::get_global_logger()`) when it is available, so entries
/// shown there may survive this call — confirm whether the centralized
/// logger should also be cleared here.
pub async fn clear_logs(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    // Clear the actual logs from state
    state.clear_logs().await;
    tracing::info!("Cleared all request logs");

    Json(ApiResponse::success("Logs cleared".to_string()))
}
1498
1499/// Restart servers
1500pub async fn restart_servers(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
1501    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
1502    // Check if restart is already in progress
1503    let current_status = state.get_restart_status().await;
1504    if current_status.in_progress {
1505        return Json(ApiResponse::error("Server restart already in progress".to_string()));
1506    }
1507
1508    // Initiate restart status
1509    let restart_result = state
1510        .initiate_restart("Manual restart requested via admin UI".to_string())
1511        .await;
1512
1513    let success = restart_result.is_ok();
1514    let error_msg = restart_result.as_ref().err().map(|e| format!("{}", e));
1515
1516    // Record audit log
1517    if let Some(audit_store) = get_global_audit_store() {
1518        let audit_log = create_audit_log(
1519            AdminActionType::ServerRestarted,
1520            "Server restart initiated via admin UI".to_string(),
1521            None,
1522            success,
1523            error_msg.clone(),
1524            None,
1525        );
1526        audit_store.record(audit_log).await;
1527    }
1528
1529    if let Err(e) = restart_result {
1530        return Json(ApiResponse::error(format!("Failed to initiate restart: {}", e)));
1531    }
1532
1533    // Spawn restart task to avoid blocking the response
1534    let state_clone = state.clone();
1535    tokio::spawn(async move {
1536        if let Err(e) = perform_server_restart(&state_clone).await {
1537            tracing::error!("Server restart failed: {}", e);
1538            state_clone.complete_restart(false).await;
1539        } else {
1540            tracing::info!("Server restart completed successfully");
1541            state_clone.complete_restart(true).await;
1542        }
1543    });
1544
1545    tracing::info!("Server restart initiated via admin UI");
1546    Json(ApiResponse::success(
1547        "Server restart initiated. Please wait for completion.".to_string(),
1548    ))
1549}
1550
1551/// Perform the actual server restart
1552async fn perform_server_restart(_state: &AdminState) -> Result<()> {
1553    // Get the current process ID
1554    let current_pid = std::process::id();
1555    tracing::info!("Initiating restart for process PID: {}", current_pid);
1556
1557    // Try to find the parent process (MockForge CLI)
1558    let parent_pid = get_parent_process_id(current_pid).await?;
1559    tracing::info!("Found parent process PID: {}", parent_pid);
1560
1561    // Method 1: Try to restart via parent process signal
1562    if let Ok(()) = restart_via_parent_signal(parent_pid).await {
1563        tracing::info!("Restart initiated via parent process signal");
1564        return Ok(());
1565    }
1566
1567    // Method 2: Fallback to process replacement
1568    if let Ok(()) = restart_via_process_replacement().await {
1569        tracing::info!("Restart initiated via process replacement");
1570        return Ok(());
1571    }
1572
1573    // Method 3: Last resort - graceful shutdown with restart script
1574    restart_via_script().await
1575}
1576
1577/// Get parent process ID
1578async fn get_parent_process_id(pid: u32) -> Result<u32> {
1579    // Try to read from /proc/pid/stat on Linux
1580    #[cfg(target_os = "linux")]
1581    {
1582        // Read /proc filesystem using spawn_blocking
1583        let stat_path = format!("/proc/{}/stat", pid);
1584        if let Ok(ppid) = tokio::task::spawn_blocking(move || -> Result<u32> {
1585            let content = std::fs::read_to_string(&stat_path)
1586                .map_err(|e| Error::generic(format!("Failed to read {}: {}", stat_path, e)))?;
1587
1588            let fields: Vec<&str> = content.split_whitespace().collect();
1589            if fields.len() > 3 {
1590                fields[3]
1591                    .parse::<u32>()
1592                    .map_err(|e| Error::generic(format!("Failed to parse PPID: {}", e)))
1593            } else {
1594                Err(Error::generic("Insufficient fields in /proc/pid/stat".to_string()))
1595            }
1596        })
1597        .await
1598        {
1599            return ppid;
1600        }
1601    }
1602
1603    // Fallback: assume we're running under a shell/process manager
1604    Ok(1) // PID 1 as fallback
1605}
1606
1607/// Restart via parent process signal
1608async fn restart_via_parent_signal(parent_pid: u32) -> Result<()> {
1609    #[cfg(unix)]
1610    {
1611        use std::process::Command;
1612
1613        // Send SIGTERM to parent process to trigger restart
1614        let output = Command::new("kill")
1615            .args(["-TERM", &parent_pid.to_string()])
1616            .output()
1617            .map_err(|e| Error::generic(format!("Failed to send signal: {}", e)))?;
1618
1619        if !output.status.success() {
1620            return Err(Error::generic(
1621                "Failed to send restart signal to parent process".to_string(),
1622            ));
1623        }
1624
1625        // Wait a moment for the signal to be processed
1626        tokio::time::sleep(Duration::from_millis(100)).await;
1627        Ok(())
1628    }
1629
1630    #[cfg(not(unix))]
1631    {
1632        Err(Error::generic(
1633            "Signal-based restart not supported on this platform".to_string(),
1634        ))
1635    }
1636}
1637
/// Restart via process replacement
///
/// Spawns a fresh copy of the current executable with the same command-line
/// arguments and inherited stdout/stderr, then exits this process once the
/// child is confirmed to still be running 500ms later.
///
/// # Errors
/// Fails when the executable path cannot be determined, the child cannot be
/// spawned, the child exits immediately with a failure status, or its
/// status cannot be queried.
async fn restart_via_process_replacement() -> Result<()> {
    // Get the current executable path
    let current_exe = std::env::current_exe()
        .map_err(|e| Error::generic(format!("Failed to get current executable: {}", e)))?;

    // Get current command line arguments
    let args: Vec<String> = std::env::args().collect();

    tracing::info!("Restarting with command: {:?}", args);

    // Start new process
    // NOTE(review): std::process::Command::spawn is a blocking call inside an
    // async fn; tolerable on this one-shot restart path, but spawn_blocking
    // (or tokio::process::Command) would be cleaner -- confirm intent.
    let mut child = Command::new(&current_exe)
        .args(&args[1..]) // Skip the program name
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .spawn()
        .map_err(|e| Error::generic(format!("Failed to start new process: {}", e)))?;

    // Give the new process a moment to start
    tokio::time::sleep(Duration::from_millis(500)).await;

    // Check if the new process is still running
    match child.try_wait() {
        Ok(Some(status)) => {
            if status.success() {
                // Child already exited (successfully) within 500ms. Treated
                // here as a successful restart, keeping the current process
                // alive -- NOTE(review): confirm this is the desired outcome,
                // since a long-running replacement would not exit this fast.
                tracing::info!("New process started successfully");
                Ok(())
            } else {
                Err(Error::generic("New process exited with error".to_string()))
            }
        }
        Ok(None) => {
            tracing::info!("New process is running, exiting current process");
            // Exit current process
            // The replacement is up; terminate ourselves so listeners/ports
            // are released to it. This never returns.
            std::process::exit(0);
        }
        Err(e) => Err(Error::generic(format!("Failed to check new process status: {}", e))),
    }
}
1678
1679/// Restart via external script
1680async fn restart_via_script() -> Result<()> {
1681    // Look for restart script in common locations
1682    let script_paths = ["./scripts/restart.sh", "./restart.sh", "restart.sh"];
1683
1684    for script_path in &script_paths {
1685        if std::path::Path::new(script_path).exists() {
1686            tracing::info!("Using restart script: {}", script_path);
1687
1688            let output = Command::new("bash")
1689                .arg(script_path)
1690                .output()
1691                .map_err(|e| Error::generic(format!("Failed to execute restart script: {}", e)))?;
1692
1693            if output.status.success() {
1694                return Ok(());
1695            } else {
1696                tracing::warn!(
1697                    "Restart script failed: {}",
1698                    String::from_utf8_lossy(&output.stderr)
1699                );
1700            }
1701        }
1702    }
1703
1704    // If no script found, try to use the clear-ports script as a fallback
1705    let clear_script = "./scripts/clear-ports.sh";
1706    if std::path::Path::new(clear_script).exists() {
1707        tracing::info!("Using clear-ports script as fallback");
1708
1709        let _ = Command::new("bash").arg(clear_script).output();
1710    }
1711
1712    Err(Error::generic(
1713        "No restart mechanism available. Please restart manually.".to_string(),
1714    ))
1715}
1716
1717/// Get restart status
1718pub async fn get_restart_status(
1719    State(state): State<AdminState>,
1720) -> Json<ApiResponse<RestartStatus>> {
1721    let status = state.get_restart_status().await;
1722    Json(ApiResponse::success(status))
1723}
1724
1725/// Get audit logs
1726pub async fn get_audit_logs(
1727    Query(params): Query<std::collections::HashMap<String, String>>,
1728) -> Json<ApiResponse<Vec<crate::audit::AdminAuditLog>>> {
1729    use crate::audit::{get_global_audit_store, AdminActionType};
1730
1731    let action_type_str = params.get("action_type");
1732    let user_id = params.get("user_id").map(|s| s.as_str());
1733    let limit = params.get("limit").and_then(|s| s.parse::<usize>().ok());
1734    let offset = params.get("offset").and_then(|s| s.parse::<usize>().ok());
1735
1736    // Parse action type if provided
1737    let action_type = action_type_str.and_then(|s| {
1738        // Simple string matching - could be enhanced with proper parsing
1739        match s.as_str() {
1740            "config_latency_updated" => Some(AdminActionType::ConfigLatencyUpdated),
1741            "config_faults_updated" => Some(AdminActionType::ConfigFaultsUpdated),
1742            "server_restarted" => Some(AdminActionType::ServerRestarted),
1743            "logs_cleared" => Some(AdminActionType::LogsCleared),
1744            _ => None,
1745        }
1746    });
1747
1748    if let Some(audit_store) = get_global_audit_store() {
1749        let logs = audit_store.get_logs(action_type, user_id, limit, offset).await;
1750        Json(ApiResponse::success(logs))
1751    } else {
1752        Json(ApiResponse::error("Audit logging not initialized".to_string()))
1753    }
1754}
1755
1756/// Get audit log statistics
1757pub async fn get_audit_stats() -> Json<ApiResponse<crate::audit::AuditLogStats>> {
1758    use crate::audit::get_global_audit_store;
1759
1760    if let Some(audit_store) = get_global_audit_store() {
1761        let stats = audit_store.get_stats().await;
1762        Json(ApiResponse::success(stats))
1763    } else {
1764        Json(ApiResponse::error("Audit logging not initialized".to_string()))
1765    }
1766}
1767
1768/// Get server configuration
1769pub async fn get_config(State(state): State<AdminState>) -> Json<ApiResponse<serde_json::Value>> {
1770    let config_state = state.get_config().await;
1771
1772    let config = json!({
1773        "latency": {
1774            "enabled": true,
1775            "base_ms": config_state.latency_profile.base_ms,
1776            "jitter_ms": config_state.latency_profile.jitter_ms,
1777            "tag_overrides": config_state.latency_profile.tag_overrides
1778        },
1779        "faults": {
1780            "enabled": config_state.fault_config.enabled,
1781            "failure_rate": config_state.fault_config.failure_rate,
1782            "status_codes": config_state.fault_config.status_codes
1783        },
1784        "proxy": {
1785            "enabled": config_state.proxy_config.enabled,
1786            "upstream_url": config_state.proxy_config.upstream_url,
1787            "timeout_seconds": config_state.proxy_config.timeout_seconds
1788        },
1789        "validation": {
1790            "mode": config_state.validation_settings.mode,
1791            "aggregate_errors": config_state.validation_settings.aggregate_errors,
1792            "validate_responses": config_state.validation_settings.validate_responses,
1793            "overrides": config_state.validation_settings.overrides
1794        }
1795    });
1796
1797    Json(ApiResponse::success(config))
1798}
1799
1800/// Count total fixtures in the fixtures directory
1801pub fn count_fixtures() -> Result<usize> {
1802    // Get the fixtures directory from environment or use default
1803    let fixtures_dir =
1804        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1805    let fixtures_path = std::path::Path::new(&fixtures_dir);
1806
1807    if !fixtures_path.exists() {
1808        return Ok(0);
1809    }
1810
1811    let mut total_count = 0;
1812
1813    // Count HTTP fixtures
1814    let http_fixtures_path = fixtures_path.join("http");
1815    if http_fixtures_path.exists() {
1816        total_count += count_fixtures_in_directory(&http_fixtures_path)?;
1817    }
1818
1819    // Count WebSocket fixtures
1820    let ws_fixtures_path = fixtures_path.join("websocket");
1821    if ws_fixtures_path.exists() {
1822        total_count += count_fixtures_in_directory(&ws_fixtures_path)?;
1823    }
1824
1825    // Count gRPC fixtures
1826    let grpc_fixtures_path = fixtures_path.join("grpc");
1827    if grpc_fixtures_path.exists() {
1828        total_count += count_fixtures_in_directory(&grpc_fixtures_path)?;
1829    }
1830
1831    Ok(total_count)
1832}
1833
1834/// Helper function to count JSON files in a directory recursively (blocking version)
1835fn count_fixtures_in_directory(dir_path: &std::path::Path) -> Result<usize> {
1836    let mut count = 0;
1837
1838    if let Ok(entries) = std::fs::read_dir(dir_path) {
1839        for entry in entries {
1840            let entry = entry
1841                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
1842            let path = entry.path();
1843
1844            if path.is_dir() {
1845                // Recursively count fixtures in subdirectories
1846                count += count_fixtures_in_directory(&path)?;
1847            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
1848                // Count JSON files as fixtures
1849                count += 1;
1850            }
1851        }
1852    }
1853
1854    Ok(count)
1855}
1856
/// Check if a specific route has fixtures
///
/// Looks for recorded fixture files under the fixtures root (from
/// `MOCKFORGE_FIXTURES_DIR`, default `fixtures`). HTTP routes map to
/// `http/<lowercased method>/<sanitized path>/`; routes with method "WS"
/// additionally check `websocket/<sanitized path>/`.
pub fn route_has_fixtures(method: &str, path: &str) -> bool {
    // Get the fixtures directory from environment or use default
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return false;
    }

    // Directory layout encodes the method (lowercased) and the path with
    // '/' and ':' replaced by '_' -- must stay in sync with the recorder.
    let method_lower = method.to_lowercase();
    let path_hash = path.replace(['/', ':'], "_");

    // HTTP fixtures
    let http_fixtures_path = fixtures_path.join("http").join(&method_lower).join(&path_hash);
    if dir_contains_json_file(&http_fixtures_path) {
        return true;
    }

    // WebSocket fixtures for WS method
    if method.to_uppercase() == "WS" {
        let ws_fixtures_path = fixtures_path.join("websocket").join(&path_hash);
        if dir_contains_json_file(&ws_fixtures_path) {
            return true;
        }
    }

    false
}

/// True when `dir` exists and directly contains at least one `.json` file.
/// (Shared by the HTTP and WebSocket checks above, which previously
/// duplicated this scan.)
fn dir_contains_json_file(dir: &std::path::Path) -> bool {
    std::fs::read_dir(dir)
        .map(|entries| {
            entries
                .flatten()
                .any(|entry| entry.path().extension().and_then(|s| s.to_str()) == Some("json"))
        })
        .unwrap_or(false)
}
1901
1902/// Calculate average latency for a specific endpoint
1903fn calculate_endpoint_latency(metrics: &RequestMetrics, endpoint: &str) -> Option<u64> {
1904    metrics.response_times_by_endpoint.get(endpoint).and_then(|times| {
1905        if times.is_empty() {
1906            None
1907        } else {
1908            let sum: u64 = times.iter().sum();
1909            Some(sum / times.len() as u64)
1910        }
1911    })
1912}
1913
1914/// Get the last request timestamp for a specific endpoint
1915fn get_endpoint_last_request(
1916    metrics: &RequestMetrics,
1917    endpoint: &str,
1918) -> Option<chrono::DateTime<chrono::Utc>> {
1919    metrics.last_request_by_endpoint.get(endpoint).copied()
1920}
1921
1922/// Count total requests for a specific server type
1923fn count_requests_by_server_type(metrics: &RequestMetrics, server_type: &str) -> u64 {
1924    match server_type {
1925        "HTTP" => {
1926            // Count all HTTP requests (GET, POST, PUT, DELETE, etc.)
1927            metrics
1928                .requests_by_endpoint
1929                .iter()
1930                .filter(|(endpoint, _)| {
1931                    let method = endpoint.split(' ').next().unwrap_or("");
1932                    matches!(
1933                        method,
1934                        "GET" | "POST" | "PUT" | "DELETE" | "PATCH" | "HEAD" | "OPTIONS"
1935                    )
1936                })
1937                .map(|(_, count)| count)
1938                .sum()
1939        }
1940        "WebSocket" => {
1941            // Count WebSocket requests (WS method)
1942            metrics
1943                .requests_by_endpoint
1944                .iter()
1945                .filter(|(endpoint, _)| {
1946                    let method = endpoint.split(' ').next().unwrap_or("");
1947                    method == "WS"
1948                })
1949                .map(|(_, count)| count)
1950                .sum()
1951        }
1952        "gRPC" => {
1953            // Count gRPC requests (gRPC method)
1954            metrics
1955                .requests_by_endpoint
1956                .iter()
1957                .filter(|(endpoint, _)| {
1958                    let method = endpoint.split(' ').next().unwrap_or("");
1959                    method == "gRPC"
1960                })
1961                .map(|(_, count)| count)
1962                .sum()
1963        }
1964        _ => 0,
1965    }
1966}
1967
1968/// Get fixtures/replay data
1969pub async fn get_fixtures() -> Json<ApiResponse<Vec<FixtureInfo>>> {
1970    match scan_fixtures_directory() {
1971        Ok(fixtures) => Json(ApiResponse::success(fixtures)),
1972        Err(e) => {
1973            tracing::error!("Failed to scan fixtures directory: {}", e);
1974            Json(ApiResponse::error(format!("Failed to load fixtures: {}", e)))
1975        }
1976    }
1977}
1978
1979/// Scan the fixtures directory and return all fixture information
1980fn scan_fixtures_directory() -> Result<Vec<FixtureInfo>> {
1981    let fixtures_dir =
1982        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1983    let fixtures_path = std::path::Path::new(&fixtures_dir);
1984
1985    if !fixtures_path.exists() {
1986        tracing::warn!("Fixtures directory does not exist: {}", fixtures_dir);
1987        return Ok(Vec::new());
1988    }
1989
1990    let mut all_fixtures = Vec::new();
1991
1992    // Scan HTTP fixtures
1993    let http_fixtures = scan_protocol_fixtures(fixtures_path, "http")?;
1994    all_fixtures.extend(http_fixtures);
1995
1996    // Scan WebSocket fixtures
1997    let ws_fixtures = scan_protocol_fixtures(fixtures_path, "websocket")?;
1998    all_fixtures.extend(ws_fixtures);
1999
2000    // Scan gRPC fixtures
2001    let grpc_fixtures = scan_protocol_fixtures(fixtures_path, "grpc")?;
2002    all_fixtures.extend(grpc_fixtures);
2003
2004    // Sort by saved_at timestamp (newest first)
2005    all_fixtures.sort_by(|a, b| b.saved_at.cmp(&a.saved_at));
2006
2007    tracing::info!("Found {} fixtures in directory: {}", all_fixtures.len(), fixtures_dir);
2008    Ok(all_fixtures)
2009}
2010
2011/// Scan fixtures for a specific protocol
2012fn scan_protocol_fixtures(
2013    fixtures_path: &std::path::Path,
2014    protocol: &str,
2015) -> Result<Vec<FixtureInfo>> {
2016    let protocol_path = fixtures_path.join(protocol);
2017    let mut fixtures = Vec::new();
2018
2019    if !protocol_path.exists() {
2020        return Ok(fixtures);
2021    }
2022
2023    // Walk through the protocol directory recursively
2024    if let Ok(entries) = std::fs::read_dir(&protocol_path) {
2025        for entry in entries {
2026            let entry = entry
2027                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2028            let path = entry.path();
2029
2030            if path.is_dir() {
2031                // Recursively scan subdirectories
2032                let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2033                fixtures.extend(sub_fixtures);
2034            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2035                // Process individual JSON fixture file
2036                if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2037                    fixtures.push(fixture);
2038                }
2039            }
2040        }
2041    }
2042
2043    Ok(fixtures)
2044}
2045
2046/// Recursively scan a directory for fixture files
2047fn scan_directory_recursive(
2048    dir_path: &std::path::Path,
2049    protocol: &str,
2050) -> Result<Vec<FixtureInfo>> {
2051    let mut fixtures = Vec::new();
2052
2053    if let Ok(entries) = std::fs::read_dir(dir_path) {
2054        for entry in entries {
2055            let entry = entry
2056                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2057            let path = entry.path();
2058
2059            if path.is_dir() {
2060                // Recursively scan subdirectories
2061                let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2062                fixtures.extend(sub_fixtures);
2063            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2064                // Process individual JSON fixture file
2065                if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2066                    fixtures.push(fixture);
2067                }
2068            }
2069        }
2070    }
2071
2072    Ok(fixtures)
2073}
2074
/// Parse a single fixture file and extract metadata (synchronous version)
///
/// Builds a `FixtureInfo` for one fixture JSON file: size and saved-at come
/// from filesystem metadata (mtime), method/path from the JSON payload via
/// `extract_method_and_path`, the ID from a hash of path + contents, and
/// the stored file path is made relative to the fixtures root when possible.
///
/// # Errors
/// Fails when metadata, file contents, or the JSON cannot be read/parsed.
fn parse_fixture_file_sync(file_path: &std::path::Path, protocol: &str) -> Result<FixtureInfo> {
    // Get file metadata
    let metadata = std::fs::metadata(file_path)
        .map_err(|e| Error::generic(format!("Failed to read file metadata: {}", e)))?;

    let file_size = metadata.len();
    let modified_time = metadata
        .modified()
        .map_err(|e| Error::generic(format!("Failed to get file modification time: {}", e)))?;

    // saved_at is the file's mtime, not any timestamp stored inside the fixture.
    let saved_at = chrono::DateTime::from(modified_time);

    // Read and parse the fixture file (blocking - called from spawn_blocking context)
    let content = std::fs::read_to_string(file_path)
        .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;

    let fixture_data: serde_json::Value = serde_json::from_str(&content)
        .map_err(|e| Error::generic(format!("Failed to parse fixture JSON: {}", e)))?;

    // Extract method and path from the fixture data
    let (method, path) = extract_method_and_path(&fixture_data, protocol)?;

    // Generate a unique ID based on file path and content
    let id = generate_fixture_id(file_path, &content);

    // Extract fingerprint from file path or fixture data
    let fingerprint = extract_fingerprint(file_path, &fixture_data)?;

    // Get relative file path (falls back to the absolute path when the file
    // is not under the configured fixtures root)
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);
    let file_path_str = file_path
        .strip_prefix(fixtures_path)
        .unwrap_or(file_path)
        .to_string_lossy()
        .to_string();

    Ok(FixtureInfo {
        id,
        protocol: protocol.to_string(),
        method,
        path,
        saved_at,
        file_size,
        file_path: file_path_str,
        fingerprint,
        // The full parsed JSON payload is retained as metadata.
        metadata: fixture_data,
    })
}
2126
/// Extract method and path from fixture data
///
/// Derives a display (method, path) pair from a fixture's JSON payload
/// using protocol-specific conventions. Missing fields never fail --
/// placeholder values ("UNKNOWN", "/unknown", "/ws", "UnknownService",
/// "UnknownMethod") are substituted instead, so the only reason this
/// returns `Result` is signature symmetry with its callers.
fn extract_method_and_path(
    fixture_data: &serde_json::Value,
    protocol: &str,
) -> Result<(String, String)> {
    match protocol {
        "http" => {
            // For HTTP fixtures, look for request.method and request.path
            let method = fixture_data
                .get("request")
                .and_then(|req| req.get("method"))
                .and_then(|m| m.as_str())
                .unwrap_or("UNKNOWN")
                .to_uppercase();

            let path = fixture_data
                .get("request")
                .and_then(|req| req.get("path"))
                .and_then(|p| p.as_str())
                .unwrap_or("/unknown")
                .to_string();

            Ok((method, path))
        }
        "websocket" => {
            // For WebSocket fixtures, use WS method and extract path from metadata:
            // top-level "path" is preferred, "request.path" is the fallback.
            let path = fixture_data
                .get("path")
                .and_then(|p| p.as_str())
                .or_else(|| {
                    fixture_data
                        .get("request")
                        .and_then(|req| req.get("path"))
                        .and_then(|p| p.as_str())
                })
                .unwrap_or("/ws")
                .to_string();

            Ok(("WS".to_string(), path))
        }
        "grpc" => {
            // For gRPC fixtures, extract service and method
            let service =
                fixture_data.get("service").and_then(|s| s.as_str()).unwrap_or("UnknownService");

            let method =
                fixture_data.get("method").and_then(|m| m.as_str()).unwrap_or("UnknownMethod");

            // Presented in the conventional /Service/Method form.
            let path = format!("/{}/{}", service, method);
            Ok(("gRPC".to_string(), path))
        }
        _ => {
            // Unknown protocol: best effort -- uppercase protocol name as the
            // "method", top-level "path" if present.
            let path = fixture_data
                .get("path")
                .and_then(|p| p.as_str())
                .unwrap_or("/unknown")
                .to_string();
            Ok((protocol.to_uppercase(), path))
        }
    }
}
2188
/// Generate a unique fixture ID
///
/// Produces a stable identifier of the form `fixture_<hex>` by hashing the
/// file path together with the file contents, so the same file with the
/// same contents always yields the same ID.
fn generate_fixture_id(file_path: &std::path::Path, content: &str) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut state = DefaultHasher::new();
    // Tuple hashing feeds path then content into the hasher in order,
    // exactly as two sequential .hash() calls would.
    (file_path, content).hash(&mut state);
    format!("fixture_{:x}", state.finish())
}
2199
2200/// Extract fingerprint from file path or fixture data
2201fn extract_fingerprint(
2202    file_path: &std::path::Path,
2203    fixture_data: &serde_json::Value,
2204) -> Result<String> {
2205    // Try to extract from fixture data first
2206    if let Some(fingerprint) = fixture_data.get("fingerprint").and_then(|f| f.as_str()) {
2207        return Ok(fingerprint.to_string());
2208    }
2209
2210    // Try to extract from file path (common pattern: method_path_hash.json)
2211    if let Some(file_name) = file_path.file_stem().and_then(|s| s.to_str()) {
2212        // Look for hash pattern at the end of filename
2213        if let Some(hash) = file_name.split('_').next_back() {
2214            if hash.len() >= 8 && hash.chars().all(|c| c.is_alphanumeric()) {
2215                return Ok(hash.to_string());
2216            }
2217        }
2218    }
2219
2220    // Fallback: generate from file path
2221    use std::collections::hash_map::DefaultHasher;
2222    use std::hash::{Hash, Hasher};
2223
2224    let mut hasher = DefaultHasher::new();
2225    file_path.hash(&mut hasher);
2226    Ok(format!("{:x}", hasher.finish()))
2227}
2228
2229/// Delete a fixture
2230pub async fn delete_fixture(
2231    Json(payload): Json<FixtureDeleteRequest>,
2232) -> Json<ApiResponse<String>> {
2233    match delete_fixture_by_id(&payload.fixture_id).await {
2234        Ok(_) => {
2235            tracing::info!("Successfully deleted fixture: {}", payload.fixture_id);
2236            Json(ApiResponse::success("Fixture deleted successfully".to_string()))
2237        }
2238        Err(e) => {
2239            tracing::error!("Failed to delete fixture {}: {}", payload.fixture_id, e);
2240            Json(ApiResponse::error(format!("Failed to delete fixture: {}", e)))
2241        }
2242    }
2243}
2244
2245/// Delete multiple fixtures
2246pub async fn delete_fixtures_bulk(
2247    Json(payload): Json<FixtureBulkDeleteRequest>,
2248) -> Json<ApiResponse<FixtureBulkDeleteResult>> {
2249    let mut deleted_count = 0;
2250    let mut errors = Vec::new();
2251
2252    for fixture_id in &payload.fixture_ids {
2253        match delete_fixture_by_id(fixture_id).await {
2254            Ok(_) => {
2255                deleted_count += 1;
2256                tracing::info!("Successfully deleted fixture: {}", fixture_id);
2257            }
2258            Err(e) => {
2259                errors.push(format!("Failed to delete {}: {}", fixture_id, e));
2260                tracing::error!("Failed to delete fixture {}: {}", fixture_id, e);
2261            }
2262        }
2263    }
2264
2265    let result = FixtureBulkDeleteResult {
2266        deleted_count,
2267        total_requested: payload.fixture_ids.len(),
2268        errors: errors.clone(),
2269    };
2270
2271    if errors.is_empty() {
2272        Json(ApiResponse::success(result))
2273    } else {
2274        Json(ApiResponse::error(format!(
2275            "Partial success: {} deleted, {} errors",
2276            deleted_count,
2277            errors.len()
2278        )))
2279    }
2280}
2281
/// Delete a single fixture by ID
///
/// Resolves the fixture ID to a concrete file by re-scanning the fixtures
/// tree (IDs are path+content hashes, so the only way back to a file is a
/// search), removes the file on a blocking task, then prunes any parent
/// directories the deletion left empty.
///
/// # Errors
/// Fails when the fixtures directory is missing, the ID cannot be matched
/// to a file, or the file cannot be removed.
async fn delete_fixture_by_id(fixture_id: &str) -> Result<()> {
    // First, try to find the fixture by scanning the fixtures directory
    // This is more robust than trying to parse the ID format
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    // Search for the fixture file by ID across all protocols
    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    // Delete the file using spawn_blocking (std::fs is blocking I/O)
    let file_path_clone = file_path.clone();
    tokio::task::spawn_blocking(move || {
        if file_path_clone.exists() {
            std::fs::remove_file(&file_path_clone).map_err(|e| {
                Error::generic(format!(
                    "Failed to delete fixture file {}: {}",
                    file_path_clone.display(),
                    e
                ))
            })
        } else {
            Err(Error::generic(format!("Fixture file not found: {}", file_path_clone.display())))
        }
    })
    .await
    // First `?` covers the task JoinError, second the deletion Result.
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Deleted fixture file: {}", file_path.display());

    // Also try to remove empty parent directories (best-effort; never fails)
    cleanup_empty_directories(&file_path).await;

    Ok(())
}
2322
2323/// Find a fixture file by its ID across all protocols
2324fn find_fixture_file_by_id(
2325    fixtures_path: &std::path::Path,
2326    fixture_id: &str,
2327) -> Result<std::path::PathBuf> {
2328    // Search in all protocol directories
2329    let protocols = ["http", "websocket", "grpc"];
2330
2331    for protocol in &protocols {
2332        let protocol_path = fixtures_path.join(protocol);
2333        if let Ok(found_path) = search_fixture_in_directory(&protocol_path, fixture_id) {
2334            return Ok(found_path);
2335        }
2336    }
2337
2338    Err(Error::generic(format!(
2339        "Fixture with ID '{}' not found in any protocol directory",
2340        fixture_id
2341    )))
2342}
2343
/// Recursively search for a fixture file by ID in a directory
///
/// Walks `dir_path` depth-first, re-parsing every `.json` file and
/// comparing its derived ID. Passing protocol "unknown" to the parser is
/// safe because fixture IDs hash only the file path and contents (see
/// `generate_fixture_id`). Note: this is O(files) with a full read+parse
/// per candidate, so it scales with the size of the fixtures tree.
///
/// # Errors
/// Fails when the directory does not exist, an entry cannot be read, or no
/// matching fixture is found.
fn search_fixture_in_directory(
    dir_path: &std::path::Path,
    fixture_id: &str,
) -> Result<std::path::PathBuf> {
    if !dir_path.exists() {
        return Err(Error::generic(format!("Directory does not exist: {}", dir_path.display())));
    }

    if let Ok(entries) = std::fs::read_dir(dir_path) {
        for entry in entries {
            let entry = entry
                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
            let path = entry.path();

            if path.is_dir() {
                // Recursively search subdirectories
                if let Ok(found_path) = search_fixture_in_directory(&path, fixture_id) {
                    return Ok(found_path);
                }
            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
                // Check if this file matches the fixture ID
                // (files that fail to parse are silently skipped)
                if let Ok(fixture_info) = parse_fixture_file_sync(&path, "unknown") {
                    if fixture_info.id == fixture_id {
                        return Ok(path);
                    }
                }
            }
        }
    }

    Err(Error::generic(format!(
        "Fixture not found in directory: {}",
        dir_path.display()
    )))
}
2380
/// Clean up empty directories after file deletion
///
/// Best-effort: walks upward from the deleted file's parent, removing each
/// directory that is empty, stopping at the first non-empty directory, at
/// the fixtures root, or on any error. All failures are logged at debug
/// level and otherwise ignored; this function never fails.
async fn cleanup_empty_directories(file_path: &std::path::Path) {
    let file_path = file_path.to_path_buf();

    // Use spawn_blocking for directory operations (std::fs is blocking)
    let _ = tokio::task::spawn_blocking(move || {
        if let Some(parent) = file_path.parent() {
            // Try to remove empty directories up to the protocol level
            let mut current = parent;
            // NOTE(review): the fixtures root is re-read from the environment
            // here; if MOCKFORGE_FIXTURES_DIR changed after the file path was
            // resolved, the walk may stop early or late -- confirm acceptable.
            let fixtures_dir =
                std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
            let fixtures_path = std::path::Path::new(&fixtures_dir);

            while current != fixtures_path && current.parent().is_some() {
                if let Ok(entries) = std::fs::read_dir(current) {
                    // count() consumes the iterator; zero entries == empty dir.
                    if entries.count() == 0 {
                        if let Err(e) = std::fs::remove_dir(current) {
                            tracing::debug!(
                                "Failed to remove empty directory {}: {}",
                                current.display(),
                                e
                            );
                            break;
                        } else {
                            tracing::debug!("Removed empty directory: {}", current.display());
                        }
                    } else {
                        // Directory still has contents; stop pruning.
                        break;
                    }
                } else {
                    break;
                }

                // Step one level up toward the fixtures root.
                if let Some(next_parent) = current.parent() {
                    current = next_parent;
                } else {
                    break;
                }
            }
        }
    })
    .await;
}
2424
2425/// Download a fixture file
2426pub async fn download_fixture(
2427    axum::extract::Path(fixture_id): axum::extract::Path<String>,
2428) -> impl IntoResponse {
2429    // Find and read the fixture file
2430    match download_fixture_by_id(&fixture_id).await {
2431        Ok((content, file_name)) => (
2432            http::StatusCode::OK,
2433            [
2434                (http::header::CONTENT_TYPE, "application/json".to_string()),
2435                (
2436                    http::header::CONTENT_DISPOSITION,
2437                    format!("attachment; filename=\"{}\"", file_name),
2438                ),
2439            ],
2440            content,
2441        )
2442            .into_response(),
2443        Err(e) => {
2444            tracing::error!("Failed to download fixture {}: {}", fixture_id, e);
2445            let error_response = format!(r#"{{"error": "Failed to download fixture: {}"}}"#, e);
2446            (
2447                http::StatusCode::NOT_FOUND,
2448                [(http::header::CONTENT_TYPE, "application/json".to_string())],
2449                error_response,
2450            )
2451                .into_response()
2452        }
2453    }
2454}
2455
2456/// Download a fixture file by ID
2457async fn download_fixture_by_id(fixture_id: &str) -> Result<(String, String)> {
2458    // Find the fixture file by ID
2459    let fixtures_dir =
2460        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2461    let fixtures_path = std::path::Path::new(&fixtures_dir);
2462
2463    if !fixtures_path.exists() {
2464        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2465    }
2466
2467    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2468
2469    // Read the file content using spawn_blocking
2470    let file_path_clone = file_path.clone();
2471    let (content, file_name) = tokio::task::spawn_blocking(move || {
2472        let content = std::fs::read_to_string(&file_path_clone)
2473            .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
2474
2475        let file_name = file_path_clone
2476            .file_name()
2477            .and_then(|name| name.to_str())
2478            .unwrap_or("fixture.json")
2479            .to_string();
2480
2481        Ok::<_, Error>((content, file_name))
2482    })
2483    .await
2484    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2485
2486    tracing::info!("Downloaded fixture file: {} ({} bytes)", file_path.display(), content.len());
2487    Ok((content, file_name))
2488}
2489
2490/// Rename a fixture
2491pub async fn rename_fixture(
2492    axum::extract::Path(fixture_id): axum::extract::Path<String>,
2493    Json(payload): Json<FixtureRenameRequest>,
2494) -> Json<ApiResponse<String>> {
2495    match rename_fixture_by_id(&fixture_id, &payload.new_name).await {
2496        Ok(new_path) => {
2497            tracing::info!("Successfully renamed fixture: {} -> {}", fixture_id, payload.new_name);
2498            Json(ApiResponse::success(format!("Fixture renamed successfully to: {}", new_path)))
2499        }
2500        Err(e) => {
2501            tracing::error!("Failed to rename fixture {}: {}", fixture_id, e);
2502            Json(ApiResponse::error(format!("Failed to rename fixture: {}", e)))
2503        }
2504    }
2505}
2506
2507/// Rename a fixture by ID
2508async fn rename_fixture_by_id(fixture_id: &str, new_name: &str) -> Result<String> {
2509    // Validate new name
2510    if new_name.is_empty() {
2511        return Err(Error::generic("New name cannot be empty".to_string()));
2512    }
2513
2514    // Ensure new name ends with .json
2515    let new_name = if new_name.ends_with(".json") {
2516        new_name.to_string()
2517    } else {
2518        format!("{}.json", new_name)
2519    };
2520
2521    // Find the fixture file
2522    let fixtures_dir =
2523        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2524    let fixtures_path = std::path::Path::new(&fixtures_dir);
2525
2526    if !fixtures_path.exists() {
2527        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2528    }
2529
2530    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2531
2532    // Get the parent directory and construct new path
2533    let parent = old_path
2534        .parent()
2535        .ok_or_else(|| Error::generic("Could not determine parent directory".to_string()))?;
2536
2537    let new_path = parent.join(&new_name);
2538
2539    // Check if target already exists
2540    if new_path.exists() {
2541        return Err(Error::generic(format!(
2542            "A fixture with name '{}' already exists in the same directory",
2543            new_name
2544        )));
2545    }
2546
2547    // Rename the file using spawn_blocking
2548    let old_path_clone = old_path.clone();
2549    let new_path_clone = new_path.clone();
2550    tokio::task::spawn_blocking(move || {
2551        std::fs::rename(&old_path_clone, &new_path_clone)
2552            .map_err(|e| Error::generic(format!("Failed to rename fixture file: {}", e)))
2553    })
2554    .await
2555    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2556
2557    tracing::info!("Renamed fixture file: {} -> {}", old_path.display(), new_path.display());
2558
2559    // Return relative path for display
2560    Ok(new_path
2561        .strip_prefix(fixtures_path)
2562        .unwrap_or(&new_path)
2563        .to_string_lossy()
2564        .to_string())
2565}
2566
2567/// Move a fixture to a new path
2568pub async fn move_fixture(
2569    axum::extract::Path(fixture_id): axum::extract::Path<String>,
2570    Json(payload): Json<FixtureMoveRequest>,
2571) -> Json<ApiResponse<String>> {
2572    match move_fixture_by_id(&fixture_id, &payload.new_path).await {
2573        Ok(new_location) => {
2574            tracing::info!("Successfully moved fixture: {} -> {}", fixture_id, payload.new_path);
2575            Json(ApiResponse::success(format!("Fixture moved successfully to: {}", new_location)))
2576        }
2577        Err(e) => {
2578            tracing::error!("Failed to move fixture {}: {}", fixture_id, e);
2579            Json(ApiResponse::error(format!("Failed to move fixture: {}", e)))
2580        }
2581    }
2582}
2583
/// Move a fixture by ID to a new path
///
/// `new_path` is always interpreted inside the fixtures directory: a
/// leading '/' is stripped, so even absolute-looking inputs stay under the
/// fixtures root. If the resolved target already ends in `.json` it is
/// used as-is; if it is an existing directory or contains no '.' the
/// original file name is appended; otherwise the extension is forced to
/// `.json`. After the move, empty directories left at the old location
/// are cleaned up. Returns the new path relative to the fixtures root.
async fn move_fixture_by_id(fixture_id: &str, new_path: &str) -> Result<String> {
    // Validate new path
    if new_path.is_empty() {
        return Err(Error::generic("New path cannot be empty".to_string()));
    }

    // Find the fixture file
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    // Construct the new path - can be either relative to fixtures_dir or absolute within it
    let new_full_path = if new_path.starts_with('/') {
        // Absolute path within fixtures directory
        fixtures_path.join(new_path.trim_start_matches('/'))
    } else {
        // Relative path from fixtures directory
        fixtures_path.join(new_path)
    };

    // Ensure target ends with .json if it doesn't already
    let new_full_path = if new_full_path.extension().and_then(|s| s.to_str()) == Some("json") {
        new_full_path
    } else {
        // If the path is a directory or doesn't have .json extension, append the original filename
        // NOTE(review): `is_dir()` is false for a directory that does not exist
        // yet, so a brand-new directory target is only recognized when
        // `new_path` contains no '.' — confirm this is the intended contract.
        if new_full_path.is_dir() || !new_path.contains('.') {
            let file_name = old_path.file_name().ok_or_else(|| {
                Error::generic("Could not determine original file name".to_string())
            })?;
            new_full_path.join(file_name)
        } else {
            new_full_path.with_extension("json")
        }
    };

    // Check if target already exists
    if new_full_path.exists() {
        return Err(Error::generic(format!(
            "A fixture already exists at path: {}",
            new_full_path.display()
        )));
    }

    // Create parent directories and move file using spawn_blocking
    let old_path_clone = old_path.clone();
    let new_full_path_clone = new_full_path.clone();
    tokio::task::spawn_blocking(move || {
        // Create parent directories if they don't exist
        if let Some(parent) = new_full_path_clone.parent() {
            std::fs::create_dir_all(parent)
                .map_err(|e| Error::generic(format!("Failed to create target directory: {}", e)))?;
        }

        // Move the file
        // NOTE(review): fs::rename fails across filesystems; fine as long as
        // the whole fixtures tree lives on a single volume — confirm.
        std::fs::rename(&old_path_clone, &new_full_path_clone)
            .map_err(|e| Error::generic(format!("Failed to move fixture file: {}", e)))
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Moved fixture file: {} -> {}", old_path.display(), new_full_path.display());

    // Clean up empty directories from the old location
    cleanup_empty_directories(&old_path).await;

    // Return relative path for display
    Ok(new_full_path
        .strip_prefix(fixtures_path)
        .unwrap_or(&new_full_path)
        .to_string_lossy()
        .to_string())
}
2663
2664/// Get current validation settings
2665pub async fn get_validation(
2666    State(state): State<AdminState>,
2667) -> Json<ApiResponse<ValidationSettings>> {
2668    // Get real validation settings from configuration
2669    let config_state = state.get_config().await;
2670
2671    Json(ApiResponse::success(config_state.validation_settings))
2672}
2673
2674/// Update validation settings
2675pub async fn update_validation(
2676    State(state): State<AdminState>,
2677    Json(update): Json<ValidationUpdate>,
2678) -> Json<ApiResponse<String>> {
2679    // Validate the mode
2680    match update.mode.as_str() {
2681        "enforce" | "warn" | "off" => {}
2682        _ => {
2683            return Json(ApiResponse::error(
2684                "Invalid validation mode. Must be 'enforce', 'warn', or 'off'".to_string(),
2685            ))
2686        }
2687    }
2688
2689    // Update the actual validation configuration
2690    let mode = update.mode.clone();
2691    state
2692        .update_validation_config(
2693            update.mode,
2694            update.aggregate_errors,
2695            update.validate_responses,
2696            update.overrides.unwrap_or_default(),
2697        )
2698        .await;
2699
2700    tracing::info!(
2701        "Updated validation settings: mode={}, aggregate_errors={}",
2702        mode,
2703        update.aggregate_errors
2704    );
2705
2706    Json(ApiResponse::success("Validation settings updated".to_string()))
2707}
2708
2709/// Get environment variables
2710pub async fn get_env_vars() -> Json<ApiResponse<HashMap<String, String>>> {
2711    // Get actual environment variables that are relevant to MockForge
2712    let mut env_vars = HashMap::new();
2713
2714    let relevant_vars = [
2715        // Core functionality
2716        "MOCKFORGE_LATENCY_ENABLED",
2717        "MOCKFORGE_FAILURES_ENABLED",
2718        "MOCKFORGE_PROXY_ENABLED",
2719        "MOCKFORGE_RECORD_ENABLED",
2720        "MOCKFORGE_REPLAY_ENABLED",
2721        "MOCKFORGE_LOG_LEVEL",
2722        "MOCKFORGE_CONFIG_FILE",
2723        "RUST_LOG",
2724        // HTTP server configuration
2725        "MOCKFORGE_HTTP_PORT",
2726        "MOCKFORGE_HTTP_HOST",
2727        "MOCKFORGE_HTTP_OPENAPI_SPEC",
2728        "MOCKFORGE_CORS_ENABLED",
2729        "MOCKFORGE_REQUEST_TIMEOUT_SECS",
2730        // WebSocket server configuration
2731        "MOCKFORGE_WS_PORT",
2732        "MOCKFORGE_WS_HOST",
2733        "MOCKFORGE_WS_REPLAY_FILE",
2734        "MOCKFORGE_WS_CONNECTION_TIMEOUT_SECS",
2735        // gRPC server configuration
2736        "MOCKFORGE_GRPC_PORT",
2737        "MOCKFORGE_GRPC_HOST",
2738        // Admin UI configuration
2739        "MOCKFORGE_ADMIN_ENABLED",
2740        "MOCKFORGE_ADMIN_PORT",
2741        "MOCKFORGE_ADMIN_HOST",
2742        "MOCKFORGE_ADMIN_MOUNT_PATH",
2743        "MOCKFORGE_ADMIN_API_ENABLED",
2744        // Template and validation
2745        "MOCKFORGE_RESPONSE_TEMPLATE_EXPAND",
2746        "MOCKFORGE_REQUEST_VALIDATION",
2747        "MOCKFORGE_AGGREGATE_ERRORS",
2748        "MOCKFORGE_RESPONSE_VALIDATION",
2749        "MOCKFORGE_VALIDATION_STATUS",
2750        // Data generation
2751        "MOCKFORGE_RAG_ENABLED",
2752        "MOCKFORGE_FAKE_TOKENS",
2753        // Other settings
2754        "MOCKFORGE_FIXTURES_DIR",
2755    ];
2756
2757    for var_name in &relevant_vars {
2758        if let Ok(value) = std::env::var(var_name) {
2759            env_vars.insert(var_name.to_string(), value);
2760        }
2761    }
2762
2763    Json(ApiResponse::success(env_vars))
2764}
2765
2766/// Update environment variable
2767pub async fn update_env_var(Json(update): Json<EnvVarUpdate>) -> Json<ApiResponse<String>> {
2768    // Set the environment variable (runtime only - not persisted)
2769    std::env::set_var(&update.key, &update.value);
2770
2771    tracing::info!("Updated environment variable: {}={}", update.key, update.value);
2772
2773    // Note: Environment variables set at runtime are not persisted
2774    // In a production system, you might want to write to a .env file or config file
2775    Json(ApiResponse::success(format!(
2776        "Environment variable {} updated to '{}'. Note: This change is not persisted and will be lost on restart.",
2777        update.key, update.value
2778    )))
2779}
2780
2781/// Get file content
2782pub async fn get_file_content(
2783    Json(request): Json<FileContentRequest>,
2784) -> Json<ApiResponse<String>> {
2785    // Validate the file path for security
2786    if let Err(e) = validate_file_path(&request.file_path) {
2787        return Json(ApiResponse::error(format!("Invalid file path: {}", e)));
2788    }
2789
2790    // Read the actual file content
2791    match tokio::fs::read_to_string(&request.file_path).await {
2792        Ok(content) => {
2793            // Validate the file content for security
2794            if let Err(e) = validate_file_content(&content) {
2795                return Json(ApiResponse::error(format!("Invalid file content: {}", e)));
2796            }
2797            Json(ApiResponse::success(content))
2798        }
2799        Err(e) => Json(ApiResponse::error(format!("Failed to read file: {}", e))),
2800    }
2801}
2802
2803/// Save file content
2804pub async fn save_file_content(Json(request): Json<FileSaveRequest>) -> Json<ApiResponse<String>> {
2805    match save_file_to_filesystem(&request.file_path, &request.content).await {
2806        Ok(_) => {
2807            tracing::info!("Successfully saved file: {}", request.file_path);
2808            Json(ApiResponse::success("File saved successfully".to_string()))
2809        }
2810        Err(e) => {
2811            tracing::error!("Failed to save file {}: {}", request.file_path, e);
2812            Json(ApiResponse::error(format!("Failed to save file: {}", e)))
2813        }
2814    }
2815}
2816
2817/// Save content to a file on the filesystem
2818async fn save_file_to_filesystem(file_path: &str, content: &str) -> Result<()> {
2819    // Validate the file path for security
2820    validate_file_path(file_path)?;
2821
2822    // Validate the file content for security
2823    validate_file_content(content)?;
2824
2825    // Convert to PathBuf and clone data for spawn_blocking
2826    let path = std::path::PathBuf::from(file_path);
2827    let content = content.to_string();
2828
2829    // Perform file operations using spawn_blocking
2830    let path_clone = path.clone();
2831    let content_clone = content.clone();
2832    tokio::task::spawn_blocking(move || {
2833        // Create parent directories if they don't exist
2834        if let Some(parent) = path_clone.parent() {
2835            std::fs::create_dir_all(parent).map_err(|e| {
2836                Error::generic(format!("Failed to create directory {}: {}", parent.display(), e))
2837            })?;
2838        }
2839
2840        // Write the content to the file
2841        std::fs::write(&path_clone, &content_clone).map_err(|e| {
2842            Error::generic(format!("Failed to write file {}: {}", path_clone.display(), e))
2843        })?;
2844
2845        // Verify the file was written correctly
2846        let written_content = std::fs::read_to_string(&path_clone).map_err(|e| {
2847            Error::generic(format!("Failed to verify written file {}: {}", path_clone.display(), e))
2848        })?;
2849
2850        if written_content != content_clone {
2851            return Err(Error::generic(format!(
2852                "File content verification failed for {}",
2853                path_clone.display()
2854            )));
2855        }
2856
2857        Ok::<_, Error>(())
2858    })
2859    .await
2860    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2861
2862    tracing::info!("File saved successfully: {} ({} bytes)", path.display(), content.len());
2863    Ok(())
2864}
2865
2866/// Validate file path for security
2867fn validate_file_path(file_path: &str) -> Result<()> {
2868    // Check for path traversal attacks
2869    if file_path.contains("..") {
2870        return Err(Error::generic("Path traversal detected in file path".to_string()));
2871    }
2872
2873    // Check for absolute paths that might be outside allowed directories
2874    let path = std::path::Path::new(file_path);
2875    if path.is_absolute() {
2876        // For absolute paths, ensure they're within allowed directories
2877        let allowed_dirs = [
2878            std::env::current_dir().unwrap_or_default(),
2879            std::path::PathBuf::from("."),
2880            std::path::PathBuf::from("fixtures"),
2881            std::path::PathBuf::from("config"),
2882        ];
2883
2884        let mut is_allowed = false;
2885        for allowed_dir in &allowed_dirs {
2886            if path.starts_with(allowed_dir) {
2887                is_allowed = true;
2888                break;
2889            }
2890        }
2891
2892        if !is_allowed {
2893            return Err(Error::generic("File path is outside allowed directories".to_string()));
2894        }
2895    }
2896
2897    // Check for dangerous file extensions or names
2898    let dangerous_extensions = ["exe", "bat", "cmd", "sh", "ps1", "scr", "com"];
2899    if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
2900        if dangerous_extensions.contains(&extension.to_lowercase().as_str()) {
2901            return Err(Error::generic(format!(
2902                "Dangerous file extension not allowed: {}",
2903                extension
2904            )));
2905        }
2906    }
2907
2908    Ok(())
2909}
2910
2911/// Validate file content for security
2912fn validate_file_content(content: &str) -> Result<()> {
2913    // Check for reasonable file size (prevent DoS)
2914    if content.len() > 10 * 1024 * 1024 {
2915        // 10MB limit
2916        return Err(Error::generic("File content too large (max 10MB)".to_string()));
2917    }
2918
2919    // Check for null bytes (potential security issue)
2920    if content.contains('\0') {
2921        return Err(Error::generic("File content contains null bytes".to_string()));
2922    }
2923
2924    Ok(())
2925}
2926
/// Fixture delete request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureDeleteRequest {
    /// Identifier of the fixture to delete.
    pub fixture_id: String,
}
2932
/// Environment variable update
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvVarUpdate {
    /// Environment variable name.
    pub key: String,
    /// New value to set (runtime only; not persisted).
    pub value: String,
}
2939
/// Fixture bulk delete request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteRequest {
    /// Identifiers of the fixtures to delete.
    pub fixture_ids: Vec<String>,
}
2945
/// Fixture bulk delete result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteResult {
    /// Number of fixtures actually deleted.
    pub deleted_count: usize,
    /// Number of fixtures the caller asked to delete.
    pub total_requested: usize,
    /// Per-fixture error messages for the deletions that failed.
    pub errors: Vec<String>,
}
2953
/// Fixture rename request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureRenameRequest {
    /// New file name; a `.json` extension is appended if missing.
    pub new_name: String,
}
2959
/// Fixture move request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureMoveRequest {
    /// Target path, interpreted relative to the fixtures directory.
    pub new_path: String,
}
2965
/// File content request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContentRequest {
    /// Path of the file to read (validated against traversal rules).
    pub file_path: String,
    /// Declared file type; currently unused by the read handler.
    pub file_type: String,
}
2972
/// File save request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileSaveRequest {
    /// Path of the file to write (validated against traversal rules).
    pub file_path: String,
    /// New file content (max 10 MB; no null bytes).
    pub content: String,
}
2979
2980/// Get smoke tests
2981pub async fn get_smoke_tests(
2982    State(state): State<AdminState>,
2983) -> Json<ApiResponse<Vec<SmokeTestResult>>> {
2984    let results = state.get_smoke_test_results().await;
2985    Json(ApiResponse::success(results))
2986}
2987
2988/// Run smoke tests endpoint
2989pub async fn run_smoke_tests_endpoint(
2990    State(state): State<AdminState>,
2991) -> Json<ApiResponse<String>> {
2992    tracing::info!("Starting smoke test execution");
2993
2994    // Spawn smoke test execution in background to avoid blocking
2995    let state_clone = state.clone();
2996    tokio::spawn(async move {
2997        if let Err(e) = execute_smoke_tests(&state_clone).await {
2998            tracing::error!("Smoke test execution failed: {}", e);
2999        } else {
3000            tracing::info!("Smoke test execution completed successfully");
3001        }
3002    });
3003
3004    Json(ApiResponse::success(
3005        "Smoke tests started. Check results in the smoke tests section.".to_string(),
3006    ))
3007}
3008
3009/// Execute smoke tests against fixtures
3010async fn execute_smoke_tests(state: &AdminState) -> Result<()> {
3011    // Get base URL from environment or use default
3012    let base_url =
3013        std::env::var("MOCKFORGE_BASE_URL").unwrap_or_else(|_| "http://localhost:3000".to_string());
3014
3015    let context = SmokeTestContext {
3016        base_url,
3017        timeout_seconds: 30,
3018        parallel: true,
3019    };
3020
3021    // Get all fixtures to create smoke tests from
3022    let fixtures = scan_fixtures_directory()?;
3023
3024    // Filter for HTTP fixtures only (smoke tests are typically HTTP)
3025    let http_fixtures: Vec<&FixtureInfo> =
3026        fixtures.iter().filter(|f| f.protocol == "http").collect();
3027
3028    if http_fixtures.is_empty() {
3029        tracing::warn!("No HTTP fixtures found for smoke testing");
3030        return Ok(());
3031    }
3032
3033    tracing::info!("Running smoke tests for {} HTTP fixtures", http_fixtures.len());
3034
3035    // Create smoke test results from fixtures
3036    let mut test_results = Vec::new();
3037
3038    for fixture in http_fixtures {
3039        let test_result = create_smoke_test_from_fixture(fixture);
3040        test_results.push(test_result);
3041    }
3042
3043    // Execute tests
3044    let mut executed_results = Vec::new();
3045    for mut test_result in test_results {
3046        // Update status to running
3047        test_result.status = "running".to_string();
3048        state.update_smoke_test_result(test_result.clone()).await;
3049
3050        // Execute the test
3051        let start_time = std::time::Instant::now();
3052        match execute_single_smoke_test(&test_result, &context).await {
3053            Ok((status_code, response_time_ms)) => {
3054                test_result.status = "passed".to_string();
3055                test_result.status_code = Some(status_code);
3056                test_result.response_time_ms = Some(response_time_ms);
3057                test_result.error_message = None;
3058            }
3059            Err(e) => {
3060                test_result.status = "failed".to_string();
3061                test_result.error_message = Some(e.to_string());
3062                test_result.status_code = None;
3063                test_result.response_time_ms = None;
3064            }
3065        }
3066
3067        let duration = start_time.elapsed();
3068        test_result.duration_seconds = Some(duration.as_secs_f64());
3069        test_result.last_run = Some(chrono::Utc::now());
3070
3071        executed_results.push(test_result.clone());
3072        state.update_smoke_test_result(test_result).await;
3073    }
3074
3075    tracing::info!("Smoke test execution completed: {} tests run", executed_results.len());
3076    Ok(())
3077}
3078
3079/// Create a smoke test result from a fixture
3080fn create_smoke_test_from_fixture(fixture: &FixtureInfo) -> SmokeTestResult {
3081    let test_name = format!("{} {}", fixture.method, fixture.path);
3082    let description = format!("Smoke test for {} endpoint", fixture.path);
3083
3084    SmokeTestResult {
3085        id: format!("smoke_{}", fixture.id),
3086        name: test_name,
3087        method: fixture.method.clone(),
3088        path: fixture.path.clone(),
3089        description,
3090        last_run: None,
3091        status: "pending".to_string(),
3092        response_time_ms: None,
3093        error_message: None,
3094        status_code: None,
3095        duration_seconds: None,
3096    }
3097}
3098
3099/// Execute a single smoke test
3100async fn execute_single_smoke_test(
3101    test: &SmokeTestResult,
3102    context: &SmokeTestContext,
3103) -> Result<(u16, u64)> {
3104    let url = format!("{}{}", context.base_url, test.path);
3105    let client = reqwest::Client::builder()
3106        .timeout(std::time::Duration::from_secs(context.timeout_seconds))
3107        .build()
3108        .map_err(|e| Error::generic(format!("Failed to create HTTP client: {}", e)))?;
3109
3110    let start_time = std::time::Instant::now();
3111
3112    let response = match test.method.as_str() {
3113        "GET" => client.get(&url).send().await,
3114        "POST" => client.post(&url).send().await,
3115        "PUT" => client.put(&url).send().await,
3116        "DELETE" => client.delete(&url).send().await,
3117        "PATCH" => client.patch(&url).send().await,
3118        "HEAD" => client.head(&url).send().await,
3119        "OPTIONS" => client.request(reqwest::Method::OPTIONS, &url).send().await,
3120        _ => {
3121            return Err(Error::generic(format!("Unsupported HTTP method: {}", test.method)));
3122        }
3123    };
3124
3125    let response_time = start_time.elapsed();
3126    let response_time_ms = response_time.as_millis() as u64;
3127
3128    match response {
3129        Ok(resp) => {
3130            let status_code = resp.status().as_u16();
3131            if (200..400).contains(&status_code) {
3132                Ok((status_code, response_time_ms))
3133            } else {
3134                Err(Error::generic(format!(
3135                    "HTTP error: {} {}",
3136                    status_code,
3137                    resp.status().canonical_reason().unwrap_or("Unknown")
3138                )))
3139            }
3140        }
3141        Err(e) => Err(Error::generic(format!("Request failed: {}", e))),
3142    }
3143}
3144
3145/// Install a plugin from a path or URL
3146pub async fn install_plugin(Json(request): Json<serde_json::Value>) -> impl IntoResponse {
3147    // Extract source from request
3148    let source = request.get("source").and_then(|s| s.as_str()).unwrap_or("");
3149
3150    if source.is_empty() {
3151        return Json(json!({
3152            "success": false,
3153            "error": "Plugin source is required"
3154        }));
3155    }
3156
3157    // Determine if source is a URL or local path
3158    let plugin_path = if source.starts_with("http://") || source.starts_with("https://") {
3159        // Download the plugin from URL
3160        match download_plugin_from_url(source).await {
3161            Ok(temp_path) => temp_path,
3162            Err(e) => {
3163                return Json(json!({
3164                    "success": false,
3165                    "error": format!("Failed to download plugin: {}", e)
3166                }))
3167            }
3168        }
3169    } else {
3170        // Use local file path
3171        std::path::PathBuf::from(source)
3172    };
3173
3174    // Check if the plugin file exists
3175    if !plugin_path.exists() {
3176        return Json(json!({
3177            "success": false,
3178            "error": format!("Plugin file not found: {}", source)
3179        }));
3180    }
3181
3182    // For now, just return success since we don't have the plugin loader infrastructure
3183    Json(json!({
3184        "success": true,
3185        "message": format!("Plugin would be installed from: {}", source)
3186    }))
3187}
3188
3189/// Download a plugin from a URL and return the temporary file path
3190async fn download_plugin_from_url(url: &str) -> Result<std::path::PathBuf> {
3191    // Create a temporary file
3192    let temp_file =
3193        std::env::temp_dir().join(format!("plugin_{}.tmp", chrono::Utc::now().timestamp()));
3194    let temp_path = temp_file.clone();
3195
3196    // Download the file
3197    let response = reqwest::get(url)
3198        .await
3199        .map_err(|e| Error::generic(format!("Failed to download from URL: {}", e)))?;
3200
3201    if !response.status().is_success() {
3202        return Err(Error::generic(format!(
3203            "HTTP error {}: {}",
3204            response.status().as_u16(),
3205            response.status().canonical_reason().unwrap_or("Unknown")
3206        )));
3207    }
3208
3209    // Read the response bytes
3210    let bytes = response
3211        .bytes()
3212        .await
3213        .map_err(|e| Error::generic(format!("Failed to read response: {}", e)))?;
3214
3215    // Write to temporary file
3216    tokio::fs::write(&temp_file, &bytes)
3217        .await
3218        .map_err(|e| Error::generic(format!("Failed to write temporary file: {}", e)))?;
3219
3220    Ok(temp_path)
3221}
3222
/// Serve the favicon.
///
/// NOTE(review): body is empty, so clients receive a zero-byte "image/png"
/// response rather than a real icon — confirm whether an asset should be
/// embedded here.
pub async fn serve_icon() -> impl IntoResponse {
    // Return a simple placeholder icon response
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3227
/// Serve the 32px icon (placeholder: empty "image/png" body).
pub async fn serve_icon_32() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3231
/// Serve the 48px icon (placeholder: empty "image/png" body).
pub async fn serve_icon_48() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3235
/// Serve the logo (placeholder: empty "image/png" body).
pub async fn serve_logo() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3239
/// Serve the 40px logo (placeholder: empty "image/png" body).
pub async fn serve_logo_40() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3243
/// Serve the 80px logo (placeholder: empty "image/png" body).
pub async fn serve_logo_80() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3247
// Missing handler functions that routes.rs expects

/// Update traffic shaping configuration.
///
/// NOTE(review): stub — accepts any JSON body and reports success without
/// applying anything to the state; confirm whether this is intentionally
/// unimplemented.
pub async fn update_traffic_shaping(
    State(_state): State<AdminState>,
    Json(_config): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Traffic shaping updated".to_string()))
}
3255
3256pub async fn import_postman(
3257    State(state): State<AdminState>,
3258    Json(request): Json<serde_json::Value>,
3259) -> Json<ApiResponse<String>> {
3260    use mockforge_core::workspace_import::{import_postman_to_workspace, WorkspaceImportConfig};
3261    use uuid::Uuid;
3262
3263    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3264    let filename = request.get("filename").and_then(|v| v.as_str());
3265    let environment = request.get("environment").and_then(|v| v.as_str());
3266    let base_url = request.get("base_url").and_then(|v| v.as_str());
3267
3268    // Import the collection
3269    let import_result = match mockforge_core::import::import_postman_collection(content, base_url) {
3270        Ok(result) => result,
3271        Err(e) => {
3272            // Record failed import
3273            let entry = ImportHistoryEntry {
3274                id: Uuid::new_v4().to_string(),
3275                format: "postman".to_string(),
3276                timestamp: chrono::Utc::now(),
3277                routes_count: 0,
3278                variables_count: 0,
3279                warnings_count: 0,
3280                success: false,
3281                filename: filename.map(|s| s.to_string()),
3282                environment: environment.map(|s| s.to_string()),
3283                base_url: base_url.map(|s| s.to_string()),
3284                error_message: Some(e.clone()),
3285            };
3286            let mut history = state.import_history.write().await;
3287            history.push(entry);
3288
3289            return Json(ApiResponse::error(format!("Postman import failed: {}", e)));
3290        }
3291    };
3292
3293    // Create workspace from imported routes
3294    let workspace_name = filename
3295        .and_then(|f| f.split('.').next())
3296        .unwrap_or("Imported Postman Collection");
3297
3298    let config = WorkspaceImportConfig {
3299        create_folders: true,
3300        base_folder_name: None,
3301        preserve_hierarchy: true,
3302        max_depth: 5,
3303    };
3304
3305    // Convert MockForgeRoute to ImportRoute
3306    let routes: Vec<ImportRoute> = import_result
3307        .routes
3308        .into_iter()
3309        .map(|route| ImportRoute {
3310            method: route.method,
3311            path: route.path,
3312            headers: route.headers,
3313            body: route.body,
3314            response: ImportResponse {
3315                status: route.response.status,
3316                headers: route.response.headers,
3317                body: route.response.body,
3318            },
3319        })
3320        .collect();
3321
3322    match import_postman_to_workspace(routes, workspace_name.to_string(), config) {
3323        Ok(workspace_result) => {
3324            // Save the workspace to persistent storage
3325            if let Err(e) =
3326                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3327            {
3328                tracing::error!("Failed to save workspace: {}", e);
3329                return Json(ApiResponse::error(format!(
3330                    "Import succeeded but failed to save workspace: {}",
3331                    e
3332                )));
3333            }
3334
3335            // Record successful import
3336            let entry = ImportHistoryEntry {
3337                id: Uuid::new_v4().to_string(),
3338                format: "postman".to_string(),
3339                timestamp: chrono::Utc::now(),
3340                routes_count: workspace_result.request_count,
3341                variables_count: import_result.variables.len(),
3342                warnings_count: workspace_result.warnings.len(),
3343                success: true,
3344                filename: filename.map(|s| s.to_string()),
3345                environment: environment.map(|s| s.to_string()),
3346                base_url: base_url.map(|s| s.to_string()),
3347                error_message: None,
3348            };
3349            let mut history = state.import_history.write().await;
3350            history.push(entry);
3351
3352            Json(ApiResponse::success(format!(
3353                "Successfully imported {} routes into workspace '{}'",
3354                workspace_result.request_count, workspace_name
3355            )))
3356        }
3357        Err(e) => {
3358            // Record failed import
3359            let entry = ImportHistoryEntry {
3360                id: Uuid::new_v4().to_string(),
3361                format: "postman".to_string(),
3362                timestamp: chrono::Utc::now(),
3363                routes_count: 0,
3364                variables_count: 0,
3365                warnings_count: 0,
3366                success: false,
3367                filename: filename.map(|s| s.to_string()),
3368                environment: environment.map(|s| s.to_string()),
3369                base_url: base_url.map(|s| s.to_string()),
3370                error_message: Some(e.to_string()),
3371            };
3372            let mut history = state.import_history.write().await;
3373            history.push(entry);
3374
3375            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3376        }
3377    }
3378}
3379
3380pub async fn import_insomnia(
3381    State(state): State<AdminState>,
3382    Json(request): Json<serde_json::Value>,
3383) -> Json<ApiResponse<String>> {
3384    use uuid::Uuid;
3385
3386    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3387    let filename = request.get("filename").and_then(|v| v.as_str());
3388    let environment = request.get("environment").and_then(|v| v.as_str());
3389    let base_url = request.get("base_url").and_then(|v| v.as_str());
3390
3391    // Import the export
3392    let import_result = match mockforge_core::import::import_insomnia_export(content, environment) {
3393        Ok(result) => result,
3394        Err(e) => {
3395            // Record failed import
3396            let entry = ImportHistoryEntry {
3397                id: Uuid::new_v4().to_string(),
3398                format: "insomnia".to_string(),
3399                timestamp: chrono::Utc::now(),
3400                routes_count: 0,
3401                variables_count: 0,
3402                warnings_count: 0,
3403                success: false,
3404                filename: filename.map(|s| s.to_string()),
3405                environment: environment.map(|s| s.to_string()),
3406                base_url: base_url.map(|s| s.to_string()),
3407                error_message: Some(e.clone()),
3408            };
3409            let mut history = state.import_history.write().await;
3410            history.push(entry);
3411
3412            return Json(ApiResponse::error(format!("Insomnia import failed: {}", e)));
3413        }
3414    };
3415
3416    // Create workspace from imported routes
3417    let workspace_name = filename
3418        .and_then(|f| f.split('.').next())
3419        .unwrap_or("Imported Insomnia Collection");
3420
3421    let _config = WorkspaceImportConfig {
3422        create_folders: true,
3423        base_folder_name: None,
3424        preserve_hierarchy: true,
3425        max_depth: 5,
3426    };
3427
3428    // Extract variables count before moving import_result
3429    let variables_count = import_result.variables.len();
3430
3431    match mockforge_core::workspace_import::create_workspace_from_insomnia(
3432        import_result,
3433        Some(workspace_name.to_string()),
3434    ) {
3435        Ok(workspace_result) => {
3436            // Save the workspace to persistent storage
3437            if let Err(e) =
3438                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3439            {
3440                tracing::error!("Failed to save workspace: {}", e);
3441                return Json(ApiResponse::error(format!(
3442                    "Import succeeded but failed to save workspace: {}",
3443                    e
3444                )));
3445            }
3446
3447            // Record successful import
3448            let entry = ImportHistoryEntry {
3449                id: Uuid::new_v4().to_string(),
3450                format: "insomnia".to_string(),
3451                timestamp: chrono::Utc::now(),
3452                routes_count: workspace_result.request_count,
3453                variables_count,
3454                warnings_count: workspace_result.warnings.len(),
3455                success: true,
3456                filename: filename.map(|s| s.to_string()),
3457                environment: environment.map(|s| s.to_string()),
3458                base_url: base_url.map(|s| s.to_string()),
3459                error_message: None,
3460            };
3461            let mut history = state.import_history.write().await;
3462            history.push(entry);
3463
3464            Json(ApiResponse::success(format!(
3465                "Successfully imported {} routes into workspace '{}'",
3466                workspace_result.request_count, workspace_name
3467            )))
3468        }
3469        Err(e) => {
3470            // Record failed import
3471            let entry = ImportHistoryEntry {
3472                id: Uuid::new_v4().to_string(),
3473                format: "insomnia".to_string(),
3474                timestamp: chrono::Utc::now(),
3475                routes_count: 0,
3476                variables_count: 0,
3477                warnings_count: 0,
3478                success: false,
3479                filename: filename.map(|s| s.to_string()),
3480                environment: environment.map(|s| s.to_string()),
3481                base_url: base_url.map(|s| s.to_string()),
3482                error_message: Some(e.to_string()),
3483            };
3484            let mut history = state.import_history.write().await;
3485            history.push(entry);
3486
3487            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3488        }
3489    }
3490}
3491
3492pub async fn import_openapi(
3493    State(_state): State<AdminState>,
3494    Json(_request): Json<serde_json::Value>,
3495) -> Json<ApiResponse<String>> {
3496    Json(ApiResponse::success("OpenAPI import completed".to_string()))
3497}
3498
3499pub async fn import_curl(
3500    State(state): State<AdminState>,
3501    Json(request): Json<serde_json::Value>,
3502) -> Json<ApiResponse<String>> {
3503    use uuid::Uuid;
3504
3505    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3506    let filename = request.get("filename").and_then(|v| v.as_str());
3507    let base_url = request.get("base_url").and_then(|v| v.as_str());
3508
3509    // Import the commands
3510    let import_result = match mockforge_core::import::import_curl_commands(content, base_url) {
3511        Ok(result) => result,
3512        Err(e) => {
3513            // Record failed import
3514            let entry = ImportHistoryEntry {
3515                id: Uuid::new_v4().to_string(),
3516                format: "curl".to_string(),
3517                timestamp: chrono::Utc::now(),
3518                routes_count: 0,
3519                variables_count: 0,
3520                warnings_count: 0,
3521                success: false,
3522                filename: filename.map(|s| s.to_string()),
3523                environment: None,
3524                base_url: base_url.map(|s| s.to_string()),
3525                error_message: Some(e.clone()),
3526            };
3527            let mut history = state.import_history.write().await;
3528            history.push(entry);
3529
3530            return Json(ApiResponse::error(format!("Curl import failed: {}", e)));
3531        }
3532    };
3533
3534    // Create workspace from imported routes
3535    let workspace_name =
3536        filename.and_then(|f| f.split('.').next()).unwrap_or("Imported Curl Commands");
3537
3538    match mockforge_core::workspace_import::create_workspace_from_curl(
3539        import_result,
3540        Some(workspace_name.to_string()),
3541    ) {
3542        Ok(workspace_result) => {
3543            // Save the workspace to persistent storage
3544            if let Err(e) =
3545                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3546            {
3547                tracing::error!("Failed to save workspace: {}", e);
3548                return Json(ApiResponse::error(format!(
3549                    "Import succeeded but failed to save workspace: {}",
3550                    e
3551                )));
3552            }
3553
3554            // Record successful import
3555            let entry = ImportHistoryEntry {
3556                id: Uuid::new_v4().to_string(),
3557                format: "curl".to_string(),
3558                timestamp: chrono::Utc::now(),
3559                routes_count: workspace_result.request_count,
3560                variables_count: 0, // Curl doesn't have variables
3561                warnings_count: workspace_result.warnings.len(),
3562                success: true,
3563                filename: filename.map(|s| s.to_string()),
3564                environment: None,
3565                base_url: base_url.map(|s| s.to_string()),
3566                error_message: None,
3567            };
3568            let mut history = state.import_history.write().await;
3569            history.push(entry);
3570
3571            Json(ApiResponse::success(format!(
3572                "Successfully imported {} routes into workspace '{}'",
3573                workspace_result.request_count, workspace_name
3574            )))
3575        }
3576        Err(e) => {
3577            // Record failed import
3578            let entry = ImportHistoryEntry {
3579                id: Uuid::new_v4().to_string(),
3580                format: "curl".to_string(),
3581                timestamp: chrono::Utc::now(),
3582                routes_count: 0,
3583                variables_count: 0,
3584                warnings_count: 0,
3585                success: false,
3586                filename: filename.map(|s| s.to_string()),
3587                environment: None,
3588                base_url: base_url.map(|s| s.to_string()),
3589                error_message: Some(e.to_string()),
3590            };
3591            let mut history = state.import_history.write().await;
3592            history.push(entry);
3593
3594            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3595        }
3596    }
3597}
3598
3599pub async fn preview_import(
3600    State(_state): State<AdminState>,
3601    Json(request): Json<serde_json::Value>,
3602) -> Json<ApiResponse<serde_json::Value>> {
3603    use mockforge_core::import::{
3604        import_curl_commands, import_insomnia_export, import_postman_collection,
3605    };
3606
3607    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3608    let filename = request.get("filename").and_then(|v| v.as_str());
3609    let environment = request.get("environment").and_then(|v| v.as_str());
3610    let base_url = request.get("base_url").and_then(|v| v.as_str());
3611
3612    // Detect format from filename or content
3613    let format = if let Some(fname) = filename {
3614        if fname.to_lowercase().contains("postman")
3615            || fname.to_lowercase().ends_with(".postman_collection")
3616        {
3617            "postman"
3618        } else if fname.to_lowercase().contains("insomnia")
3619            || fname.to_lowercase().ends_with(".insomnia")
3620        {
3621            "insomnia"
3622        } else if fname.to_lowercase().contains("curl")
3623            || fname.to_lowercase().ends_with(".sh")
3624            || fname.to_lowercase().ends_with(".curl")
3625        {
3626            "curl"
3627        } else {
3628            "unknown"
3629        }
3630    } else {
3631        "unknown"
3632    };
3633
3634    match format {
3635        "postman" => match import_postman_collection(content, base_url) {
3636            Ok(import_result) => {
3637                let routes: Vec<serde_json::Value> = import_result
3638                    .routes
3639                    .into_iter()
3640                    .map(|route| {
3641                        serde_json::json!({
3642                            "method": route.method,
3643                            "path": route.path,
3644                            "headers": route.headers,
3645                            "body": route.body,
3646                            "status_code": route.response.status,
3647                            "response": serde_json::json!({
3648                                "status": route.response.status,
3649                                "headers": route.response.headers,
3650                                "body": route.response.body
3651                            })
3652                        })
3653                    })
3654                    .collect();
3655
3656                let response = serde_json::json!({
3657                    "routes": routes,
3658                    "variables": import_result.variables,
3659                    "warnings": import_result.warnings
3660                });
3661
3662                Json(ApiResponse::success(response))
3663            }
3664            Err(e) => Json(ApiResponse::error(format!("Postman import failed: {}", e))),
3665        },
3666        "insomnia" => match import_insomnia_export(content, environment) {
3667            Ok(import_result) => {
3668                let routes: Vec<serde_json::Value> = import_result
3669                    .routes
3670                    .into_iter()
3671                    .map(|route| {
3672                        serde_json::json!({
3673                            "method": route.method,
3674                            "path": route.path,
3675                            "headers": route.headers,
3676                            "body": route.body,
3677                            "status_code": route.response.status,
3678                            "response": serde_json::json!({
3679                                "status": route.response.status,
3680                                "headers": route.response.headers,
3681                                "body": route.response.body
3682                            })
3683                        })
3684                    })
3685                    .collect();
3686
3687                let response = serde_json::json!({
3688                    "routes": routes,
3689                    "variables": import_result.variables,
3690                    "warnings": import_result.warnings
3691                });
3692
3693                Json(ApiResponse::success(response))
3694            }
3695            Err(e) => Json(ApiResponse::error(format!("Insomnia import failed: {}", e))),
3696        },
3697        "curl" => match import_curl_commands(content, base_url) {
3698            Ok(import_result) => {
3699                let routes: Vec<serde_json::Value> = import_result
3700                    .routes
3701                    .into_iter()
3702                    .map(|route| {
3703                        serde_json::json!({
3704                            "method": route.method,
3705                            "path": route.path,
3706                            "headers": route.headers,
3707                            "body": route.body,
3708                            "status_code": route.response.status,
3709                            "response": serde_json::json!({
3710                                "status": route.response.status,
3711                                "headers": route.response.headers,
3712                                "body": route.response.body
3713                            })
3714                        })
3715                    })
3716                    .collect();
3717
3718                let response = serde_json::json!({
3719                    "routes": routes,
3720                    "variables": serde_json::json!({}),
3721                    "warnings": import_result.warnings
3722                });
3723
3724                Json(ApiResponse::success(response))
3725            }
3726            Err(e) => Json(ApiResponse::error(format!("Curl import failed: {}", e))),
3727        },
3728        _ => Json(ApiResponse::error("Unsupported import format".to_string())),
3729    }
3730}
3731
3732pub async fn get_import_history(
3733    State(state): State<AdminState>,
3734) -> Json<ApiResponse<serde_json::Value>> {
3735    let history = state.import_history.read().await;
3736    let total = history.len();
3737
3738    let imports: Vec<serde_json::Value> = history
3739        .iter()
3740        .rev()
3741        .take(50)
3742        .map(|entry| {
3743            serde_json::json!({
3744                "id": entry.id,
3745                "format": entry.format,
3746                "timestamp": entry.timestamp.to_rfc3339(),
3747                "routes_count": entry.routes_count,
3748                "variables_count": entry.variables_count,
3749                "warnings_count": entry.warnings_count,
3750                "success": entry.success,
3751                "filename": entry.filename,
3752                "environment": entry.environment,
3753                "base_url": entry.base_url,
3754                "error_message": entry.error_message
3755            })
3756        })
3757        .collect();
3758
3759    let response = serde_json::json!({
3760        "imports": imports,
3761        "total": total
3762    });
3763
3764    Json(ApiResponse::success(response))
3765}
3766
3767pub async fn get_admin_api_state(
3768    State(_state): State<AdminState>,
3769) -> Json<ApiResponse<serde_json::Value>> {
3770    Json(ApiResponse::success(serde_json::json!({
3771        "status": "active"
3772    })))
3773}
3774
3775pub async fn get_admin_api_replay(
3776    State(_state): State<AdminState>,
3777) -> Json<ApiResponse<serde_json::Value>> {
3778    Json(ApiResponse::success(serde_json::json!({
3779        "replay": []
3780    })))
3781}
3782
3783pub async fn get_sse_status(
3784    State(_state): State<AdminState>,
3785) -> Json<ApiResponse<serde_json::Value>> {
3786    Json(ApiResponse::success(serde_json::json!({
3787        "available": true,
3788        "endpoint": "/sse",
3789        "config": {
3790            "event_type": "status",
3791            "interval_ms": 1000,
3792            "data_template": "{}"
3793        }
3794    })))
3795}
3796
3797pub async fn get_sse_connections(
3798    State(_state): State<AdminState>,
3799) -> Json<ApiResponse<serde_json::Value>> {
3800    Json(ApiResponse::success(serde_json::json!({
3801        "active_connections": 0
3802    })))
3803}
3804
3805// Workspace management functions
3806pub async fn get_workspaces(
3807    State(_state): State<AdminState>,
3808) -> Json<ApiResponse<Vec<serde_json::Value>>> {
3809    Json(ApiResponse::success(vec![]))
3810}
3811
3812pub async fn create_workspace(
3813    State(_state): State<AdminState>,
3814    Json(_request): Json<serde_json::Value>,
3815) -> Json<ApiResponse<String>> {
3816    Json(ApiResponse::success("Workspace created".to_string()))
3817}
3818
3819pub async fn open_workspace_from_directory(
3820    State(_state): State<AdminState>,
3821    Json(_request): Json<serde_json::Value>,
3822) -> Json<ApiResponse<String>> {
3823    Json(ApiResponse::success("Workspace opened from directory".to_string()))
3824}
3825
3826// Reality Slider API handlers
3827
3828/// Get current reality level
3829pub async fn get_reality_level(
3830    State(state): State<AdminState>,
3831) -> Json<ApiResponse<serde_json::Value>> {
3832    let engine = state.reality_engine.read().await;
3833    let level = engine.get_level().await;
3834    let config = engine.get_config().await;
3835
3836    Json(ApiResponse::success(serde_json::json!({
3837        "level": level.value(),
3838        "level_name": level.name(),
3839        "description": level.description(),
3840        "chaos": {
3841            "enabled": config.chaos.enabled,
3842            "error_rate": config.chaos.error_rate,
3843            "delay_rate": config.chaos.delay_rate,
3844        },
3845        "latency": {
3846            "base_ms": config.latency.base_ms,
3847            "jitter_ms": config.latency.jitter_ms,
3848        },
3849        "mockai": {
3850            "enabled": config.mockai.enabled,
3851        },
3852    })))
3853}
3854
/// Request body for [`set_reality_level`].
#[derive(Deserialize)]
pub struct SetRealityLevelRequest {
    /// Desired reality level; the handler validates the 1-5 range.
    level: u8,
}
3860
/// Set the reality level and hot-reload the derived settings into the
/// chaos, latency, and MockAI subsystems.
///
/// Validates the requested level (1-5), updates the reality engine, then
/// pushes the resulting config into each optional subsystem present on
/// `state`. The level change itself always succeeds once validated; if a
/// subsystem fails to pick up the change, its error message is collected
/// and returned under a `"warnings"` key in the success payload.
pub async fn set_reality_level(
    State(state): State<AdminState>,
    Json(request): Json<SetRealityLevelRequest>,
) -> Json<ApiResponse<SetRealityLevelResponse>> {
    let level = match mockforge_core::RealityLevel::from_value(request.level) {
        Some(l) => l,
        None => {
            return Json(ApiResponse::error(format!(
                "Invalid reality level: {}. Must be between 1 and 5.",
                request.level
            )));
        }
    };

    // Update reality engine
    let engine = state.reality_engine.write().await;
    engine.set_level(level).await;
    let config = engine.get_config().await;
    drop(engine); // Release lock early

    // Apply hot-reload updates to subsystems
    let mut update_errors = Vec::new();

    // Update chaos config if available
    if let Some(ref chaos_api_state) = state.chaos_api_state {
        let mut chaos_config = chaos_api_state.config.write().await;

        // Convert reality config to chaos config using helper function
        // This ensures proper mapping of all fields
        use mockforge_chaos::config::{FaultInjectionConfig, LatencyConfig};

        // A base delay of 0 means latency injection is effectively off, so
        // no LatencyConfig is produced at all in that case.
        let latency_config = if config.latency.base_ms > 0 {
            Some(LatencyConfig {
                enabled: true,
                fixed_delay_ms: Some(config.latency.base_ms),
                random_delay_range_ms: config
                    .latency
                    .max_ms
                    .map(|max| (config.latency.min_ms, max)),
                // Express jitter as a fraction of the base delay, capped at
                // 100% (guarded against division by zero by the outer check).
                jitter_percent: if config.latency.jitter_ms > 0 {
                    (config.latency.jitter_ms as f64 / config.latency.base_ms as f64).min(1.0)
                } else {
                    0.0
                },
                probability: 1.0,
            })
        } else {
            None
        };

        let fault_injection_config = if config.chaos.enabled {
            Some(FaultInjectionConfig {
                enabled: true,
                http_errors: config.chaos.status_codes.clone(),
                http_error_probability: config.chaos.error_rate,
                connection_errors: false,
                connection_error_probability: 0.0,
                timeout_errors: config.chaos.inject_timeouts,
                timeout_ms: config.chaos.timeout_ms,
                // Timeouts reuse the generic error rate when enabled.
                timeout_probability: if config.chaos.inject_timeouts {
                    config.chaos.error_rate
                } else {
                    0.0
                },
                partial_responses: false,
                partial_response_probability: 0.0,
                payload_corruption: false,
                payload_corruption_probability: 0.0,
                corruption_type: mockforge_chaos::config::CorruptionType::None,
                error_pattern: Some(mockforge_chaos::config::ErrorPattern::Random {
                    probability: config.chaos.error_rate,
                }),
                mockai_enabled: false,
            })
        } else {
            None
        };

        // Update chaos config from converted config
        chaos_config.enabled = config.chaos.enabled;
        chaos_config.latency = latency_config;
        chaos_config.fault_injection = fault_injection_config;

        drop(chaos_config);
        tracing::info!("✅ Updated chaos config for reality level {}", level.value());

        // Update middleware injectors if middleware is accessible
        // Note: The middleware reads from shared config, so injectors will be updated
        // on next request, but we can also trigger an update if middleware is stored
        // For now, the middleware reads config directly, so this is sufficient
    }

    // Update latency injector if available
    if let Some(ref latency_injector) = state.latency_injector {
        match mockforge_core::latency::LatencyInjector::update_profile_async(
            latency_injector,
            config.latency.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated latency injector for reality level {}", level.value());
            }
            Err(e) => {
                // Non-fatal: collect the error and keep updating the rest.
                let error_msg = format!("Failed to update latency injector: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // Update MockAI if available
    if let Some(ref mockai) = state.mockai {
        match mockforge_core::intelligent_behavior::MockAI::update_config_async(
            mockai,
            config.mockai.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated MockAI config for reality level {}", level.value());
            }
            Err(e) => {
                // Non-fatal: collect the error and keep going.
                let error_msg = format!("Failed to update MockAI: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // Build response
    let mut response = serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": {
            "enabled": config.chaos.enabled,
            "error_rate": config.chaos.error_rate,
            "delay_rate": config.chaos.delay_rate,
        },
        "latency": {
            "base_ms": config.latency.base_ms,
            "jitter_ms": config.latency.jitter_ms,
        },
        "mockai": {
            "enabled": config.mockai.enabled,
        },
    });

    // Add warnings if any updates failed
    if !update_errors.is_empty() {
        response["warnings"] = serde_json::json!(update_errors);
        tracing::warn!(
            "Reality level updated to {} but some subsystems failed to update: {:?}",
            level.value(),
            update_errors
        );
    } else {
        tracing::info!(
            "✅ Reality level successfully updated to {} (hot-reload applied)",
            level.value()
        );
    }

    Json(ApiResponse::success(response))
}
4027
4028/// List all available reality presets
4029pub async fn list_reality_presets(
4030    State(state): State<AdminState>,
4031) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4032    let persistence = &state.workspace_persistence;
4033    match persistence.list_reality_presets().await {
4034        Ok(preset_paths) => {
4035            let presets: Vec<serde_json::Value> = preset_paths
4036                .iter()
4037                .map(|path| {
4038                    serde_json::json!({
4039                        "id": path.file_name().and_then(|n| n.to_str()).unwrap_or("unknown"),
4040                        "path": path.to_string_lossy(),
4041                        "name": path.file_stem().and_then(|n| n.to_str()).unwrap_or("unknown"),
4042                    })
4043                })
4044                .collect();
4045            Json(ApiResponse::success(presets))
4046        }
4047        Err(e) => Json(ApiResponse::error(format!("Failed to list presets: {}", e))),
4048    }
4049}
4050
/// Request body for [`import_reality_preset`].
#[derive(Deserialize)]
pub struct ImportPresetRequest {
    /// Filesystem path of the preset JSON file to load.
    path: String,
}
4056
4057pub async fn import_reality_preset(
4058    State(state): State<AdminState>,
4059    Json(request): Json<ImportPresetRequest>,
4060) -> Json<ApiResponse<serde_json::Value>> {
4061    let persistence = &state.workspace_persistence;
4062    let path = std::path::Path::new(&request.path);
4063
4064    match persistence.import_reality_preset(path).await {
4065        Ok(preset) => {
4066            // Apply the preset to the reality engine
4067            let engine = state.reality_engine.write().await;
4068            engine.apply_preset(preset.clone()).await;
4069
4070            Json(ApiResponse::success(serde_json::json!({
4071                "name": preset.name,
4072                "description": preset.description,
4073                "level": preset.config.level.value(),
4074                "level_name": preset.config.level.name(),
4075            })))
4076        }
4077        Err(e) => Json(ApiResponse::error(format!("Failed to import preset: {}", e))),
4078    }
4079}
4080
/// Export current reality configuration as a preset
///
/// Request body for [`export_reality_preset`].
#[derive(Deserialize)]
pub struct ExportPresetRequest {
    // Preset name; also used (after normalization) to derive the output
    // file name inside the presets directory.
    name: String,
    // Optional free-form description stored with the preset.
    description: Option<String>,
}
4087
4088pub async fn export_reality_preset(
4089    State(state): State<AdminState>,
4090    Json(request): Json<ExportPresetRequest>,
4091) -> Json<ApiResponse<serde_json::Value>> {
4092    let engine = state.reality_engine.read().await;
4093    let preset = engine.create_preset(request.name.clone(), request.description.clone()).await;
4094
4095    let persistence = &state.workspace_persistence;
4096    let presets_dir = persistence.presets_dir();
4097    let filename = format!("{}.json", request.name.replace(' ', "_").to_lowercase());
4098    let output_path = presets_dir.join(&filename);
4099
4100    match persistence.export_reality_preset(&preset, &output_path).await {
4101        Ok(_) => Json(ApiResponse::success(serde_json::json!({
4102            "name": preset.name,
4103            "description": preset.description,
4104            "path": output_path.to_string_lossy(),
4105            "level": preset.config.level.value(),
4106        }))),
4107        Err(e) => Json(ApiResponse::error(format!("Failed to export preset: {}", e))),
4108    }
4109}
4110
4111// Reality Continuum API handlers
4112
4113/// Get current blend ratio for a path
4114pub async fn get_continuum_ratio(
4115    State(state): State<AdminState>,
4116    Query(params): Query<std::collections::HashMap<String, String>>,
4117) -> Json<ApiResponse<serde_json::Value>> {
4118    let path = params.get("path").cloned().unwrap_or_else(|| "/".to_string());
4119    let engine = state.continuum_engine.read().await;
4120    let ratio = engine.get_blend_ratio(&path).await;
4121    let config = engine.get_config().await;
4122    let enabled = engine.is_enabled().await;
4123
4124    Json(ApiResponse::success(serde_json::json!({
4125        "path": path,
4126        "blend_ratio": ratio,
4127        "enabled": enabled,
4128        "transition_mode": format!("{:?}", config.transition_mode),
4129        "merge_strategy": format!("{:?}", config.merge_strategy),
4130        "default_ratio": config.default_ratio,
4131    })))
4132}
4133
/// Set blend ratio for a path
///
/// Request body for [`set_continuum_ratio`].
#[derive(Deserialize)]
pub struct SetContinuumRatioRequest {
    // Route path the ratio applies to.
    path: String,
    // Desired blend ratio; the handler clamps this to [0.0, 1.0].
    ratio: f64,
}
4140
4141pub async fn set_continuum_ratio(
4142    State(state): State<AdminState>,
4143    Json(request): Json<SetContinuumRatioRequest>,
4144) -> Json<ApiResponse<serde_json::Value>> {
4145    let ratio = request.ratio.clamp(0.0, 1.0);
4146    let engine = state.continuum_engine.read().await;
4147    engine.set_blend_ratio(&request.path, ratio).await;
4148
4149    Json(ApiResponse::success(serde_json::json!({
4150        "path": request.path,
4151        "blend_ratio": ratio,
4152    })))
4153}
4154
4155/// Get time schedule
4156pub async fn get_continuum_schedule(
4157    State(state): State<AdminState>,
4158) -> Json<ApiResponse<serde_json::Value>> {
4159    let engine = state.continuum_engine.read().await;
4160    let schedule = engine.get_time_schedule().await;
4161
4162    match schedule {
4163        Some(s) => Json(ApiResponse::success(serde_json::json!({
4164            "start_time": s.start_time.to_rfc3339(),
4165            "end_time": s.end_time.to_rfc3339(),
4166            "start_ratio": s.start_ratio,
4167            "end_ratio": s.end_ratio,
4168            "curve": format!("{:?}", s.curve),
4169            "duration_days": s.duration().num_days(),
4170        }))),
4171        None => Json(ApiResponse::success(serde_json::json!(null))),
4172    }
4173}
4174
/// Update time schedule
///
/// Request body for [`set_continuum_schedule`].
#[derive(Deserialize)]
pub struct SetContinuumScheduleRequest {
    // Schedule start as an RFC 3339 timestamp (parsed by the handler).
    start_time: String,
    // Schedule end as an RFC 3339 timestamp (parsed by the handler).
    end_time: String,
    // Blend ratio at the start of the window; clamped to [0.0, 1.0].
    start_ratio: f64,
    // Blend ratio at the end of the window; clamped to [0.0, 1.0].
    end_ratio: f64,
    // Transition curve: "linear", "exponential", or "sigmoid".
    // Anything else (or absent) falls back to linear in the handler.
    curve: Option<String>,
}
4184
4185pub async fn set_continuum_schedule(
4186    State(state): State<AdminState>,
4187    Json(request): Json<SetContinuumScheduleRequest>,
4188) -> Json<ApiResponse<serde_json::Value>> {
4189    let start_time = chrono::DateTime::parse_from_rfc3339(&request.start_time)
4190        .map_err(|e| format!("Invalid start_time: {}", e))
4191        .map(|dt| dt.with_timezone(&chrono::Utc));
4192
4193    let end_time = chrono::DateTime::parse_from_rfc3339(&request.end_time)
4194        .map_err(|e| format!("Invalid end_time: {}", e))
4195        .map(|dt| dt.with_timezone(&chrono::Utc));
4196
4197    match (start_time, end_time) {
4198        (Ok(start), Ok(end)) => {
4199            let curve = request
4200                .curve
4201                .as_deref()
4202                .map(|c| match c {
4203                    "linear" => mockforge_core::TransitionCurve::Linear,
4204                    "exponential" => mockforge_core::TransitionCurve::Exponential,
4205                    "sigmoid" => mockforge_core::TransitionCurve::Sigmoid,
4206                    _ => mockforge_core::TransitionCurve::Linear,
4207                })
4208                .unwrap_or(mockforge_core::TransitionCurve::Linear);
4209
4210            let schedule = mockforge_core::TimeSchedule::with_curve(
4211                start,
4212                end,
4213                request.start_ratio.clamp(0.0, 1.0),
4214                request.end_ratio.clamp(0.0, 1.0),
4215                curve,
4216            );
4217
4218            let engine = state.continuum_engine.read().await;
4219            engine.set_time_schedule(schedule.clone()).await;
4220
4221            Json(ApiResponse::success(serde_json::json!({
4222                "start_time": schedule.start_time.to_rfc3339(),
4223                "end_time": schedule.end_time.to_rfc3339(),
4224                "start_ratio": schedule.start_ratio,
4225                "end_ratio": schedule.end_ratio,
4226                "curve": format!("{:?}", schedule.curve),
4227            })))
4228        }
4229        (Err(e), _) | (_, Err(e)) => Json(ApiResponse::error(e)),
4230    }
4231}
4232
/// Manually advance blend ratio
///
/// Request body for [`advance_continuum_ratio`].
#[derive(Deserialize)]
pub struct AdvanceContinuumRatioRequest {
    // Amount to advance the ratio by; the handler defaults this to 0.1.
    increment: Option<f64>,
}
4238
4239pub async fn advance_continuum_ratio(
4240    State(state): State<AdminState>,
4241    Json(request): Json<AdvanceContinuumRatioRequest>,
4242) -> Json<ApiResponse<serde_json::Value>> {
4243    let increment = request.increment.unwrap_or(0.1);
4244    let engine = state.continuum_engine.read().await;
4245    engine.advance_ratio(increment).await;
4246    let config = engine.get_config().await;
4247
4248    Json(ApiResponse::success(serde_json::json!({
4249        "default_ratio": config.default_ratio,
4250        "increment": increment,
4251    })))
4252}
4253
/// Enable or disable continuum
///
/// Request body for [`set_continuum_enabled`].
#[derive(Deserialize)]
pub struct SetContinuumEnabledRequest {
    // Whether the continuum engine should be enabled.
    enabled: bool,
}
4259
4260pub async fn set_continuum_enabled(
4261    State(state): State<AdminState>,
4262    Json(request): Json<SetContinuumEnabledRequest>,
4263) -> Json<ApiResponse<serde_json::Value>> {
4264    let engine = state.continuum_engine.read().await;
4265    engine.set_enabled(request.enabled).await;
4266
4267    Json(ApiResponse::success(serde_json::json!({
4268        "enabled": request.enabled,
4269    })))
4270}
4271
4272/// Get all manual overrides
4273pub async fn get_continuum_overrides(
4274    State(state): State<AdminState>,
4275) -> Json<ApiResponse<serde_json::Value>> {
4276    let engine = state.continuum_engine.read().await;
4277    let overrides = engine.get_manual_overrides().await;
4278
4279    Json(ApiResponse::success(serde_json::json!(overrides)))
4280}
4281
4282/// Clear all manual overrides
4283pub async fn clear_continuum_overrides(
4284    State(state): State<AdminState>,
4285) -> Json<ApiResponse<serde_json::Value>> {
4286    let engine = state.continuum_engine.read().await;
4287    engine.clear_manual_overrides().await;
4288
4289    Json(ApiResponse::success(serde_json::json!({
4290        "message": "All manual overrides cleared",
4291    })))
4292}
4293
4294pub async fn get_workspace(
4295    State(_state): State<AdminState>,
4296    axum::extract::Path(workspace_id): axum::extract::Path<String>,
4297) -> Json<ApiResponse<serde_json::Value>> {
4298    Json(ApiResponse::success(serde_json::json!({
4299        "workspace": {
4300            "summary": {
4301                "id": workspace_id,
4302                "name": "Mock Workspace",
4303                "description": "A mock workspace"
4304            },
4305            "folders": [],
4306            "requests": []
4307        }
4308    })))
4309}
4310
4311pub async fn delete_workspace(
4312    State(_state): State<AdminState>,
4313    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4314) -> Json<ApiResponse<String>> {
4315    Json(ApiResponse::success("Workspace deleted".to_string()))
4316}
4317
4318pub async fn set_active_workspace(
4319    State(_state): State<AdminState>,
4320    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4321) -> Json<ApiResponse<String>> {
4322    Json(ApiResponse::success("Workspace activated".to_string()))
4323}
4324
4325pub async fn create_folder(
4326    State(_state): State<AdminState>,
4327    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4328    Json(_request): Json<serde_json::Value>,
4329) -> Json<ApiResponse<String>> {
4330    Json(ApiResponse::success("Folder created".to_string()))
4331}
4332
4333pub async fn create_request(
4334    State(_state): State<AdminState>,
4335    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4336    Json(_request): Json<serde_json::Value>,
4337) -> Json<ApiResponse<String>> {
4338    Json(ApiResponse::success("Request created".to_string()))
4339}
4340
4341pub async fn execute_workspace_request(
4342    State(_state): State<AdminState>,
4343    axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
4344    Json(_request): Json<serde_json::Value>,
4345) -> Json<ApiResponse<serde_json::Value>> {
4346    Json(ApiResponse::success(serde_json::json!({
4347        "status": "executed",
4348        "response": {}
4349    })))
4350}
4351
4352pub async fn get_request_history(
4353    State(_state): State<AdminState>,
4354    axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
4355) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4356    Json(ApiResponse::success(vec![]))
4357}
4358
4359pub async fn get_folder(
4360    State(_state): State<AdminState>,
4361    axum::extract::Path((_workspace_id, folder_id)): axum::extract::Path<(String, String)>,
4362) -> Json<ApiResponse<serde_json::Value>> {
4363    Json(ApiResponse::success(serde_json::json!({
4364        "folder": {
4365            "summary": {
4366                "id": folder_id,
4367                "name": "Mock Folder",
4368                "description": "A mock folder"
4369            },
4370            "requests": []
4371        }
4372    })))
4373}
4374
4375pub async fn import_to_workspace(
4376    State(_state): State<AdminState>,
4377    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4378    Json(_request): Json<serde_json::Value>,
4379) -> Json<ApiResponse<String>> {
4380    Json(ApiResponse::success("Import to workspace completed".to_string()))
4381}
4382
4383pub async fn export_workspaces(
4384    State(_state): State<AdminState>,
4385    Json(_request): Json<serde_json::Value>,
4386) -> Json<ApiResponse<String>> {
4387    Json(ApiResponse::success("Workspaces exported".to_string()))
4388}
4389
4390// Environment management functions
4391pub async fn get_environments(
4392    State(_state): State<AdminState>,
4393    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4394) -> Json<ApiResponse<serde_json::Value>> {
4395    // Return a default global environment
4396    let environments = vec![serde_json::json!({
4397        "id": "global",
4398        "name": "Global",
4399        "description": "Global environment variables",
4400        "variable_count": 0,
4401        "is_global": true,
4402        "active": true,
4403        "order": 0
4404    })];
4405
4406    Json(ApiResponse::success(serde_json::json!({
4407        "environments": environments,
4408        "total": 1
4409    })))
4410}
4411
4412pub async fn create_environment(
4413    State(_state): State<AdminState>,
4414    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4415    Json(_request): Json<serde_json::Value>,
4416) -> Json<ApiResponse<String>> {
4417    Json(ApiResponse::success("Environment created".to_string()))
4418}
4419
4420pub async fn update_environment(
4421    State(_state): State<AdminState>,
4422    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4423    Json(_request): Json<serde_json::Value>,
4424) -> Json<ApiResponse<String>> {
4425    Json(ApiResponse::success("Environment updated".to_string()))
4426}
4427
4428pub async fn delete_environment(
4429    State(_state): State<AdminState>,
4430    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4431) -> Json<ApiResponse<String>> {
4432    Json(ApiResponse::success("Environment deleted".to_string()))
4433}
4434
4435pub async fn set_active_environment(
4436    State(_state): State<AdminState>,
4437    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4438) -> Json<ApiResponse<String>> {
4439    Json(ApiResponse::success("Environment activated".to_string()))
4440}
4441
4442pub async fn update_environments_order(
4443    State(_state): State<AdminState>,
4444    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4445    Json(_request): Json<serde_json::Value>,
4446) -> Json<ApiResponse<String>> {
4447    Json(ApiResponse::success("Environment order updated".to_string()))
4448}
4449
4450pub async fn get_environment_variables(
4451    State(_state): State<AdminState>,
4452    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4453) -> Json<ApiResponse<serde_json::Value>> {
4454    Json(ApiResponse::success(serde_json::json!({
4455        "variables": []
4456    })))
4457}
4458
4459pub async fn set_environment_variable(
4460    State(_state): State<AdminState>,
4461    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4462    Json(_request): Json<serde_json::Value>,
4463) -> Json<ApiResponse<String>> {
4464    Json(ApiResponse::success("Environment variable set".to_string()))
4465}
4466
4467pub async fn remove_environment_variable(
4468    State(_state): State<AdminState>,
4469    axum::extract::Path((_workspace_id, _environment_id, _variable_name)): axum::extract::Path<(
4470        String,
4471        String,
4472        String,
4473    )>,
4474) -> Json<ApiResponse<String>> {
4475    Json(ApiResponse::success("Environment variable removed".to_string()))
4476}
4477
4478// Autocomplete functions
4479pub async fn get_autocomplete_suggestions(
4480    State(_state): State<AdminState>,
4481    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4482    Json(_request): Json<serde_json::Value>,
4483) -> Json<ApiResponse<serde_json::Value>> {
4484    Json(ApiResponse::success(serde_json::json!({
4485        "suggestions": [],
4486        "start_position": 0,
4487        "end_position": 0
4488    })))
4489}
4490
4491// Sync management functions
4492pub async fn get_sync_status(
4493    State(_state): State<AdminState>,
4494    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4495) -> Json<ApiResponse<serde_json::Value>> {
4496    Json(ApiResponse::success(serde_json::json!({
4497        "status": "disabled"
4498    })))
4499}
4500
4501pub async fn configure_sync(
4502    State(_state): State<AdminState>,
4503    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4504    Json(_request): Json<serde_json::Value>,
4505) -> Json<ApiResponse<String>> {
4506    Json(ApiResponse::success("Sync configured".to_string()))
4507}
4508
4509pub async fn disable_sync(
4510    State(_state): State<AdminState>,
4511    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4512) -> Json<ApiResponse<String>> {
4513    Json(ApiResponse::success("Sync disabled".to_string()))
4514}
4515
4516pub async fn trigger_sync(
4517    State(_state): State<AdminState>,
4518    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4519) -> Json<ApiResponse<String>> {
4520    Json(ApiResponse::success("Sync triggered".to_string()))
4521}
4522
4523pub async fn get_sync_changes(
4524    State(_state): State<AdminState>,
4525    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4526) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4527    Json(ApiResponse::success(vec![]))
4528}
4529
4530pub async fn confirm_sync_changes(
4531    State(_state): State<AdminState>,
4532    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4533    Json(_request): Json<serde_json::Value>,
4534) -> Json<ApiResponse<String>> {
4535    Json(ApiResponse::success("Sync changes confirmed".to_string()))
4536}
4537
4538// Plugin management functions
4539pub async fn validate_plugin(
4540    State(_state): State<AdminState>,
4541    Json(_request): Json<serde_json::Value>,
4542) -> Json<ApiResponse<String>> {
4543    Json(ApiResponse::success("Plugin validated".to_string()))
4544}
4545
4546// Missing functions that routes.rs expects
4547pub async fn clear_import_history(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
4548    let mut history = state.import_history.write().await;
4549    history.clear();
4550    Json(ApiResponse::success("Import history cleared".to_string()))
4551}
4552
#[cfg(test)]
mod tests {
    use super::*;

    // RequestMetrics can be constructed field-by-field and the plain
    // counters/vectors round-trip.
    #[test]
    fn test_request_metrics_creation() {
        use std::collections::HashMap;

        let metrics = RequestMetrics {
            total_requests: 100,
            active_connections: 5,
            requests_by_endpoint: HashMap::new(),
            response_times: vec![10, 20, 30],
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint: HashMap::new(),
            last_request_by_endpoint: HashMap::new(),
        };

        assert_eq!(metrics.total_requests, 100);
        assert_eq!(metrics.active_connections, 5);
        assert_eq!(metrics.response_times.len(), 3);
    }

    // Basic construction check for SystemMetrics.
    #[test]
    fn test_system_metrics_creation() {
        let metrics = SystemMetrics {
            cpu_usage_percent: 45.5,
            memory_usage_mb: 100,
            active_threads: 10,
        };

        assert_eq!(metrics.active_threads, 10);
        assert!(metrics.cpu_usage_percent > 0.0);
        assert_eq!(metrics.memory_usage_mb, 100);
    }

    // A TimeSeriesPoint stores the value it was constructed with.
    #[test]
    fn test_time_series_point() {
        let point = TimeSeriesPoint {
            timestamp: chrono::Utc::now(),
            value: 42.5,
        };

        assert_eq!(point.value, 42.5);
    }

    // RestartStatus reflects an in-progress restart with a reason set.
    #[test]
    fn test_restart_status() {
        let status = RestartStatus {
            in_progress: true,
            initiated_at: Some(chrono::Utc::now()),
            reason: Some("Manual restart".to_string()),
            success: None,
        };

        assert!(status.in_progress);
        assert!(status.reason.is_some());
    }

    // ConfigurationState aggregates the four config sub-structs; spot-check
    // one representative field from each.
    #[test]
    fn test_configuration_state() {
        use std::collections::HashMap;

        let state = ConfigurationState {
            latency_profile: crate::models::LatencyProfile {
                name: "default".to_string(),
                base_ms: 100,
                jitter_ms: 10,
                tag_overrides: HashMap::new(),
            },
            fault_config: crate::models::FaultConfig {
                enabled: false,
                failure_rate: 0.0,
                status_codes: vec![],
                active_failures: 0,
            },
            proxy_config: crate::models::ProxyConfig {
                enabled: false,
                upstream_url: None,
                timeout_seconds: 30,
                requests_proxied: 0,
            },
            validation_settings: crate::models::ValidationSettings {
                mode: "off".to_string(),
                aggregate_errors: false,
                validate_responses: false,
                overrides: HashMap::new(),
            },
        };

        assert_eq!(state.latency_profile.name, "default");
        assert!(!state.fault_config.enabled);
        assert!(!state.proxy_config.enabled);
    }

    // AdminState::new stores the HTTP address, the API-enabled flag, and the
    // admin port; the remaining optional collaborators are left as None here.
    #[test]
    fn test_admin_state_new() {
        let http_addr: std::net::SocketAddr = "127.0.0.1:3000".parse().unwrap();
        let state = AdminState::new(
            Some(http_addr),
            None,
            None,
            None,
            true,
            8080,
            None,
            None,
            None,
            None,
            None,
        );

        assert_eq!(state.http_server_addr, Some(http_addr));
        assert!(state.api_enabled);
        assert_eq!(state.admin_port, 8080);
    }
}