// mockforge_ui — handlers.rs
1//! Request handlers for the admin UI
2//!
3//! This module has been refactored into sub-modules for better organization:
4//! - assets: Static asset serving
5//! - admin: Admin dashboard and server management
6//! - workspace: Workspace management operations
7//! - plugin: Plugin management operations
8//! - sync: Synchronization operations
9//! - import: Data import operations
10//! - fixtures: Fixture management operations
11
12use axum::{
13    extract::{Path, Query, State},
14    http::{self, StatusCode},
15    response::{
16        sse::{Event, Sse},
17        Html, IntoResponse, Json,
18    },
19};
20use chrono::Utc;
21use futures_util::stream::{self, Stream};
22use mockforge_core::{Error, Result};
23use mockforge_plugin_loader::PluginRegistry;
24use serde::{Deserialize, Serialize};
25use serde_json::json;
26use std::collections::HashMap;
27use std::convert::Infallible;
28use std::process::Command;
29use std::process::Stdio;
30use std::sync::Arc;
31use std::time::Duration;
32use sysinfo::System;
33use tokio::sync::RwLock;
34
35// Import all types from models
36use crate::models::{
37    ApiResponse, ConfigUpdate, DashboardData, DashboardSystemInfo, FaultConfig, HealthCheck,
38    LatencyProfile, LogFilter, MetricsData, ProxyConfig, RequestLog, RouteInfo, ServerInfo,
39    ServerStatus, SimpleMetricsData, SystemInfo, TrafficShapingConfig, ValidationSettings,
40    ValidationUpdate,
41};
42
43// Import import types from core
44use mockforge_core::workspace_import::{ImportResponse, ImportRoute};
45use mockforge_plugin_loader::{
46    GitPluginConfig, GitPluginLoader, PluginLoader, PluginLoaderConfig, PluginSource,
47    RemotePluginConfig, RemotePluginLoader,
48};
49
50// Handler sub-modules
51pub mod admin;
52pub mod ai_studio;
53pub mod analytics;
54pub mod analytics_stream;
55pub mod analytics_v2;
56pub mod assets;
57pub mod behavioral_cloning;
58pub mod chains;
59pub mod chaos_api;
60pub mod community;
61pub mod contract_diff;
62pub mod coverage_metrics;
63pub mod failure_analysis;
64pub mod federation_api;
65pub mod graph;
66pub mod health;
67pub mod migration;
68pub mod pillar_analytics;
69pub mod playground;
70pub mod plugin;
71pub mod promotions;
72pub mod protocol_contracts;
73pub mod recorder_api;
74pub mod vbr_api;
75pub mod verification;
76pub mod voice;
77pub mod workspaces;
78pub mod world_state_proxy;
79
80// Re-export commonly used types
81pub use assets::*;
82pub use chains::*;
83pub use graph::*;
84pub use migration::*;
85pub use plugin::*;
86
87// Import workspace persistence
88use mockforge_core::workspace_import::WorkspaceImportConfig;
89use mockforge_core::workspace_persistence::WorkspacePersistence;
90
/// Request metrics for tracking
///
/// Aggregated in-memory request statistics, updated by
/// `AdminState::record_request`. Endpoint keys use the `"METHOD path"` format.
#[derive(Debug, Clone, Default)]
pub struct RequestMetrics {
    /// Total requests served
    pub total_requests: u64,
    /// Active connections
    pub active_connections: u64,
    /// Requests by endpoint
    pub requests_by_endpoint: HashMap<String, u64>,
    /// Response times (last N measurements)
    // Capped at the last 100 samples by record_request.
    pub response_times: Vec<u64>,
    /// Response times by endpoint (last N measurements per endpoint)
    // Capped at the last 50 samples per endpoint by record_request.
    pub response_times_by_endpoint: HashMap<String, Vec<u64>>,
    /// Error count by endpoint
    // A request counts as an error when its status code is >= 400.
    pub errors_by_endpoint: HashMap<String, u64>,
    /// Last request timestamp by endpoint
    pub last_request_by_endpoint: HashMap<String, chrono::DateTime<Utc>>,
}
109
/// System metrics
///
/// Host-level resource readings, refreshed every 10 seconds by the
/// background task spawned in `AdminState::start_system_monitoring`.
#[derive(Debug, Clone)]
pub struct SystemMetrics {
    /// Memory usage in MB
    pub memory_usage_mb: u64,
    /// CPU usage percentage
    pub cpu_usage_percent: f64,
    /// Active threads
    // Approximated by the number of CPU cores (see start_system_monitoring).
    pub active_threads: u32,
}
120
/// Time series data point
///
/// A single (timestamp, value) sample used by [`TimeSeriesData`].
#[derive(Debug, Clone)]
pub struct TimeSeriesPoint {
    /// Timestamp
    pub timestamp: chrono::DateTime<Utc>,
    /// Value
    pub value: f64,
}
129
/// Time series data for tracking metrics over time
///
/// Each series is capped at the most recent 100 points by the
/// `AdminState` updater methods to bound memory usage.
#[derive(Debug, Clone, Default)]
pub struct TimeSeriesData {
    /// Memory usage over time
    pub memory_usage: Vec<TimeSeriesPoint>,
    /// CPU usage over time
    pub cpu_usage: Vec<TimeSeriesPoint>,
    /// Request count over time
    pub request_count: Vec<TimeSeriesPoint>,
    /// Response time over time
    pub response_time: Vec<TimeSeriesPoint>,
}
142
/// Restart status tracking
///
/// State machine for server restarts: `initiate_restart` sets
/// `in_progress`; `complete_restart` clears it and records the outcome.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestartStatus {
    /// Whether a restart is currently in progress
    pub in_progress: bool,
    /// Timestamp when restart was initiated
    pub initiated_at: Option<chrono::DateTime<Utc>>,
    /// Restart reason/message
    pub reason: Option<String>,
    /// Whether restart was successful
    // `None` while a restart is pending or none has been attempted.
    pub success: Option<bool>,
}
155
/// Fixture metadata
///
/// Summary of a recorded fixture file as exposed to the admin UI.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureInfo {
    /// Unique identifier for the fixture
    pub id: String,
    /// Protocol type (http, websocket, grpc)
    pub protocol: String,
    /// HTTP method or operation type
    pub method: String,
    /// Request path
    pub path: String,
    /// When the fixture was saved
    pub saved_at: chrono::DateTime<Utc>,
    /// File size in bytes
    pub file_size: u64,
    /// File path relative to fixtures directory
    pub file_path: String,
    /// Request fingerprint hash
    pub fingerprint: String,
    /// Additional metadata from the fixture file
    // Free-form JSON; schema depends on the protocol that produced the fixture.
    pub metadata: serde_json::Value,
}
178
/// Smoke test result
///
/// Outcome of one smoke test run, stored (most recent 100) in
/// `AdminState::smoke_test_results`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SmokeTestResult {
    /// Test ID
    // Used as the upsert key by `AdminState::update_smoke_test_result`.
    pub id: String,
    /// Test name
    pub name: String,
    /// HTTP method
    pub method: String,
    /// Request path
    pub path: String,
    /// Test description
    pub description: String,
    /// When the test was last run
    pub last_run: Option<chrono::DateTime<Utc>>,
    /// Test status (passed, failed, running, pending)
    pub status: String,
    /// Response time in milliseconds
    pub response_time_ms: Option<u64>,
    /// Error message if test failed
    pub error_message: Option<String>,
    /// HTTP status code received
    pub status_code: Option<u16>,
    /// Test duration in seconds
    pub duration_seconds: Option<f64>,
}
205
/// Smoke test execution context
///
/// Parameters that control how a batch of smoke tests is executed.
#[derive(Debug, Clone)]
pub struct SmokeTestContext {
    /// Base URL for the service being tested
    pub base_url: String,
    /// Timeout for individual tests
    pub timeout_seconds: u64,
    /// Whether to run tests in parallel
    pub parallel: bool,
}
216
/// Configuration state
///
/// Mutable runtime configuration exposed through the admin API.
/// Held behind an `Arc<RwLock<_>>` in [`AdminState`] and mutated by the
/// `update_*_config` methods.
#[derive(Debug, Clone, Serialize)]
pub struct ConfigurationState {
    /// Latency profile
    pub latency_profile: LatencyProfile,
    /// Fault configuration
    pub fault_config: FaultConfig,
    /// Proxy configuration
    pub proxy_config: ProxyConfig,
    /// Validation settings
    pub validation_settings: ValidationSettings,
    /// Traffic shaping settings
    pub traffic_shaping: TrafficShapingConfig,
}
231
/// Import history entry
///
/// Record of one data import (Postman/Insomnia/curl) shown in the admin UI.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportHistoryEntry {
    /// Unique ID for the import
    pub id: String,
    /// Import format (postman, insomnia, curl)
    pub format: String,
    /// Timestamp of the import
    pub timestamp: chrono::DateTime<Utc>,
    /// Number of routes imported
    pub routes_count: usize,
    /// Number of variables imported
    pub variables_count: usize,
    /// Number of warnings
    pub warnings_count: usize,
    /// Whether the import was successful
    pub success: bool,
    /// Filename of the imported file
    pub filename: Option<String>,
    /// Environment used
    pub environment: Option<String>,
    /// Base URL used
    pub base_url: Option<String>,
    /// Error message if failed
    pub error_message: Option<String>,
}
258
/// Shared state for the admin UI
///
/// Cloning is cheap: all mutable state lives behind `Arc` (mostly
/// `Arc<RwLock<_>>`), so handlers and background tasks can each hold a clone.
#[derive(Clone)]
pub struct AdminState {
    /// HTTP server address
    pub http_server_addr: Option<std::net::SocketAddr>,
    /// WebSocket server address
    pub ws_server_addr: Option<std::net::SocketAddr>,
    /// gRPC server address
    pub grpc_server_addr: Option<std::net::SocketAddr>,
    /// GraphQL server address
    pub graphql_server_addr: Option<std::net::SocketAddr>,
    /// Whether API endpoints are enabled
    pub api_enabled: bool,
    /// Admin server port
    pub admin_port: u16,
    /// Start time
    // Set once in `new()`; used to compute uptime for the dashboard.
    pub start_time: chrono::DateTime<Utc>,
    /// Request metrics (protected by RwLock)
    pub metrics: Arc<RwLock<RequestMetrics>>,
    /// System metrics (protected by RwLock)
    pub system_metrics: Arc<RwLock<SystemMetrics>>,
    /// Configuration (protected by RwLock)
    pub config: Arc<RwLock<ConfigurationState>>,
    /// Request logs (protected by RwLock)
    // Bounded to the most recent 1000 entries by record_request.
    pub logs: Arc<RwLock<Vec<RequestLog>>>,
    /// Time series data (protected by RwLock)
    pub time_series: Arc<RwLock<TimeSeriesData>>,
    /// Restart status (protected by RwLock)
    pub restart_status: Arc<RwLock<RestartStatus>>,
    /// Smoke test results (protected by RwLock)
    pub smoke_test_results: Arc<RwLock<Vec<SmokeTestResult>>>,
    /// Import history (protected by RwLock)
    pub import_history: Arc<RwLock<Vec<ImportHistoryEntry>>>,
    /// Workspace persistence
    pub workspace_persistence: Arc<WorkspacePersistence>,
    /// Plugin registry (protected by RwLock)
    pub plugin_registry: Arc<RwLock<PluginRegistry>>,
    /// Reality engine for managing realism levels
    pub reality_engine: Arc<RwLock<mockforge_core::RealityEngine>>,
    /// Reality Continuum engine for blending mock and real data sources
    pub continuum_engine: Arc<RwLock<mockforge_core::RealityContinuumEngine>>,
    /// Chaos API state for hot-reload support (optional)
    /// Contains config that can be updated at runtime
    pub chaos_api_state: Option<Arc<mockforge_chaos::api::ChaosApiState>>,
    /// Latency injector for HTTP middleware (optional)
    /// Allows updating latency profile at runtime
    pub latency_injector: Option<Arc<RwLock<mockforge_core::latency::LatencyInjector>>>,
    /// MockAI instance (optional)
    /// Allows updating MockAI configuration at runtime
    pub mockai: Option<Arc<RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
    /// Traffic recorder (optional)
    pub recorder: Option<Arc<mockforge_recorder::Recorder>>,
    /// Federation instance (optional)
    pub federation: Option<Arc<mockforge_federation::Federation>>,
    /// VBR engine (optional)
    pub vbr_engine: Option<Arc<mockforge_vbr::VbrEngine>>,
}
316
317impl AdminState {
    /// Start system monitoring background task
    ///
    /// Spawns a detached Tokio task that samples CPU and memory via `sysinfo`
    /// every 10 seconds and stores the readings with
    /// [`Self::update_system_metrics`]. The task holds a clone of `self`
    /// (cheap — shared state is `Arc`-backed) and loops for the life of the
    /// process; there is no shutdown signal.
    pub async fn start_system_monitoring(&self) {
        let state_clone = self.clone();
        tokio::spawn(async move {
            let mut sys = System::new_all();
            let mut refresh_count = 0u64;

            tracing::info!("Starting system monitoring background task");

            loop {
                // Refresh system information
                sys.refresh_all();

                // Get CPU usage
                let cpu_usage = sys.global_cpu_usage();

                // Get memory usage
                let _total_memory = sys.total_memory() as f64;
                let used_memory = sys.used_memory() as f64;
                // NOTE(review): this conversion assumes `used_memory()` reports
                // bytes (sysinfo >= 0.30); older versions returned KiB — confirm
                // the pinned crate version matches.
                let memory_usage_mb = used_memory / 1024.0 / 1024.0;

                // Get thread count (use available CPU cores as approximate measure)
                let active_threads = sys.cpus().len() as u32;

                // Update system metrics
                let memory_mb_u64 = memory_usage_mb as u64;

                // Only log every 10 refreshes to avoid spam
                if refresh_count.is_multiple_of(10) {
                    tracing::debug!(
                        "System metrics updated: CPU={:.1}%, Mem={}MB, Threads={}",
                        cpu_usage,
                        memory_mb_u64,
                        active_threads
                    );
                }

                state_clone
                    .update_system_metrics(memory_mb_u64, cpu_usage as f64, active_threads)
                    .await;

                refresh_count += 1;

                // Sleep for 10 seconds between updates
                tokio::time::sleep(Duration::from_secs(10)).await;
            }
        });
    }
366
    /// Create new admin state
    ///
    /// Builds an [`AdminState`] with empty metrics/logs/history, default
    /// configuration, and workspace persistence rooted at `./workspaces`.
    ///
    /// # Arguments
    /// * `http_server_addr` - HTTP server address
    /// * `ws_server_addr` - WebSocket server address
    /// * `grpc_server_addr` - gRPC server address
    /// * `graphql_server_addr` - GraphQL server address
    /// * `api_enabled` - Whether API endpoints are enabled
    /// * `admin_port` - Admin server port
    /// * `chaos_api_state` - Optional chaos API state for hot-reload support
    /// * `latency_injector` - Optional latency injector for hot-reload support
    /// * `mockai` - Optional MockAI instance for hot-reload support
    /// * `continuum_config` - Optional Reality Continuum configuration
    /// * `virtual_clock` - Optional virtual clock for time-based progression
    /// * `recorder` - Optional traffic recorder
    /// * `federation` - Optional federation instance
    /// * `vbr_engine` - Optional VBR engine
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        http_server_addr: Option<std::net::SocketAddr>,
        ws_server_addr: Option<std::net::SocketAddr>,
        grpc_server_addr: Option<std::net::SocketAddr>,
        graphql_server_addr: Option<std::net::SocketAddr>,
        api_enabled: bool,
        admin_port: u16,
        chaos_api_state: Option<Arc<mockforge_chaos::api::ChaosApiState>>,
        latency_injector: Option<Arc<RwLock<mockforge_core::latency::LatencyInjector>>>,
        mockai: Option<Arc<RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
        continuum_config: Option<mockforge_core::ContinuumConfig>,
        virtual_clock: Option<Arc<mockforge_core::VirtualClock>>,
        recorder: Option<Arc<mockforge_recorder::Recorder>>,
        federation: Option<Arc<mockforge_federation::Federation>>,
        vbr_engine: Option<Arc<mockforge_vbr::VbrEngine>>,
    ) -> Self {
        // Uptime is measured from construction time.
        let start_time = Utc::now();

        Self {
            http_server_addr,
            ws_server_addr,
            grpc_server_addr,
            graphql_server_addr,
            api_enabled,
            admin_port,
            start_time,
            metrics: Arc::new(RwLock::new(RequestMetrics::default())),
            system_metrics: Arc::new(RwLock::new(SystemMetrics {
                memory_usage_mb: 0,
                cpu_usage_percent: 0.0,
                active_threads: 0,
            })),
            // Default configuration used until operators change it via the API.
            config: Arc::new(RwLock::new(ConfigurationState {
                latency_profile: LatencyProfile {
                    name: "default".to_string(),
                    base_ms: 50,
                    jitter_ms: 20,
                    tag_overrides: HashMap::new(),
                },
                fault_config: FaultConfig {
                    enabled: false,
                    failure_rate: 0.0,
                    status_codes: vec![500, 502, 503],
                    active_failures: 0,
                },
                proxy_config: ProxyConfig {
                    enabled: false,
                    upstream_url: None,
                    timeout_seconds: 30,
                    requests_proxied: 0,
                },
                validation_settings: ValidationSettings {
                    mode: "enforce".to_string(),
                    aggregate_errors: true,
                    validate_responses: false,
                    overrides: HashMap::new(),
                },
                traffic_shaping: TrafficShapingConfig {
                    enabled: false,
                    bandwidth: crate::models::BandwidthConfig {
                        enabled: false,
                        max_bytes_per_sec: 1_048_576,
                        burst_capacity_bytes: 10_485_760,
                        tag_overrides: HashMap::new(),
                    },
                    burst_loss: crate::models::BurstLossConfig {
                        enabled: false,
                        burst_probability: 0.1,
                        burst_duration_ms: 5000,
                        loss_rate_during_burst: 0.5,
                        recovery_time_ms: 30000,
                        tag_overrides: HashMap::new(),
                    },
                },
            })),
            logs: Arc::new(RwLock::new(Vec::new())),
            time_series: Arc::new(RwLock::new(TimeSeriesData::default())),
            restart_status: Arc::new(RwLock::new(RestartStatus {
                in_progress: false,
                initiated_at: None,
                reason: None,
                success: None,
            })),
            smoke_test_results: Arc::new(RwLock::new(Vec::new())),
            import_history: Arc::new(RwLock::new(Vec::new())),
            workspace_persistence: Arc::new(WorkspacePersistence::new("./workspaces")),
            plugin_registry: Arc::new(RwLock::new(PluginRegistry::new())),
            reality_engine: Arc::new(RwLock::new(mockforge_core::RealityEngine::new())),
            // The continuum engine is wired to the virtual clock when one is
            // supplied so blending can follow simulated time.
            continuum_engine: Arc::new(RwLock::new({
                let config = continuum_config.unwrap_or_default();
                if let Some(clock) = virtual_clock {
                    mockforge_core::RealityContinuumEngine::with_virtual_clock(config, clock)
                } else {
                    mockforge_core::RealityContinuumEngine::new(config)
                }
            })),
            chaos_api_state,
            latency_injector,
            mockai,
            recorder,
            federation,
            vbr_engine,
        }
    }
489
    /// Record a request
    ///
    /// Updates aggregate metrics, the time-series history, and the bounded
    /// request log for a single completed request.
    ///
    /// # Arguments
    /// * `method` / `path` - combined into the `"METHOD path"` endpoint key
    /// * `status_code` - responses >= 400 are counted as endpoint errors
    /// * `response_time_ms` - appended to the global and per-endpoint samples
    /// * `error` - optional error message stored on the log entry
    ///
    /// Lock order: `metrics` is taken and released first, then `time_series`
    /// (inside `update_time_series_on_request`), then `logs` — never nested.
    pub async fn record_request(
        &self,
        method: &str,
        path: &str,
        status_code: u16,
        response_time_ms: u64,
        error: Option<String>,
    ) {
        let mut metrics = self.metrics.write().await;

        metrics.total_requests += 1;
        let endpoint = format!("{} {}", method, path);
        *metrics.requests_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;

        if status_code >= 400 {
            *metrics.errors_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
        }

        // Keep only last 100 response times globally
        metrics.response_times.push(response_time_ms);
        if metrics.response_times.len() > 100 {
            metrics.response_times.remove(0);
        }

        // Keep only last 50 response times per endpoint
        let endpoint_times = metrics
            .response_times_by_endpoint
            .entry(endpoint.clone())
            .or_insert_with(Vec::new);
        endpoint_times.push(response_time_ms);
        if endpoint_times.len() > 50 {
            endpoint_times.remove(0);
        }

        // Update last request timestamp for this endpoint
        metrics.last_request_by_endpoint.insert(endpoint, Utc::now());

        // Capture total_requests before releasing the lock
        let total_requests = metrics.total_requests;

        // Release metrics lock before acquiring other locks
        drop(metrics);

        // Update time series data for request count and response time
        self.update_time_series_on_request(response_time_ms, total_requests).await;

        // Record the log
        let mut logs = self.logs.write().await;
        // Log id is derived from the running request counter.
        let log_entry = RequestLog {
            id: format!("req_{}", total_requests),
            timestamp: Utc::now(),
            method: method.to_string(),
            path: path.to_string(),
            status_code,
            response_time_ms,
            client_ip: None,
            user_agent: None,
            headers: HashMap::new(),
            response_size_bytes: 0,
            error_message: error,
        };

        logs.push(log_entry);

        // Keep only last 1000 logs
        if logs.len() > 1000 {
            logs.remove(0);
        }
    }
560
561    /// Get current metrics
562    pub async fn get_metrics(&self) -> RequestMetrics {
563        self.metrics.read().await.clone()
564    }
565
566    /// Update system metrics
567    pub async fn update_system_metrics(&self, memory_mb: u64, cpu_percent: f64, threads: u32) {
568        let mut system_metrics = self.system_metrics.write().await;
569        system_metrics.memory_usage_mb = memory_mb;
570        system_metrics.cpu_usage_percent = cpu_percent;
571        system_metrics.active_threads = threads;
572
573        // Update time series data
574        self.update_time_series_data(memory_mb as f64, cpu_percent).await;
575    }
576
577    /// Update time series data with new metrics
578    async fn update_time_series_data(&self, memory_mb: f64, cpu_percent: f64) {
579        let now = Utc::now();
580        let mut time_series = self.time_series.write().await;
581
582        // Add memory usage data point
583        time_series.memory_usage.push(TimeSeriesPoint {
584            timestamp: now,
585            value: memory_mb,
586        });
587
588        // Add CPU usage data point
589        time_series.cpu_usage.push(TimeSeriesPoint {
590            timestamp: now,
591            value: cpu_percent,
592        });
593
594        // Add request count data point (from current metrics)
595        let metrics = self.metrics.read().await;
596        time_series.request_count.push(TimeSeriesPoint {
597            timestamp: now,
598            value: metrics.total_requests as f64,
599        });
600
601        // Add average response time data point
602        let avg_response_time = if !metrics.response_times.is_empty() {
603            metrics.response_times.iter().sum::<u64>() as f64 / metrics.response_times.len() as f64
604        } else {
605            0.0
606        };
607        time_series.response_time.push(TimeSeriesPoint {
608            timestamp: now,
609            value: avg_response_time,
610        });
611
612        // Keep only last 100 data points for each metric to prevent memory bloat
613        const MAX_POINTS: usize = 100;
614        if time_series.memory_usage.len() > MAX_POINTS {
615            time_series.memory_usage.remove(0);
616        }
617        if time_series.cpu_usage.len() > MAX_POINTS {
618            time_series.cpu_usage.remove(0);
619        }
620        if time_series.request_count.len() > MAX_POINTS {
621            time_series.request_count.remove(0);
622        }
623        if time_series.response_time.len() > MAX_POINTS {
624            time_series.response_time.remove(0);
625        }
626    }
627
628    /// Get system metrics
629    pub async fn get_system_metrics(&self) -> SystemMetrics {
630        self.system_metrics.read().await.clone()
631    }
632
633    /// Get time series data
634    pub async fn get_time_series_data(&self) -> TimeSeriesData {
635        self.time_series.read().await.clone()
636    }
637
638    /// Get restart status
639    pub async fn get_restart_status(&self) -> RestartStatus {
640        self.restart_status.read().await.clone()
641    }
642
643    /// Initiate server restart
644    pub async fn initiate_restart(&self, reason: String) -> Result<()> {
645        let mut status = self.restart_status.write().await;
646
647        if status.in_progress {
648            return Err(Error::generic("Restart already in progress".to_string()));
649        }
650
651        status.in_progress = true;
652        status.initiated_at = Some(Utc::now());
653        status.reason = Some(reason);
654        status.success = None;
655
656        Ok(())
657    }
658
659    /// Complete restart (success or failure)
660    pub async fn complete_restart(&self, success: bool) {
661        let mut status = self.restart_status.write().await;
662        status.in_progress = false;
663        status.success = Some(success);
664    }
665
666    /// Get smoke test results
667    pub async fn get_smoke_test_results(&self) -> Vec<SmokeTestResult> {
668        self.smoke_test_results.read().await.clone()
669    }
670
671    /// Update smoke test result
672    pub async fn update_smoke_test_result(&self, result: SmokeTestResult) {
673        let mut results = self.smoke_test_results.write().await;
674
675        // Find existing result by ID and update, or add new one
676        if let Some(existing) = results.iter_mut().find(|r| r.id == result.id) {
677            *existing = result;
678        } else {
679            results.push(result);
680        }
681
682        // Keep only last 100 test results
683        if results.len() > 100 {
684            results.remove(0);
685        }
686    }
687
688    /// Clear all smoke test results
689    pub async fn clear_smoke_test_results(&self) {
690        let mut results = self.smoke_test_results.write().await;
691        results.clear();
692    }
693
694    /// Update time series data when a request is recorded
695    async fn update_time_series_on_request(&self, response_time_ms: u64, total_requests: u64) {
696        let now = Utc::now();
697        let mut time_series = self.time_series.write().await;
698
699        // Add request count data point
700        time_series.request_count.push(TimeSeriesPoint {
701            timestamp: now,
702            value: total_requests as f64,
703        });
704
705        // Add response time data point
706        time_series.response_time.push(TimeSeriesPoint {
707            timestamp: now,
708            value: response_time_ms as f64,
709        });
710
711        // Keep only last 100 data points for each metric to prevent memory bloat
712        const MAX_POINTS: usize = 100;
713        if time_series.request_count.len() > MAX_POINTS {
714            time_series.request_count.remove(0);
715        }
716        if time_series.response_time.len() > MAX_POINTS {
717            time_series.response_time.remove(0);
718        }
719    }
720
721    /// Get current configuration
722    pub async fn get_config(&self) -> ConfigurationState {
723        self.config.read().await.clone()
724    }
725
726    /// Update latency configuration
727    pub async fn update_latency_config(
728        &self,
729        base_ms: u64,
730        jitter_ms: u64,
731        tag_overrides: HashMap<String, u64>,
732    ) {
733        let mut config = self.config.write().await;
734        config.latency_profile.base_ms = base_ms;
735        config.latency_profile.jitter_ms = jitter_ms;
736        config.latency_profile.tag_overrides = tag_overrides;
737    }
738
739    /// Update fault configuration
740    pub async fn update_fault_config(
741        &self,
742        enabled: bool,
743        failure_rate: f64,
744        status_codes: Vec<u16>,
745    ) {
746        let mut config = self.config.write().await;
747        config.fault_config.enabled = enabled;
748        config.fault_config.failure_rate = failure_rate;
749        config.fault_config.status_codes = status_codes;
750    }
751
752    /// Update proxy configuration
753    pub async fn update_proxy_config(
754        &self,
755        enabled: bool,
756        upstream_url: Option<String>,
757        timeout_seconds: u64,
758    ) {
759        let mut config = self.config.write().await;
760        config.proxy_config.enabled = enabled;
761        config.proxy_config.upstream_url = upstream_url;
762        config.proxy_config.timeout_seconds = timeout_seconds;
763    }
764
765    /// Update validation settings
766    pub async fn update_validation_config(
767        &self,
768        mode: String,
769        aggregate_errors: bool,
770        validate_responses: bool,
771        overrides: HashMap<String, String>,
772    ) {
773        let mut config = self.config.write().await;
774        config.validation_settings.mode = mode;
775        config.validation_settings.aggregate_errors = aggregate_errors;
776        config.validation_settings.validate_responses = validate_responses;
777        config.validation_settings.overrides = overrides;
778    }
779
780    /// Get filtered logs
781    pub async fn get_logs_filtered(&self, filter: &LogFilter) -> Vec<RequestLog> {
782        let logs = self.logs.read().await;
783
784        logs.iter()
785            .rev() // Most recent first
786            .filter(|log| {
787                if let Some(ref method) = filter.method {
788                    if log.method != *method {
789                        return false;
790                    }
791                }
792                if let Some(ref path_pattern) = filter.path_pattern {
793                    if !log.path.contains(path_pattern) {
794                        return false;
795                    }
796                }
797                if let Some(status) = filter.status_code {
798                    if log.status_code != status {
799                        return false;
800                    }
801                }
802                true
803            })
804            .take(filter.limit.unwrap_or(100))
805            .cloned()
806            .collect()
807    }
808
809    /// Clear all logs
810    pub async fn clear_logs(&self) {
811        let mut logs = self.logs.write().await;
812        logs.clear();
813    }
814}
815
/// Serve the main admin interface
///
/// Returns the embedded admin HTML page (see `crate::get_admin_html`).
pub async fn serve_admin_html() -> Html<&'static str> {
    Html(crate::get_admin_html())
}
820
/// Serve admin CSS
///
/// Returns the embedded stylesheet with a `Content-Type: text/css` header.
pub async fn serve_admin_css() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "text/css")], crate::get_admin_css())
}
825
/// Serve admin JavaScript
///
/// Returns the embedded script with a `Content-Type: application/javascript`
/// header.
pub async fn serve_admin_js() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "application/javascript")], crate::get_admin_js())
}
830
/// Get dashboard data
///
/// Assembles the complete admin-dashboard payload: server/build info, system
/// stats, aggregate request metrics, per-server status, and recent request
/// logs. Metrics are recomputed from the centralized request logger when it
/// is initialized; otherwise the admin state's local log buffer and counters
/// are used as a fallback.
pub async fn get_dashboard(State(state): State<AdminState>) -> Json<ApiResponse<DashboardData>> {
    let uptime = Utc::now().signed_duration_since(state.start_time).num_seconds() as u64;

    // Get system metrics from state
    let system_metrics = state.get_system_metrics().await;
    let _config = state.get_config().await;

    // Get recent logs and calculate metrics from centralized logger
    let (recent_logs, calculated_metrics): (Vec<RequestLog>, RequestMetrics) =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            // Get all logs to calculate metrics
            let all_logs = global_logger.get_recent_logs(None).await;
            let recent_logs_subset = global_logger.get_recent_logs(Some(20)).await;

            // Calculate metrics from logs
            let total_requests = all_logs.len() as u64;
            let mut requests_by_endpoint = HashMap::new();
            let mut errors_by_endpoint = HashMap::new();
            let mut response_times = Vec::new();
            let mut last_request_by_endpoint = HashMap::new();

            for log in &all_logs {
                // Group per route using a "METHOD /path" key
                let endpoint_key = format!("{} {}", log.method, log.path);
                *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;

                // Any 4xx/5xx response counts as an error for the endpoint
                if log.status_code >= 400 {
                    *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
                }

                response_times.push(log.response_time_ms);
                last_request_by_endpoint.insert(endpoint_key, log.timestamp);
            }

            let calculated_metrics = RequestMetrics {
                total_requests,
                active_connections: 0, // We don't track this from logs
                requests_by_endpoint,
                response_times,
                response_times_by_endpoint: HashMap::new(), // Simplified for now
                errors_by_endpoint,
                last_request_by_endpoint,
            };

            // Convert to RequestLog format for admin UI
            let recent_logs = recent_logs_subset
                .into_iter()
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            (recent_logs, calculated_metrics)
        } else {
            // Fallback to local logs if centralized logger not available
            // NOTE(review): this branch returns 10 recent logs while the
            // centralized branch above returns 20 — confirm whether that
            // difference is intentional.
            let logs = state.logs.read().await;
            let recent_logs = logs.iter().rev().take(10).cloned().collect();
            let metrics = state.get_metrics().await;
            (recent_logs, metrics)
        };

    let metrics = calculated_metrics;

    let system_info = SystemInfo {
        version: env!("CARGO_PKG_VERSION").to_string(),
        uptime_seconds: uptime,
        memory_usage_mb: system_metrics.memory_usage_mb,
        cpu_usage_percent: system_metrics.cpu_usage_percent,
        active_threads: system_metrics.active_threads as usize,
        total_routes: metrics.requests_by_endpoint.len(),
        total_fixtures: count_fixtures().unwrap_or(0),
    };

    // Per-protocol server status; connection counts for WS/gRPC are rough
    // splits of the single aggregate counter, not measured values.
    let servers = vec![
        ServerStatus {
            server_type: "HTTP".to_string(),
            address: state.http_server_addr.map(|addr| addr.to_string()),
            running: state.http_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections,
            total_requests: count_requests_by_server_type(&metrics, "HTTP"),
        },
        ServerStatus {
            server_type: "WebSocket".to_string(),
            address: state.ws_server_addr.map(|addr| addr.to_string()),
            running: state.ws_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 2, // Estimate
            total_requests: count_requests_by_server_type(&metrics, "WebSocket"),
        },
        ServerStatus {
            server_type: "gRPC".to_string(),
            address: state.grpc_server_addr.map(|addr| addr.to_string()),
            running: state.grpc_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 3, // Estimate
            total_requests: count_requests_by_server_type(&metrics, "gRPC"),
        },
    ];

    // Build routes info from actual request metrics
    // NOTE(review): `routes` is populated here but never included in the
    // DashboardData below — confirm whether it should be surfaced or removed.
    let mut routes = Vec::new();
    for (endpoint, count) in &metrics.requests_by_endpoint {
        let parts: Vec<&str> = endpoint.splitn(2, ' ').collect();
        if parts.len() == 2 {
            let method = parts[0].to_string();
            let path = parts[1].to_string();
            let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);

            routes.push(RouteInfo {
                method: Some(method.clone()),
                path: path.clone(),
                priority: 0,
                has_fixtures: route_has_fixtures(&method, &path),
                latency_ms: calculate_endpoint_latency(&metrics, endpoint),
                request_count: *count,
                last_request: get_endpoint_last_request(&metrics, endpoint),
                error_count,
            });
        }
    }

    let dashboard = DashboardData {
        server_info: ServerInfo {
            version: env!("CARGO_PKG_VERSION").to_string(),
            build_time: option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("unknown").to_string(),
            git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("unknown").to_string(),
            http_server: state.http_server_addr.map(|addr| addr.to_string()),
            ws_server: state.ws_server_addr.map(|addr| addr.to_string()),
            grpc_server: state.grpc_server_addr.map(|addr| addr.to_string()),
            graphql_server: state.graphql_server_addr.map(|addr| addr.to_string()),
            api_enabled: state.api_enabled,
            admin_port: state.admin_port,
        },
        system_info: DashboardSystemInfo {
            os: std::env::consts::OS.to_string(),
            arch: std::env::consts::ARCH.to_string(),
            uptime,
            memory_usage: system_metrics.memory_usage_mb * 1024 * 1024, // Convert MB to bytes
        },
        metrics: SimpleMetricsData {
            total_requests: metrics.requests_by_endpoint.values().sum(),
            active_requests: metrics.active_connections,
            // Mean latency in ms over all recorded requests
            average_response_time: if metrics.response_times.is_empty() {
                0.0
            } else {
                metrics.response_times.iter().sum::<u64>() as f64
                    / metrics.response_times.len() as f64
            },
            // Fraction (0.0–1.0) of requests with status >= 400
            error_rate: {
                let total_requests = metrics.requests_by_endpoint.values().sum::<u64>();
                let total_errors = metrics.errors_by_endpoint.values().sum::<u64>();
                if total_requests == 0 {
                    0.0
                } else {
                    total_errors as f64 / total_requests as f64
                }
            },
        },
        servers,
        recent_logs,
        system: system_info,
    };

    Json(ApiResponse::success(dashboard))
}
1010
1011/// Get routes from the global route store (populated by the HTTP server at startup)
1012pub async fn get_routes() -> impl IntoResponse {
1013    let routes = mockforge_core::request_logger::get_global_routes();
1014    let json = serde_json::json!({
1015        "routes": routes,
1016        "total": routes.len()
1017    });
1018    (
1019        StatusCode::OK,
1020        [("content-type", "application/json")],
1021        serde_json::to_string(&json).unwrap_or_else(|_| r#"{"routes":[]}"#.to_string()),
1022    )
1023}
1024
1025/// Get server info (HTTP server address for API calls)
1026pub async fn get_server_info(State(state): State<AdminState>) -> Json<serde_json::Value> {
1027    Json(json!({
1028        "http_server": state.http_server_addr.map(|addr| addr.to_string()),
1029        "ws_server": state.ws_server_addr.map(|addr| addr.to_string()),
1030        "grpc_server": state.grpc_server_addr.map(|addr| addr.to_string()),
1031        "admin_port": state.admin_port
1032    }))
1033}
1034
1035/// Get health check status
1036pub async fn get_health() -> Json<HealthCheck> {
1037    Json(
1038        HealthCheck::healthy()
1039            .with_service("http".to_string(), "healthy".to_string())
1040            .with_service("websocket".to_string(), "healthy".to_string())
1041            .with_service("grpc".to_string(), "healthy".to_string()),
1042    )
1043}
1044
1045/// Get request logs with optional filtering
1046pub async fn get_logs(
1047    State(state): State<AdminState>,
1048    Query(params): Query<HashMap<String, String>>,
1049) -> Json<ApiResponse<Vec<RequestLog>>> {
1050    let mut filter = LogFilter::default();
1051
1052    if let Some(method) = params.get("method") {
1053        filter.method = Some(method.clone());
1054    }
1055    if let Some(path) = params.get("path") {
1056        filter.path_pattern = Some(path.clone());
1057    }
1058    if let Some(status) = params.get("status").and_then(|s| s.parse().ok()) {
1059        filter.status_code = Some(status);
1060    }
1061    if let Some(limit) = params.get("limit").and_then(|s| s.parse().ok()) {
1062        filter.limit = Some(limit);
1063    }
1064
1065    // Get logs from centralized logger (same as dashboard)
1066    let logs = if let Some(global_logger) = mockforge_core::get_global_logger() {
1067        // Get logs from centralized logger
1068        let centralized_logs = global_logger.get_recent_logs(filter.limit).await;
1069
1070        // Convert to RequestLog format and apply filters
1071        centralized_logs
1072            .into_iter()
1073            .filter(|log| {
1074                if let Some(ref method) = filter.method {
1075                    if log.method != *method {
1076                        return false;
1077                    }
1078                }
1079                if let Some(ref path_pattern) = filter.path_pattern {
1080                    if !log.path.contains(path_pattern) {
1081                        return false;
1082                    }
1083                }
1084                if let Some(status) = filter.status_code {
1085                    if log.status_code != status {
1086                        return false;
1087                    }
1088                }
1089                true
1090            })
1091            .map(|log| RequestLog {
1092                id: log.id,
1093                timestamp: log.timestamp,
1094                method: log.method,
1095                path: log.path,
1096                status_code: log.status_code,
1097                response_time_ms: log.response_time_ms,
1098                client_ip: log.client_ip,
1099                user_agent: log.user_agent,
1100                headers: log.headers,
1101                response_size_bytes: log.response_size_bytes,
1102                error_message: log.error_message,
1103            })
1104            .collect()
1105    } else {
1106        // Fallback to local logs if centralized logger not available
1107        state.get_logs_filtered(&filter).await
1108    };
1109
1110    Json(ApiResponse::success(logs))
1111}
1112
1113/// Get reality trace metadata for a specific request
1114///
1115/// GET /__mockforge/api/reality/trace/:request_id
1116pub async fn get_reality_trace(
1117    Path(request_id): Path<String>,
1118) -> Json<ApiResponse<Option<mockforge_core::request_logger::RealityTraceMetadata>>> {
1119    if let Some(global_logger) = mockforge_core::get_global_logger() {
1120        let logs = global_logger.get_recent_logs(None).await;
1121        if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
1122            Json(ApiResponse::success(log_entry.reality_metadata))
1123        } else {
1124            Json(ApiResponse::error(format!("Request {} not found", request_id)))
1125        }
1126    } else {
1127        Json(ApiResponse::error("Request logger not initialized".to_string()))
1128    }
1129}
1130
1131/// Get response generation trace for a specific request
1132///
1133/// GET /__mockforge/api/reality/response-trace/:request_id
1134pub async fn get_response_trace(
1135    Path(request_id): Path<String>,
1136) -> Json<ApiResponse<Option<serde_json::Value>>> {
1137    if let Some(global_logger) = mockforge_core::get_global_logger() {
1138        let logs = global_logger.get_recent_logs(None).await;
1139        if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
1140            // Response generation trace would be stored in metadata
1141            // For now, return the metadata as JSON
1142            let trace = log_entry
1143                .metadata
1144                .get("response_generation_trace")
1145                .and_then(|s| serde_json::from_str::<serde_json::Value>(s).ok());
1146            Json(ApiResponse::success(trace))
1147        } else {
1148            Json(ApiResponse::error(format!("Request {} not found", request_id)))
1149        }
1150    } else {
1151        Json(ApiResponse::error("Request logger not initialized".to_string()))
1152    }
1153}
1154
// Configuration for recent logs display
/// Maximum number of log entries fetched per SSE poll for the live view.
const RECENT_LOGS_LIMIT: usize = 20;
/// Only logs newer than this many minutes are streamed to SSE clients.
const RECENT_LOGS_TTL_MINUTES: i64 = 5;
1158
/// SSE endpoint for real-time log streaming
///
/// Streams newly observed request logs to the admin UI as `new_logs` events.
/// Every 500ms the centralized logger is polled for the most recent entries;
/// entries already delivered on this connection (tracked by id) or older than
/// [`RECENT_LOGS_TTL_MINUTES`] are skipped. When nothing new is found, a
/// `keep_alive` event is emitted so the stream keeps producing items.
pub async fn logs_sse(
    State(_state): State<AdminState>,
) -> Sse<impl Stream<Item = std::result::Result<Event, Infallible>>> {
    tracing::info!("SSE endpoint /logs/sse accessed - starting real-time log streaming for recent requests only");

    // The unfold state is the per-connection set of log ids already delivered.
    let stream = stream::unfold(std::collections::HashSet::new(), |mut seen_ids| async move {
        // Poll interval between checks for new logs
        tokio::time::sleep(Duration::from_millis(500)).await;

        // Get recent logs from centralized logger (limit to recent entries for dashboard)
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            let centralized_logs = global_logger.get_recent_logs(Some(RECENT_LOGS_LIMIT)).await;

            tracing::debug!(
                "SSE: Checking logs - total logs: {}, seen logs: {}",
                centralized_logs.len(),
                seen_ids.len()
            );

            // Filter for recent logs within TTL
            let now = Utc::now();
            let ttl_cutoff = now - chrono::Duration::minutes(RECENT_LOGS_TTL_MINUTES);

            // Find new logs that haven't been seen before
            let new_logs: Vec<RequestLog> = centralized_logs
                .into_iter()
                .filter(|log| {
                    // Only include logs from the last X minutes and not yet seen
                    log.timestamp > ttl_cutoff && !seen_ids.contains(&log.id)
                })
                // Convert from the centralized log entry to the UI's RequestLog
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            // Add new log IDs to the seen set
            // NOTE(review): `seen_ids` grows unboundedly for long-lived
            // connections — confirm whether pruning ids older than the TTL
            // is worthwhile.
            for log in &new_logs {
                seen_ids.insert(log.id.clone());
            }

            // Send new logs if any
            if !new_logs.is_empty() {
                tracing::info!("SSE: Sending {} new logs to client", new_logs.len());

                let event_data = serde_json::to_string(&new_logs).unwrap_or_default();
                let event = Ok(Event::default().event("new_logs").data(event_data));

                return Some((event, seen_ids));
            }
        }

        // Send keep-alive
        let event = Ok(Event::default().event("keep_alive").data(""));
        Some((event, seen_ids))
    });

    // Axum-level keep-alive comments every 15s, in addition to our own
    // application-level keep_alive events above.
    Sse::new(stream).keep_alive(
        axum::response::sse::KeepAlive::new()
            .interval(Duration::from_secs(15))
            .text("keep-alive-text"),
    )
}
1231
1232/// Get metrics data
1233pub async fn get_metrics(State(state): State<AdminState>) -> Json<ApiResponse<MetricsData>> {
1234    // Get metrics from global logger (same as get_dashboard)
1235    let metrics = if let Some(global_logger) = mockforge_core::get_global_logger() {
1236        let all_logs = global_logger.get_recent_logs(None).await;
1237
1238        let total_requests = all_logs.len() as u64;
1239        let mut requests_by_endpoint = HashMap::new();
1240        let mut errors_by_endpoint = HashMap::new();
1241        let mut response_times = Vec::new();
1242        let mut last_request_by_endpoint = HashMap::new();
1243
1244        for log in &all_logs {
1245            let endpoint_key = format!("{} {}", log.method, log.path);
1246            *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1247
1248            if log.status_code >= 400 {
1249                *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1250            }
1251
1252            response_times.push(log.response_time_ms);
1253            last_request_by_endpoint.insert(endpoint_key, log.timestamp);
1254        }
1255
1256        RequestMetrics {
1257            total_requests,
1258            active_connections: 0,
1259            requests_by_endpoint,
1260            response_times,
1261            response_times_by_endpoint: HashMap::new(),
1262            errors_by_endpoint,
1263            last_request_by_endpoint,
1264        }
1265    } else {
1266        state.get_metrics().await
1267    };
1268
1269    let system_metrics = state.get_system_metrics().await;
1270    let time_series = state.get_time_series_data().await;
1271
1272    // Helper function to calculate percentile from sorted array
1273    fn calculate_percentile(sorted_data: &[u64], percentile: f64) -> u64 {
1274        if sorted_data.is_empty() {
1275            return 0;
1276        }
1277        let idx = ((sorted_data.len() as f64) * percentile).ceil() as usize;
1278        let idx = idx.min(sorted_data.len().saturating_sub(1));
1279        sorted_data[idx]
1280    }
1281
1282    // Calculate percentiles from response times (p50, p75, p90, p95, p99, p99.9)
1283    let mut response_times = metrics.response_times.clone();
1284    response_times.sort();
1285
1286    let p50 = calculate_percentile(&response_times, 0.50);
1287    let p75 = calculate_percentile(&response_times, 0.75);
1288    let p90 = calculate_percentile(&response_times, 0.90);
1289    let p95 = calculate_percentile(&response_times, 0.95);
1290    let p99 = calculate_percentile(&response_times, 0.99);
1291    let p999 = calculate_percentile(&response_times, 0.999);
1292
1293    // Calculate per-endpoint percentiles for detailed analysis
1294    let mut response_times_by_endpoint: HashMap<String, Vec<u64>> = HashMap::new();
1295    if let Some(global_logger) = mockforge_core::get_global_logger() {
1296        let all_logs = global_logger.get_recent_logs(None).await;
1297        for log in &all_logs {
1298            let endpoint_key = format!("{} {}", log.method, log.path);
1299            response_times_by_endpoint
1300                .entry(endpoint_key)
1301                .or_default()
1302                .push(log.response_time_ms);
1303        }
1304    }
1305
1306    // Calculate percentiles for each endpoint
1307    let mut endpoint_percentiles: HashMap<String, HashMap<String, u64>> = HashMap::new();
1308    for (endpoint, times) in &mut response_times_by_endpoint {
1309        times.sort();
1310        if !times.is_empty() {
1311            endpoint_percentiles.insert(
1312                endpoint.clone(),
1313                HashMap::from([
1314                    ("p50".to_string(), calculate_percentile(times, 0.50)),
1315                    ("p75".to_string(), calculate_percentile(times, 0.75)),
1316                    ("p90".to_string(), calculate_percentile(times, 0.90)),
1317                    ("p95".to_string(), calculate_percentile(times, 0.95)),
1318                    ("p99".to_string(), calculate_percentile(times, 0.99)),
1319                    ("p999".to_string(), calculate_percentile(times, 0.999)),
1320                ]),
1321            );
1322        }
1323    }
1324
1325    // Calculate error rates
1326    let mut error_rate_by_endpoint = HashMap::new();
1327    for (endpoint, total_count) in &metrics.requests_by_endpoint {
1328        let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
1329        let error_rate = if *total_count > 0 {
1330            error_count as f64 / *total_count as f64
1331        } else {
1332            0.0
1333        };
1334        error_rate_by_endpoint.insert(endpoint.clone(), error_rate);
1335    }
1336
1337    // Convert time series data to the format expected by the frontend
1338    // If no time series data exists yet, use current system metrics as a fallback
1339    let memory_usage_over_time = if time_series.memory_usage.is_empty() {
1340        vec![(Utc::now(), system_metrics.memory_usage_mb)]
1341    } else {
1342        time_series
1343            .memory_usage
1344            .iter()
1345            .map(|point| (point.timestamp, point.value as u64))
1346            .collect()
1347    };
1348
1349    let cpu_usage_over_time = if time_series.cpu_usage.is_empty() {
1350        vec![(Utc::now(), system_metrics.cpu_usage_percent)]
1351    } else {
1352        time_series
1353            .cpu_usage
1354            .iter()
1355            .map(|point| (point.timestamp, point.value))
1356            .collect()
1357    };
1358
1359    // Build time-series latency data (last 100 data points)
1360    let latency_over_time: Vec<(chrono::DateTime<Utc>, u64)> =
1361        if let Some(global_logger) = mockforge_core::get_global_logger() {
1362            let all_logs = global_logger.get_recent_logs(Some(100)).await;
1363            all_logs.iter().map(|log| (log.timestamp, log.response_time_ms)).collect()
1364        } else {
1365            Vec::new()
1366        };
1367
1368    let metrics_data = MetricsData {
1369        requests_by_endpoint: metrics.requests_by_endpoint,
1370        response_time_percentiles: HashMap::from([
1371            ("p50".to_string(), p50),
1372            ("p75".to_string(), p75),
1373            ("p90".to_string(), p90),
1374            ("p95".to_string(), p95),
1375            ("p99".to_string(), p99),
1376            ("p999".to_string(), p999),
1377        ]),
1378        endpoint_percentiles: Some(endpoint_percentiles),
1379        latency_over_time: Some(latency_over_time),
1380        error_rate_by_endpoint,
1381        memory_usage_over_time,
1382        cpu_usage_over_time,
1383    };
1384
1385    Json(ApiResponse::success(metrics_data))
1386}
1387
/// Update latency profile
///
/// Accepts a [`ConfigUpdate`] with `config_type == "latency"` whose `data`
/// may contain `base_ms` (default 50), `jitter_ms` (default 20), and a
/// `tag_overrides` object mapping tag names to per-tag latencies. Applies
/// the profile to shared state and records an audit-log entry attributed
/// from request headers where possible.
pub async fn update_latency(
    State(state): State<AdminState>,
    headers: http::HeaderMap,
    Json(update): Json<ConfigUpdate>,
) -> Json<ApiResponse<String>> {
    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
    use crate::rbac::{extract_user_context, get_default_user_context};

    if update.config_type != "latency" {
        return Json(ApiResponse::error("Invalid config type".to_string()));
    }

    // Extract latency configuration from the update data
    let base_ms = update.data.get("base_ms").and_then(|v| v.as_u64()).unwrap_or(50);
    let jitter_ms = update.data.get("jitter_ms").and_then(|v| v.as_u64()).unwrap_or(20);

    // Non-numeric override values are silently dropped rather than rejected.
    let tag_overrides: HashMap<String, u64> = update
        .data
        .get("tag_overrides")
        .and_then(|v| v.as_object())
        .map(|obj| obj.iter().filter_map(|(k, v)| v.as_u64().map(|val| (k.clone(), val))).collect())
        .unwrap_or_default();

    // Update the actual configuration
    state.update_latency_config(base_ms, jitter_ms, tag_overrides.clone()).await;

    // Record audit log with user context
    if let Some(audit_store) = get_global_audit_store() {
        let metadata = serde_json::json!({
            "base_ms": base_ms,
            "jitter_ms": jitter_ms,
            "tag_overrides": tag_overrides,
        });
        let mut audit_log = create_audit_log(
            AdminActionType::ConfigLatencyUpdated,
            format!("Latency profile updated: base_ms={}, jitter_ms={}", base_ms, jitter_ms),
            None,
            true,
            None,
            Some(metadata),
        );

        // Extract user context from headers
        if let Some(user_ctx) = extract_user_context(&headers).or_else(get_default_user_context) {
            audit_log.user_id = Some(user_ctx.user_id);
            audit_log.username = Some(user_ctx.username);
        }

        // Extract IP address from headers (proxy-aware: x-forwarded-for first)
        if let Some(ip) = headers
            .get("x-forwarded-for")
            .or_else(|| headers.get("x-real-ip"))
            .and_then(|h| h.to_str().ok())
        {
            audit_log.ip_address = Some(ip.to_string());
        }

        // Extract user agent
        if let Some(ua) = headers.get("user-agent").and_then(|h| h.to_str().ok()) {
            audit_log.user_agent = Some(ua.to_string());
        }

        audit_store.record(audit_log).await;
    }

    tracing::info!("Updated latency profile: base_ms={}, jitter_ms={}", base_ms, jitter_ms);

    Json(ApiResponse::success("Latency profile updated".to_string()))
}
1458
1459/// Update fault injection configuration
1460pub async fn update_faults(
1461    State(state): State<AdminState>,
1462    Json(update): Json<ConfigUpdate>,
1463) -> Json<ApiResponse<String>> {
1464    if update.config_type != "faults" {
1465        return Json(ApiResponse::error("Invalid config type".to_string()));
1466    }
1467
1468    // Extract fault configuration from the update data
1469    let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1470
1471    let failure_rate = update.data.get("failure_rate").and_then(|v| v.as_f64()).unwrap_or(0.0);
1472
1473    let status_codes = update
1474        .data
1475        .get("status_codes")
1476        .and_then(|v| v.as_array())
1477        .map(|arr| arr.iter().filter_map(|v| v.as_u64().map(|n| n as u16)).collect())
1478        .unwrap_or_else(|| vec![500, 502, 503]);
1479
1480    // Update the actual configuration
1481    state.update_fault_config(enabled, failure_rate, status_codes).await;
1482
1483    tracing::info!(
1484        "Updated fault configuration: enabled={}, failure_rate={}",
1485        enabled,
1486        failure_rate
1487    );
1488
1489    Json(ApiResponse::success("Fault configuration updated".to_string()))
1490}
1491
1492/// Update proxy configuration
1493pub async fn update_proxy(
1494    State(state): State<AdminState>,
1495    Json(update): Json<ConfigUpdate>,
1496) -> Json<ApiResponse<String>> {
1497    if update.config_type != "proxy" {
1498        return Json(ApiResponse::error("Invalid config type".to_string()));
1499    }
1500
1501    // Extract proxy configuration from the update data
1502    let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1503
1504    let upstream_url =
1505        update.data.get("upstream_url").and_then(|v| v.as_str()).map(|s| s.to_string());
1506
1507    let timeout_seconds = update.data.get("timeout_seconds").and_then(|v| v.as_u64()).unwrap_or(30);
1508
1509    // Update the actual configuration
1510    state.update_proxy_config(enabled, upstream_url.clone(), timeout_seconds).await;
1511
1512    tracing::info!(
1513        "Updated proxy configuration: enabled={}, upstream_url={:?}",
1514        enabled,
1515        upstream_url
1516    );
1517
1518    Json(ApiResponse::success("Proxy configuration updated".to_string()))
1519}
1520
1521/// Clear request logs
1522pub async fn clear_logs(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
1523    // Clear the actual logs from state
1524    state.clear_logs().await;
1525    tracing::info!("Cleared all request logs");
1526
1527    Json(ApiResponse::success("Logs cleared".to_string()))
1528}
1529
/// Restart servers
///
/// Initiates an asynchronous restart of the mock servers. Rejects the request
/// if a restart is already in progress, records an audit-log entry for the
/// attempt (success or failure), and spawns a background task so the HTTP
/// response returns immediately; completion is reported via restart status.
pub async fn restart_servers(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
    // Check if restart is already in progress
    let current_status = state.get_restart_status().await;
    if current_status.in_progress {
        return Json(ApiResponse::error("Server restart already in progress".to_string()));
    }

    // Initiate restart status
    let restart_result = state
        .initiate_restart("Manual restart requested via admin UI".to_string())
        .await;

    let success = restart_result.is_ok();
    let error_msg = restart_result.as_ref().err().map(|e| format!("{}", e));

    // Record audit log (recorded even on failure so the attempt is traceable)
    if let Some(audit_store) = get_global_audit_store() {
        let audit_log = create_audit_log(
            AdminActionType::ServerRestarted,
            "Server restart initiated via admin UI".to_string(),
            None,
            success,
            error_msg.clone(),
            None,
        );
        audit_store.record(audit_log).await;
    }

    if let Err(e) = restart_result {
        return Json(ApiResponse::error(format!("Failed to initiate restart: {}", e)));
    }

    // Spawn restart task to avoid blocking the response
    let state_clone = state.clone();
    tokio::spawn(async move {
        if let Err(e) = perform_server_restart(&state_clone).await {
            tracing::error!("Server restart failed: {}", e);
            state_clone.complete_restart(false).await;
        } else {
            tracing::info!("Server restart completed successfully");
            state_clone.complete_restart(true).await;
        }
    });

    tracing::info!("Server restart initiated via admin UI");
    Json(ApiResponse::success(
        "Server restart initiated. Please wait for completion.".to_string(),
    ))
}
1581
1582/// Perform the actual server restart
1583async fn perform_server_restart(_state: &AdminState) -> Result<()> {
1584    // Get the current process ID
1585    let current_pid = std::process::id();
1586    tracing::info!("Initiating restart for process PID: {}", current_pid);
1587
1588    // Try to find the parent process (MockForge CLI)
1589    let parent_pid = get_parent_process_id(current_pid).await?;
1590    tracing::info!("Found parent process PID: {}", parent_pid);
1591
1592    // Method 1: Try to restart via parent process signal
1593    if let Ok(()) = restart_via_parent_signal(parent_pid).await {
1594        tracing::info!("Restart initiated via parent process signal");
1595        return Ok(());
1596    }
1597
1598    // Method 2: Fallback to process replacement
1599    if let Ok(()) = restart_via_process_replacement().await {
1600        tracing::info!("Restart initiated via process replacement");
1601        return Ok(());
1602    }
1603
1604    // Method 3: Last resort - graceful shutdown with restart script
1605    restart_via_script().await
1606}
1607
1608/// Get parent process ID
1609async fn get_parent_process_id(pid: u32) -> Result<u32> {
1610    // Try to read from /proc/pid/stat on Linux
1611    #[cfg(target_os = "linux")]
1612    {
1613        // Read /proc filesystem using spawn_blocking
1614        let stat_path = format!("/proc/{}/stat", pid);
1615        if let Ok(ppid) = tokio::task::spawn_blocking(move || -> Result<u32> {
1616            let content = std::fs::read_to_string(&stat_path)
1617                .map_err(|e| Error::generic(format!("Failed to read {}: {}", stat_path, e)))?;
1618
1619            let fields: Vec<&str> = content.split_whitespace().collect();
1620            if fields.len() > 3 {
1621                fields[3]
1622                    .parse::<u32>()
1623                    .map_err(|e| Error::generic(format!("Failed to parse PPID: {}", e)))
1624            } else {
1625                Err(Error::generic("Insufficient fields in /proc/pid/stat".to_string()))
1626            }
1627        })
1628        .await
1629        {
1630            return ppid;
1631        }
1632    }
1633
1634    // Fallback: assume we're running under a shell/process manager
1635    Ok(1) // PID 1 as fallback
1636}
1637
1638/// Restart via parent process signal
1639async fn restart_via_parent_signal(parent_pid: u32) -> Result<()> {
1640    #[cfg(unix)]
1641    {
1642        use std::process::Command;
1643
1644        // Send SIGTERM to parent process to trigger restart
1645        let output = Command::new("kill")
1646            .args(["-TERM", &parent_pid.to_string()])
1647            .output()
1648            .map_err(|e| Error::generic(format!("Failed to send signal: {}", e)))?;
1649
1650        if !output.status.success() {
1651            return Err(Error::generic(
1652                "Failed to send restart signal to parent process".to_string(),
1653            ));
1654        }
1655
1656        // Wait a moment for the signal to be processed
1657        tokio::time::sleep(Duration::from_millis(100)).await;
1658        Ok(())
1659    }
1660
1661    #[cfg(not(unix))]
1662    {
1663        Err(Error::generic(
1664            "Signal-based restart not supported on this platform".to_string(),
1665        ))
1666    }
1667}
1668
1669/// Restart via process replacement
1670async fn restart_via_process_replacement() -> Result<()> {
1671    // Get the current executable path
1672    let current_exe = std::env::current_exe()
1673        .map_err(|e| Error::generic(format!("Failed to get current executable: {}", e)))?;
1674
1675    // Get current command line arguments
1676    let args: Vec<String> = std::env::args().collect();
1677
1678    tracing::info!("Restarting with command: {:?}", args);
1679
1680    // Start new process
1681    let mut child = Command::new(&current_exe)
1682        .args(&args[1..]) // Skip the program name
1683        .stdout(Stdio::inherit())
1684        .stderr(Stdio::inherit())
1685        .spawn()
1686        .map_err(|e| Error::generic(format!("Failed to start new process: {}", e)))?;
1687
1688    // Give the new process a moment to start
1689    tokio::time::sleep(Duration::from_millis(500)).await;
1690
1691    // Check if the new process is still running
1692    match child.try_wait() {
1693        Ok(Some(status)) => {
1694            if status.success() {
1695                tracing::info!("New process started successfully");
1696                Ok(())
1697            } else {
1698                Err(Error::generic("New process exited with error".to_string()))
1699            }
1700        }
1701        Ok(None) => {
1702            tracing::info!("New process is running, exiting current process");
1703            // Exit current process
1704            std::process::exit(0);
1705        }
1706        Err(e) => Err(Error::generic(format!("Failed to check new process status: {}", e))),
1707    }
1708}
1709
1710/// Restart via external script
1711async fn restart_via_script() -> Result<()> {
1712    // Look for restart script in common locations
1713    let script_paths = ["./scripts/restart.sh", "./restart.sh", "restart.sh"];
1714
1715    for script_path in &script_paths {
1716        if std::path::Path::new(script_path).exists() {
1717            tracing::info!("Using restart script: {}", script_path);
1718
1719            let output = Command::new("bash")
1720                .arg(script_path)
1721                .output()
1722                .map_err(|e| Error::generic(format!("Failed to execute restart script: {}", e)))?;
1723
1724            if output.status.success() {
1725                return Ok(());
1726            } else {
1727                tracing::warn!(
1728                    "Restart script failed: {}",
1729                    String::from_utf8_lossy(&output.stderr)
1730                );
1731            }
1732        }
1733    }
1734
1735    // If no script found, try to use the clear-ports script as a fallback
1736    let clear_script = "./scripts/clear-ports.sh";
1737    if std::path::Path::new(clear_script).exists() {
1738        tracing::info!("Using clear-ports script as fallback");
1739
1740        let _ = Command::new("bash").arg(clear_script).output();
1741    }
1742
1743    Err(Error::generic(
1744        "No restart mechanism available. Please restart manually.".to_string(),
1745    ))
1746}
1747
/// Get restart status
///
/// Returns the restart state tracked on `AdminState`, so the UI can poll
/// for completion after initiating a restart.
pub async fn get_restart_status(
    State(state): State<AdminState>,
) -> Json<ApiResponse<RestartStatus>> {
    let status = state.get_restart_status().await;
    Json(ApiResponse::success(status))
}
1755
1756/// Get audit logs
1757pub async fn get_audit_logs(
1758    Query(params): Query<HashMap<String, String>>,
1759) -> Json<ApiResponse<Vec<crate::audit::AdminAuditLog>>> {
1760    use crate::audit::{get_global_audit_store, AdminActionType};
1761
1762    let action_type_str = params.get("action_type");
1763    let user_id = params.get("user_id").map(|s| s.as_str());
1764    let limit = params.get("limit").and_then(|s| s.parse::<usize>().ok());
1765    let offset = params.get("offset").and_then(|s| s.parse::<usize>().ok());
1766
1767    // Parse action type if provided
1768    let action_type = action_type_str.and_then(|s| {
1769        // Simple string matching - could be enhanced with proper parsing
1770        match s.as_str() {
1771            "config_latency_updated" => Some(AdminActionType::ConfigLatencyUpdated),
1772            "config_faults_updated" => Some(AdminActionType::ConfigFaultsUpdated),
1773            "server_restarted" => Some(AdminActionType::ServerRestarted),
1774            "logs_cleared" => Some(AdminActionType::LogsCleared),
1775            _ => None,
1776        }
1777    });
1778
1779    if let Some(audit_store) = get_global_audit_store() {
1780        let logs = audit_store.get_logs(action_type, user_id, limit, offset).await;
1781        Json(ApiResponse::success(logs))
1782    } else {
1783        Json(ApiResponse::error("Audit logging not initialized".to_string()))
1784    }
1785}
1786
/// Get audit log statistics
///
/// Returns aggregate statistics from the global audit store, or an error
/// response if audit logging has not been initialized.
pub async fn get_audit_stats() -> Json<ApiResponse<crate::audit::AuditLogStats>> {
    use crate::audit::get_global_audit_store;

    if let Some(audit_store) = get_global_audit_store() {
        let stats = audit_store.get_stats().await;
        Json(ApiResponse::success(stats))
    } else {
        Json(ApiResponse::error("Audit logging not initialized".to_string()))
    }
}
1798
/// Get server configuration
///
/// Serializes the current admin config state (latency, faults, proxy,
/// traffic shaping, validation) into a JSON document for the UI.
pub async fn get_config(State(state): State<AdminState>) -> Json<ApiResponse<serde_json::Value>> {
    let config_state = state.get_config().await;

    let config = json!({
        "latency": {
            // NOTE(review): `enabled` is hard-coded true here, unlike the
            // other sections which reflect config state — confirm intent.
            "enabled": true,
            "base_ms": config_state.latency_profile.base_ms,
            "jitter_ms": config_state.latency_profile.jitter_ms,
            "tag_overrides": config_state.latency_profile.tag_overrides
        },
        "faults": {
            "enabled": config_state.fault_config.enabled,
            "failure_rate": config_state.fault_config.failure_rate,
            "status_codes": config_state.fault_config.status_codes
        },
        "proxy": {
            "enabled": config_state.proxy_config.enabled,
            "upstream_url": config_state.proxy_config.upstream_url,
            "timeout_seconds": config_state.proxy_config.timeout_seconds
        },
        "traffic_shaping": {
            "enabled": config_state.traffic_shaping.enabled,
            "bandwidth": config_state.traffic_shaping.bandwidth,
            "burst_loss": config_state.traffic_shaping.burst_loss
        },
        "validation": {
            "mode": config_state.validation_settings.mode,
            "aggregate_errors": config_state.validation_settings.aggregate_errors,
            "validate_responses": config_state.validation_settings.validate_responses,
            "overrides": config_state.validation_settings.overrides
        }
    });

    Json(ApiResponse::success(config))
}
1835
1836/// Count total fixtures in the fixtures directory
1837pub fn count_fixtures() -> Result<usize> {
1838    // Get the fixtures directory from environment or use default
1839    let fixtures_dir =
1840        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1841    let fixtures_path = std::path::Path::new(&fixtures_dir);
1842
1843    if !fixtures_path.exists() {
1844        return Ok(0);
1845    }
1846
1847    let mut total_count = 0;
1848
1849    // Count HTTP fixtures
1850    let http_fixtures_path = fixtures_path.join("http");
1851    if http_fixtures_path.exists() {
1852        total_count += count_fixtures_in_directory(&http_fixtures_path)?;
1853    }
1854
1855    // Count WebSocket fixtures
1856    let ws_fixtures_path = fixtures_path.join("websocket");
1857    if ws_fixtures_path.exists() {
1858        total_count += count_fixtures_in_directory(&ws_fixtures_path)?;
1859    }
1860
1861    // Count gRPC fixtures
1862    let grpc_fixtures_path = fixtures_path.join("grpc");
1863    if grpc_fixtures_path.exists() {
1864        total_count += count_fixtures_in_directory(&grpc_fixtures_path)?;
1865    }
1866
1867    Ok(total_count)
1868}
1869
1870/// Helper function to count JSON files in a directory recursively (blocking version)
1871fn count_fixtures_in_directory(dir_path: &std::path::Path) -> Result<usize> {
1872    let mut count = 0;
1873
1874    if let Ok(entries) = std::fs::read_dir(dir_path) {
1875        for entry in entries {
1876            let entry = entry
1877                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
1878            let path = entry.path();
1879
1880            if path.is_dir() {
1881                // Recursively count fixtures in subdirectories
1882                count += count_fixtures_in_directory(&path)?;
1883            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
1884                // Count JSON files as fixtures
1885                count += 1;
1886            }
1887        }
1888    }
1889
1890    Ok(count)
1891}
1892
/// Check if a specific route has fixtures
///
/// Looks for at least one JSON fixture file under the configured fixtures
/// directory (`MOCKFORGE_FIXTURES_DIR`, defaulting to `fixtures`):
/// - HTTP: `<dir>/http/<lowercased method>/<path with '/' and ':' replaced by '_'>/`
/// - WebSocket (method "WS", case-insensitive): `<dir>/websocket/<encoded path>/`
pub fn route_has_fixtures(method: &str, path: &str) -> bool {
    // Get the fixtures directory from environment or use default
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return false;
    }

    // Directory names encode the route: lowercased method plus the path with
    // separators replaced by underscores.
    let method_lower = method.to_lowercase();
    let path_hash = path.replace(['/', ':'], "_");

    // Check HTTP fixtures
    let http_fixtures_path = fixtures_path.join("http").join(&method_lower).join(&path_hash);
    if dir_contains_json_file(&http_fixtures_path) {
        return true;
    }

    // Check WebSocket fixtures for WS method
    if method.to_uppercase() == "WS" {
        let ws_fixtures_path = fixtures_path.join("websocket").join(&path_hash);
        if dir_contains_json_file(&ws_fixtures_path) {
            return true;
        }
    }

    false
}

/// Return true if `dir` exists and directly contains at least one `.json` file.
fn dir_contains_json_file(dir: &std::path::Path) -> bool {
    match std::fs::read_dir(dir) {
        Ok(entries) => entries
            .flatten()
            .any(|entry| entry.path().extension().and_then(|ext| ext.to_str()) == Some("json")),
        Err(_) => false,
    }
}
1937
1938/// Calculate average latency for a specific endpoint
1939fn calculate_endpoint_latency(metrics: &RequestMetrics, endpoint: &str) -> Option<u64> {
1940    metrics.response_times_by_endpoint.get(endpoint).and_then(|times| {
1941        if times.is_empty() {
1942            None
1943        } else {
1944            let sum: u64 = times.iter().sum();
1945            Some(sum / times.len() as u64)
1946        }
1947    })
1948}
1949
/// Get the last request timestamp for a specific endpoint
///
/// Returns `None` if no request has been recorded for this endpoint key.
fn get_endpoint_last_request(
    metrics: &RequestMetrics,
    endpoint: &str,
) -> Option<chrono::DateTime<Utc>> {
    metrics.last_request_by_endpoint.get(endpoint).copied()
}
1957
1958/// Count total requests for a specific server type
1959fn count_requests_by_server_type(metrics: &RequestMetrics, server_type: &str) -> u64 {
1960    match server_type {
1961        "HTTP" => {
1962            // Count all HTTP requests (GET, POST, PUT, DELETE, etc.)
1963            metrics
1964                .requests_by_endpoint
1965                .iter()
1966                .filter(|(endpoint, _)| {
1967                    let method = endpoint.split(' ').next().unwrap_or("");
1968                    matches!(
1969                        method,
1970                        "GET" | "POST" | "PUT" | "DELETE" | "PATCH" | "HEAD" | "OPTIONS"
1971                    )
1972                })
1973                .map(|(_, count)| count)
1974                .sum()
1975        }
1976        "WebSocket" => {
1977            // Count WebSocket requests (WS method)
1978            metrics
1979                .requests_by_endpoint
1980                .iter()
1981                .filter(|(endpoint, _)| {
1982                    let method = endpoint.split(' ').next().unwrap_or("");
1983                    method == "WS"
1984                })
1985                .map(|(_, count)| count)
1986                .sum()
1987        }
1988        "gRPC" => {
1989            // Count gRPC requests (gRPC method)
1990            metrics
1991                .requests_by_endpoint
1992                .iter()
1993                .filter(|(endpoint, _)| {
1994                    let method = endpoint.split(' ').next().unwrap_or("");
1995                    method == "gRPC"
1996                })
1997                .map(|(_, count)| count)
1998                .sum()
1999        }
2000        _ => 0,
2001    }
2002}
2003
/// Get fixtures/replay data
///
/// Scans the fixtures directory and returns every fixture found, or an
/// error response if the scan fails.
pub async fn get_fixtures() -> Json<ApiResponse<Vec<FixtureInfo>>> {
    match scan_fixtures_directory() {
        Ok(fixtures) => Json(ApiResponse::success(fixtures)),
        Err(e) => {
            tracing::error!("Failed to scan fixtures directory: {}", e);
            Json(ApiResponse::error(format!("Failed to load fixtures: {}", e)))
        }
    }
}
2014
2015/// Scan the fixtures directory and return all fixture information
2016fn scan_fixtures_directory() -> Result<Vec<FixtureInfo>> {
2017    let fixtures_dir =
2018        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2019    let fixtures_path = std::path::Path::new(&fixtures_dir);
2020
2021    if !fixtures_path.exists() {
2022        tracing::warn!("Fixtures directory does not exist: {}", fixtures_dir);
2023        return Ok(Vec::new());
2024    }
2025
2026    let mut all_fixtures = Vec::new();
2027
2028    // Scan HTTP fixtures
2029    let http_fixtures = scan_protocol_fixtures(fixtures_path, "http")?;
2030    all_fixtures.extend(http_fixtures);
2031
2032    // Scan WebSocket fixtures
2033    let ws_fixtures = scan_protocol_fixtures(fixtures_path, "websocket")?;
2034    all_fixtures.extend(ws_fixtures);
2035
2036    // Scan gRPC fixtures
2037    let grpc_fixtures = scan_protocol_fixtures(fixtures_path, "grpc")?;
2038    all_fixtures.extend(grpc_fixtures);
2039
2040    // Sort by saved_at timestamp (newest first)
2041    all_fixtures.sort_by(|a, b| b.saved_at.cmp(&a.saved_at));
2042
2043    tracing::info!("Found {} fixtures in directory: {}", all_fixtures.len(), fixtures_dir);
2044    Ok(all_fixtures)
2045}
2046
2047/// Scan fixtures for a specific protocol
2048fn scan_protocol_fixtures(
2049    fixtures_path: &std::path::Path,
2050    protocol: &str,
2051) -> Result<Vec<FixtureInfo>> {
2052    let protocol_path = fixtures_path.join(protocol);
2053    let mut fixtures = Vec::new();
2054
2055    if !protocol_path.exists() {
2056        return Ok(fixtures);
2057    }
2058
2059    // Walk through the protocol directory recursively
2060    if let Ok(entries) = std::fs::read_dir(&protocol_path) {
2061        for entry in entries {
2062            let entry = entry
2063                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2064            let path = entry.path();
2065
2066            if path.is_dir() {
2067                // Recursively scan subdirectories
2068                let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2069                fixtures.extend(sub_fixtures);
2070            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2071                // Process individual JSON fixture file
2072                if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2073                    fixtures.push(fixture);
2074                }
2075            }
2076        }
2077    }
2078
2079    Ok(fixtures)
2080}
2081
2082/// Recursively scan a directory for fixture files
2083fn scan_directory_recursive(
2084    dir_path: &std::path::Path,
2085    protocol: &str,
2086) -> Result<Vec<FixtureInfo>> {
2087    let mut fixtures = Vec::new();
2088
2089    if let Ok(entries) = std::fs::read_dir(dir_path) {
2090        for entry in entries {
2091            let entry = entry
2092                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2093            let path = entry.path();
2094
2095            if path.is_dir() {
2096                // Recursively scan subdirectories
2097                let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2098                fixtures.extend(sub_fixtures);
2099            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2100                // Process individual JSON fixture file
2101                if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2102                    fixtures.push(fixture);
2103                }
2104            }
2105        }
2106    }
2107
2108    Ok(fixtures)
2109}
2110
/// Parse a single fixture file and extract metadata (synchronous version)
///
/// Combines file-system metadata (size, modification time) with fields
/// parsed from the fixture JSON to build a `FixtureInfo`.
///
/// # Errors
/// Fails if the file metadata, modification time, or contents cannot be
/// read, or if the contents are not valid JSON.
fn parse_fixture_file_sync(file_path: &std::path::Path, protocol: &str) -> Result<FixtureInfo> {
    // Get file metadata
    let metadata = std::fs::metadata(file_path)
        .map_err(|e| Error::generic(format!("Failed to read file metadata: {}", e)))?;

    let file_size = metadata.len();
    let modified_time = metadata
        .modified()
        .map_err(|e| Error::generic(format!("Failed to get file modification time: {}", e)))?;

    // The file's mtime stands in for the fixture's save time.
    let saved_at = chrono::DateTime::from(modified_time);

    // Read and parse the fixture file (blocking - called from spawn_blocking context)
    let content = std::fs::read_to_string(file_path)
        .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;

    let fixture_data: serde_json::Value = serde_json::from_str(&content)
        .map_err(|e| Error::generic(format!("Failed to parse fixture JSON: {}", e)))?;

    // Extract method and path from the fixture data
    let (method, path) = extract_method_and_path(&fixture_data, protocol)?;

    // Generate a unique ID based on file path and content
    let id = generate_fixture_id(file_path, &content);

    // Extract fingerprint from file path or fixture data
    let fingerprint = extract_fingerprint(file_path, &fixture_data)?;

    // Get relative file path: stored relative to the fixtures root when
    // possible, falling back to the path as given if it is not under the
    // configured directory.
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);
    let file_path_str = file_path
        .strip_prefix(fixtures_path)
        .unwrap_or(file_path)
        .to_string_lossy()
        .to_string();

    Ok(FixtureInfo {
        id,
        protocol: protocol.to_string(),
        method,
        path,
        saved_at,
        file_size,
        file_path: file_path_str,
        fingerprint,
        metadata: fixture_data,
    })
}
2162
2163/// Extract method and path from fixture data
2164fn extract_method_and_path(
2165    fixture_data: &serde_json::Value,
2166    protocol: &str,
2167) -> Result<(String, String)> {
2168    match protocol {
2169        "http" => {
2170            // For HTTP fixtures, look for request.method and request.path
2171            let method = fixture_data
2172                .get("request")
2173                .and_then(|req| req.get("method"))
2174                .and_then(|m| m.as_str())
2175                .unwrap_or("UNKNOWN")
2176                .to_uppercase();
2177
2178            let path = fixture_data
2179                .get("request")
2180                .and_then(|req| req.get("path"))
2181                .and_then(|p| p.as_str())
2182                .unwrap_or("/unknown")
2183                .to_string();
2184
2185            Ok((method, path))
2186        }
2187        "websocket" => {
2188            // For WebSocket fixtures, use WS method and extract path from metadata
2189            let path = fixture_data
2190                .get("path")
2191                .and_then(|p| p.as_str())
2192                .or_else(|| {
2193                    fixture_data
2194                        .get("request")
2195                        .and_then(|req| req.get("path"))
2196                        .and_then(|p| p.as_str())
2197                })
2198                .unwrap_or("/ws")
2199                .to_string();
2200
2201            Ok(("WS".to_string(), path))
2202        }
2203        "grpc" => {
2204            // For gRPC fixtures, extract service and method
2205            let service =
2206                fixture_data.get("service").and_then(|s| s.as_str()).unwrap_or("UnknownService");
2207
2208            let method =
2209                fixture_data.get("method").and_then(|m| m.as_str()).unwrap_or("UnknownMethod");
2210
2211            let path = format!("/{}/{}", service, method);
2212            Ok(("gRPC".to_string(), path))
2213        }
2214        _ => {
2215            let path = fixture_data
2216                .get("path")
2217                .and_then(|p| p.as_str())
2218                .unwrap_or("/unknown")
2219                .to_string();
2220            Ok((protocol.to_uppercase(), path))
2221        }
2222    }
2223}
2224
/// Generate a unique fixture ID
///
/// Hashes the file path together with the file contents so that distinct
/// files (or changed contents) produce distinct IDs.
fn generate_fixture_id(file_path: &std::path::Path, content: &str) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Hashing the (path, content) pair feeds the hasher the same byte stream
    // as hashing each part in sequence, so the resulting IDs are unchanged.
    let mut state = DefaultHasher::new();
    (file_path, content).hash(&mut state);
    format!("fixture_{:x}", state.finish())
}
2235
2236/// Extract fingerprint from file path or fixture data
2237fn extract_fingerprint(
2238    file_path: &std::path::Path,
2239    fixture_data: &serde_json::Value,
2240) -> Result<String> {
2241    // Try to extract from fixture data first
2242    if let Some(fingerprint) = fixture_data.get("fingerprint").and_then(|f| f.as_str()) {
2243        return Ok(fingerprint.to_string());
2244    }
2245
2246    // Try to extract from file path (common pattern: method_path_hash.json)
2247    if let Some(file_name) = file_path.file_stem().and_then(|s| s.to_str()) {
2248        // Look for hash pattern at the end of filename
2249        if let Some(hash) = file_name.split('_').next_back() {
2250            if hash.len() >= 8 && hash.chars().all(|c| c.is_alphanumeric()) {
2251                return Ok(hash.to_string());
2252            }
2253        }
2254    }
2255
2256    // Fallback: generate from file path
2257    use std::collections::hash_map::DefaultHasher;
2258    use std::hash::{Hash, Hasher};
2259
2260    let mut hasher = DefaultHasher::new();
2261    file_path.hash(&mut hasher);
2262    Ok(format!("{:x}", hasher.finish()))
2263}
2264
/// Delete a fixture
///
/// Looks the fixture up by its computed ID, removes the backing file, and
/// responds with a plain success or error message.
pub async fn delete_fixture(
    Json(payload): Json<FixtureDeleteRequest>,
) -> Json<ApiResponse<String>> {
    match delete_fixture_by_id(&payload.fixture_id).await {
        Ok(_) => {
            tracing::info!("Successfully deleted fixture: {}", payload.fixture_id);
            Json(ApiResponse::success("Fixture deleted successfully".to_string()))
        }
        Err(e) => {
            tracing::error!("Failed to delete fixture {}: {}", payload.fixture_id, e);
            Json(ApiResponse::error(format!("Failed to delete fixture: {}", e)))
        }
    }
}
2280
2281/// Delete multiple fixtures
2282pub async fn delete_fixtures_bulk(
2283    Json(payload): Json<FixtureBulkDeleteRequest>,
2284) -> Json<ApiResponse<FixtureBulkDeleteResult>> {
2285    let mut deleted_count = 0;
2286    let mut errors = Vec::new();
2287
2288    for fixture_id in &payload.fixture_ids {
2289        match delete_fixture_by_id(fixture_id).await {
2290            Ok(_) => {
2291                deleted_count += 1;
2292                tracing::info!("Successfully deleted fixture: {}", fixture_id);
2293            }
2294            Err(e) => {
2295                errors.push(format!("Failed to delete {}: {}", fixture_id, e));
2296                tracing::error!("Failed to delete fixture {}: {}", fixture_id, e);
2297            }
2298        }
2299    }
2300
2301    let result = FixtureBulkDeleteResult {
2302        deleted_count,
2303        total_requested: payload.fixture_ids.len(),
2304        errors: errors.clone(),
2305    };
2306
2307    if errors.is_empty() {
2308        Json(ApiResponse::success(result))
2309    } else {
2310        Json(ApiResponse::error(format!(
2311            "Partial success: {} deleted, {} errors",
2312            deleted_count,
2313            errors.len()
2314        )))
2315    }
2316}
2317
/// Delete a single fixture by ID
///
/// Resolves the fixture's file by scanning every protocol directory for a
/// file whose computed ID matches, deletes it on a blocking thread, then
/// prunes any directories left empty.
///
/// # Errors
/// Fails if the fixtures directory is missing, no file matches the ID, or
/// the file cannot be removed.
async fn delete_fixture_by_id(fixture_id: &str) -> Result<()> {
    // First, try to find the fixture by scanning the fixtures directory
    // This is more robust than trying to parse the ID format
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    // Search for the fixture file by ID across all protocols
    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    // Delete the file using spawn_blocking
    let file_path_clone = file_path.clone();
    tokio::task::spawn_blocking(move || {
        if file_path_clone.exists() {
            std::fs::remove_file(&file_path_clone).map_err(|e| {
                Error::generic(format!(
                    "Failed to delete fixture file {}: {}",
                    file_path_clone.display(),
                    e
                ))
            })
        } else {
            Err(Error::generic(format!("Fixture file not found: {}", file_path_clone.display())))
        }
    })
    .await
    // First `?` covers the task join error, second the deletion result.
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Deleted fixture file: {}", file_path.display());

    // Also try to remove empty parent directories
    cleanup_empty_directories(&file_path).await;

    Ok(())
}
2358
2359/// Find a fixture file by its ID across all protocols
2360fn find_fixture_file_by_id(
2361    fixtures_path: &std::path::Path,
2362    fixture_id: &str,
2363) -> Result<std::path::PathBuf> {
2364    // Search in all protocol directories
2365    let protocols = ["http", "websocket", "grpc"];
2366
2367    for protocol in &protocols {
2368        let protocol_path = fixtures_path.join(protocol);
2369        if let Ok(found_path) = search_fixture_in_directory(&protocol_path, fixture_id) {
2370            return Ok(found_path);
2371        }
2372    }
2373
2374    Err(Error::generic(format!(
2375        "Fixture with ID '{}' not found in any protocol directory",
2376        fixture_id
2377    )))
2378}
2379
2380/// Recursively search for a fixture file by ID in a directory
2381fn search_fixture_in_directory(
2382    dir_path: &std::path::Path,
2383    fixture_id: &str,
2384) -> Result<std::path::PathBuf> {
2385    if !dir_path.exists() {
2386        return Err(Error::generic(format!("Directory does not exist: {}", dir_path.display())));
2387    }
2388
2389    if let Ok(entries) = std::fs::read_dir(dir_path) {
2390        for entry in entries {
2391            let entry = entry
2392                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2393            let path = entry.path();
2394
2395            if path.is_dir() {
2396                // Recursively search subdirectories
2397                if let Ok(found_path) = search_fixture_in_directory(&path, fixture_id) {
2398                    return Ok(found_path);
2399                }
2400            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2401                // Check if this file matches the fixture ID
2402                if let Ok(fixture_info) = parse_fixture_file_sync(&path, "unknown") {
2403                    if fixture_info.id == fixture_id {
2404                        return Ok(path);
2405                    }
2406                }
2407            }
2408        }
2409    }
2410
2411    Err(Error::generic(format!(
2412        "Fixture not found in directory: {}",
2413        dir_path.display()
2414    )))
2415}
2416
/// Clean up empty directories after file deletion
///
/// Best-effort: walks from the deleted file's parent up toward the fixtures
/// root, removing each directory that is empty, and stops at the first
/// non-empty or non-removable one. All failures are logged at debug level
/// and otherwise ignored.
async fn cleanup_empty_directories(file_path: &std::path::Path) {
    let file_path = file_path.to_path_buf();

    // Use spawn_blocking for directory operations
    let _ = tokio::task::spawn_blocking(move || {
        if let Some(parent) = file_path.parent() {
            // Try to remove empty directories up to the protocol level
            let mut current = parent;
            let fixtures_dir =
                std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
            let fixtures_path = std::path::Path::new(&fixtures_dir);

            // NOTE(review): this is a textual path comparison, so a relative
            // fixtures root will not match an absolute `current`; the
            // `parent().is_some()` guard is what ultimately bounds the walk.
            while current != fixtures_path && current.parent().is_some() {
                if let Ok(entries) = std::fs::read_dir(current) {
                    // `count() == 0` means the directory is empty.
                    if entries.count() == 0 {
                        if let Err(e) = std::fs::remove_dir(current) {
                            tracing::debug!(
                                "Failed to remove empty directory {}: {}",
                                current.display(),
                                e
                            );
                            break;
                        } else {
                            tracing::debug!("Removed empty directory: {}", current.display());
                        }
                    } else {
                        // Stop at the first non-empty ancestor.
                        break;
                    }
                } else {
                    break;
                }

                if let Some(next_parent) = current.parent() {
                    current = next_parent;
                } else {
                    break;
                }
            }
        }
    })
    .await;
}
2460
2461/// Download a fixture file
2462pub async fn download_fixture(Path(fixture_id): Path<String>) -> impl IntoResponse {
2463    // Find and read the fixture file
2464    match download_fixture_by_id(&fixture_id).await {
2465        Ok((content, file_name)) => (
2466            StatusCode::OK,
2467            [
2468                (http::header::CONTENT_TYPE, "application/json".to_string()),
2469                (
2470                    http::header::CONTENT_DISPOSITION,
2471                    format!("attachment; filename=\"{}\"", file_name),
2472                ),
2473            ],
2474            content,
2475        )
2476            .into_response(),
2477        Err(e) => {
2478            tracing::error!("Failed to download fixture {}: {}", fixture_id, e);
2479            let error_response = format!(r#"{{"error": "Failed to download fixture: {}"}}"#, e);
2480            (
2481                StatusCode::NOT_FOUND,
2482                [(http::header::CONTENT_TYPE, "application/json".to_string())],
2483                error_response,
2484            )
2485                .into_response()
2486        }
2487    }
2488}
2489
2490/// Download a fixture file by ID
2491async fn download_fixture_by_id(fixture_id: &str) -> Result<(String, String)> {
2492    // Find the fixture file by ID
2493    let fixtures_dir =
2494        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2495    let fixtures_path = std::path::Path::new(&fixtures_dir);
2496
2497    if !fixtures_path.exists() {
2498        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2499    }
2500
2501    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2502
2503    // Read the file content using spawn_blocking
2504    let file_path_clone = file_path.clone();
2505    let (content, file_name) = tokio::task::spawn_blocking(move || {
2506        let content = std::fs::read_to_string(&file_path_clone)
2507            .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
2508
2509        let file_name = file_path_clone
2510            .file_name()
2511            .and_then(|name| name.to_str())
2512            .unwrap_or("fixture.json")
2513            .to_string();
2514
2515        Ok::<_, Error>((content, file_name))
2516    })
2517    .await
2518    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2519
2520    tracing::info!("Downloaded fixture file: {} ({} bytes)", file_path.display(), content.len());
2521    Ok((content, file_name))
2522}
2523
2524/// Rename a fixture
2525pub async fn rename_fixture(
2526    Path(fixture_id): Path<String>,
2527    Json(payload): Json<FixtureRenameRequest>,
2528) -> Json<ApiResponse<String>> {
2529    match rename_fixture_by_id(&fixture_id, &payload.new_name).await {
2530        Ok(new_path) => {
2531            tracing::info!("Successfully renamed fixture: {} -> {}", fixture_id, payload.new_name);
2532            Json(ApiResponse::success(format!("Fixture renamed successfully to: {}", new_path)))
2533        }
2534        Err(e) => {
2535            tracing::error!("Failed to rename fixture {}: {}", fixture_id, e);
2536            Json(ApiResponse::error(format!("Failed to rename fixture: {}", e)))
2537        }
2538    }
2539}
2540
2541/// Rename a fixture by ID
2542async fn rename_fixture_by_id(fixture_id: &str, new_name: &str) -> Result<String> {
2543    // Validate new name
2544    if new_name.is_empty() {
2545        return Err(Error::generic("New name cannot be empty".to_string()));
2546    }
2547
2548    // Ensure new name ends with .json
2549    let new_name = if new_name.ends_with(".json") {
2550        new_name.to_string()
2551    } else {
2552        format!("{}.json", new_name)
2553    };
2554
2555    // Find the fixture file
2556    let fixtures_dir =
2557        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2558    let fixtures_path = std::path::Path::new(&fixtures_dir);
2559
2560    if !fixtures_path.exists() {
2561        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2562    }
2563
2564    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2565
2566    // Get the parent directory and construct new path
2567    let parent = old_path
2568        .parent()
2569        .ok_or_else(|| Error::generic("Could not determine parent directory".to_string()))?;
2570
2571    let new_path = parent.join(&new_name);
2572
2573    // Check if target already exists
2574    if new_path.exists() {
2575        return Err(Error::generic(format!(
2576            "A fixture with name '{}' already exists in the same directory",
2577            new_name
2578        )));
2579    }
2580
2581    // Rename the file using spawn_blocking
2582    let old_path_clone = old_path.clone();
2583    let new_path_clone = new_path.clone();
2584    tokio::task::spawn_blocking(move || {
2585        std::fs::rename(&old_path_clone, &new_path_clone)
2586            .map_err(|e| Error::generic(format!("Failed to rename fixture file: {}", e)))
2587    })
2588    .await
2589    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2590
2591    tracing::info!("Renamed fixture file: {} -> {}", old_path.display(), new_path.display());
2592
2593    // Return relative path for display
2594    Ok(new_path
2595        .strip_prefix(fixtures_path)
2596        .unwrap_or(&new_path)
2597        .to_string_lossy()
2598        .to_string())
2599}
2600
2601/// Move a fixture to a new path
2602pub async fn move_fixture(
2603    Path(fixture_id): Path<String>,
2604    Json(payload): Json<FixtureMoveRequest>,
2605) -> Json<ApiResponse<String>> {
2606    match move_fixture_by_id(&fixture_id, &payload.new_path).await {
2607        Ok(new_location) => {
2608            tracing::info!("Successfully moved fixture: {} -> {}", fixture_id, payload.new_path);
2609            Json(ApiResponse::success(format!("Fixture moved successfully to: {}", new_location)))
2610        }
2611        Err(e) => {
2612            tracing::error!("Failed to move fixture {}: {}", fixture_id, e);
2613            Json(ApiResponse::error(format!("Failed to move fixture: {}", e)))
2614        }
2615    }
2616}
2617
2618/// Move a fixture by ID to a new path
2619async fn move_fixture_by_id(fixture_id: &str, new_path: &str) -> Result<String> {
2620    // Validate new path
2621    if new_path.is_empty() {
2622        return Err(Error::generic("New path cannot be empty".to_string()));
2623    }
2624
2625    // Find the fixture file
2626    let fixtures_dir =
2627        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2628    let fixtures_path = std::path::Path::new(&fixtures_dir);
2629
2630    if !fixtures_path.exists() {
2631        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2632    }
2633
2634    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2635
2636    // Construct the new path - can be either relative to fixtures_dir or absolute within it
2637    let new_full_path = if new_path.starts_with('/') {
2638        // Absolute path within fixtures directory
2639        fixtures_path.join(new_path.trim_start_matches('/'))
2640    } else {
2641        // Relative path from fixtures directory
2642        fixtures_path.join(new_path)
2643    };
2644
2645    // Ensure target ends with .json if it doesn't already
2646    let new_full_path = if new_full_path.extension().and_then(|s| s.to_str()) == Some("json") {
2647        new_full_path
2648    } else {
2649        // If the path is a directory or doesn't have .json extension, append the original filename
2650        if new_full_path.is_dir() || !new_path.contains('.') {
2651            let file_name = old_path.file_name().ok_or_else(|| {
2652                Error::generic("Could not determine original file name".to_string())
2653            })?;
2654            new_full_path.join(file_name)
2655        } else {
2656            new_full_path.with_extension("json")
2657        }
2658    };
2659
2660    // Check if target already exists
2661    if new_full_path.exists() {
2662        return Err(Error::generic(format!(
2663            "A fixture already exists at path: {}",
2664            new_full_path.display()
2665        )));
2666    }
2667
2668    // Create parent directories and move file using spawn_blocking
2669    let old_path_clone = old_path.clone();
2670    let new_full_path_clone = new_full_path.clone();
2671    tokio::task::spawn_blocking(move || {
2672        // Create parent directories if they don't exist
2673        if let Some(parent) = new_full_path_clone.parent() {
2674            std::fs::create_dir_all(parent)
2675                .map_err(|e| Error::generic(format!("Failed to create target directory: {}", e)))?;
2676        }
2677
2678        // Move the file
2679        std::fs::rename(&old_path_clone, &new_full_path_clone)
2680            .map_err(|e| Error::generic(format!("Failed to move fixture file: {}", e)))
2681    })
2682    .await
2683    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2684
2685    tracing::info!("Moved fixture file: {} -> {}", old_path.display(), new_full_path.display());
2686
2687    // Clean up empty directories from the old location
2688    cleanup_empty_directories(&old_path).await;
2689
2690    // Return relative path for display
2691    Ok(new_full_path
2692        .strip_prefix(fixtures_path)
2693        .unwrap_or(&new_full_path)
2694        .to_string_lossy()
2695        .to_string())
2696}
2697
2698/// Get current validation settings
2699pub async fn get_validation(
2700    State(state): State<AdminState>,
2701) -> Json<ApiResponse<ValidationSettings>> {
2702    // Get real validation settings from configuration
2703    let config_state = state.get_config().await;
2704
2705    Json(ApiResponse::success(config_state.validation_settings))
2706}
2707
2708/// Update validation settings
2709pub async fn update_validation(
2710    State(state): State<AdminState>,
2711    Json(update): Json<ValidationUpdate>,
2712) -> Json<ApiResponse<String>> {
2713    // Validate the mode
2714    match update.mode.as_str() {
2715        "enforce" | "warn" | "off" => {}
2716        _ => {
2717            return Json(ApiResponse::error(
2718                "Invalid validation mode. Must be 'enforce', 'warn', or 'off'".to_string(),
2719            ))
2720        }
2721    }
2722
2723    // Update the actual validation configuration
2724    let mode = update.mode.clone();
2725    state
2726        .update_validation_config(
2727            update.mode,
2728            update.aggregate_errors,
2729            update.validate_responses,
2730            update.overrides.unwrap_or_default(),
2731        )
2732        .await;
2733
2734    tracing::info!(
2735        "Updated validation settings: mode={}, aggregate_errors={}",
2736        mode,
2737        update.aggregate_errors
2738    );
2739
2740    Json(ApiResponse::success("Validation settings updated".to_string()))
2741}
2742
2743/// Get environment variables
2744pub async fn get_env_vars() -> Json<ApiResponse<HashMap<String, String>>> {
2745    // Get actual environment variables that are relevant to MockForge
2746    let mut env_vars = HashMap::new();
2747
2748    let relevant_vars = [
2749        // Core functionality
2750        "MOCKFORGE_LATENCY_ENABLED",
2751        "MOCKFORGE_FAILURES_ENABLED",
2752        "MOCKFORGE_PROXY_ENABLED",
2753        "MOCKFORGE_RECORD_ENABLED",
2754        "MOCKFORGE_REPLAY_ENABLED",
2755        "MOCKFORGE_LOG_LEVEL",
2756        "MOCKFORGE_CONFIG_FILE",
2757        "RUST_LOG",
2758        // HTTP server configuration
2759        "MOCKFORGE_HTTP_PORT",
2760        "MOCKFORGE_HTTP_HOST",
2761        "MOCKFORGE_HTTP_OPENAPI_SPEC",
2762        "MOCKFORGE_CORS_ENABLED",
2763        "MOCKFORGE_REQUEST_TIMEOUT_SECS",
2764        // WebSocket server configuration
2765        "MOCKFORGE_WS_PORT",
2766        "MOCKFORGE_WS_HOST",
2767        "MOCKFORGE_WS_REPLAY_FILE",
2768        "MOCKFORGE_WS_CONNECTION_TIMEOUT_SECS",
2769        // gRPC server configuration
2770        "MOCKFORGE_GRPC_PORT",
2771        "MOCKFORGE_GRPC_HOST",
2772        // Admin UI configuration
2773        "MOCKFORGE_ADMIN_ENABLED",
2774        "MOCKFORGE_ADMIN_PORT",
2775        "MOCKFORGE_ADMIN_HOST",
2776        "MOCKFORGE_ADMIN_MOUNT_PATH",
2777        "MOCKFORGE_ADMIN_API_ENABLED",
2778        // Template and validation
2779        "MOCKFORGE_RESPONSE_TEMPLATE_EXPAND",
2780        "MOCKFORGE_REQUEST_VALIDATION",
2781        "MOCKFORGE_AGGREGATE_ERRORS",
2782        "MOCKFORGE_RESPONSE_VALIDATION",
2783        "MOCKFORGE_VALIDATION_STATUS",
2784        // Data generation
2785        "MOCKFORGE_RAG_ENABLED",
2786        "MOCKFORGE_FAKE_TOKENS",
2787        // Other settings
2788        "MOCKFORGE_FIXTURES_DIR",
2789    ];
2790
2791    for var_name in &relevant_vars {
2792        if let Ok(value) = std::env::var(var_name) {
2793            env_vars.insert(var_name.to_string(), value);
2794        }
2795    }
2796
2797    Json(ApiResponse::success(env_vars))
2798}
2799
2800/// Update environment variable
2801pub async fn update_env_var(Json(update): Json<EnvVarUpdate>) -> Json<ApiResponse<String>> {
2802    // Set the environment variable (runtime only - not persisted)
2803    std::env::set_var(&update.key, &update.value);
2804
2805    tracing::info!("Updated environment variable: {}={}", update.key, update.value);
2806
2807    // Note: Environment variables set at runtime are not persisted
2808    // In a production system, you might want to write to a .env file or config file
2809    Json(ApiResponse::success(format!(
2810        "Environment variable {} updated to '{}'. Note: This change is not persisted and will be lost on restart.",
2811        update.key, update.value
2812    )))
2813}
2814
2815/// Get file content
2816pub async fn get_file_content(
2817    Json(request): Json<FileContentRequest>,
2818) -> Json<ApiResponse<String>> {
2819    // Validate the file path for security
2820    if let Err(e) = validate_file_path(&request.file_path) {
2821        return Json(ApiResponse::error(format!("Invalid file path: {}", e)));
2822    }
2823
2824    // Read the actual file content
2825    match tokio::fs::read_to_string(&request.file_path).await {
2826        Ok(content) => {
2827            // Validate the file content for security
2828            if let Err(e) = validate_file_content(&content) {
2829                return Json(ApiResponse::error(format!("Invalid file content: {}", e)));
2830            }
2831            Json(ApiResponse::success(content))
2832        }
2833        Err(e) => Json(ApiResponse::error(format!("Failed to read file: {}", e))),
2834    }
2835}
2836
2837/// Save file content
2838pub async fn save_file_content(Json(request): Json<FileSaveRequest>) -> Json<ApiResponse<String>> {
2839    match save_file_to_filesystem(&request.file_path, &request.content).await {
2840        Ok(_) => {
2841            tracing::info!("Successfully saved file: {}", request.file_path);
2842            Json(ApiResponse::success("File saved successfully".to_string()))
2843        }
2844        Err(e) => {
2845            tracing::error!("Failed to save file {}: {}", request.file_path, e);
2846            Json(ApiResponse::error(format!("Failed to save file: {}", e)))
2847        }
2848    }
2849}
2850
2851/// Save content to a file on the filesystem
2852async fn save_file_to_filesystem(file_path: &str, content: &str) -> Result<()> {
2853    // Validate the file path for security
2854    validate_file_path(file_path)?;
2855
2856    // Validate the file content for security
2857    validate_file_content(content)?;
2858
2859    // Convert to PathBuf and clone data for spawn_blocking
2860    let path = std::path::PathBuf::from(file_path);
2861    let content = content.to_string();
2862
2863    // Perform file operations using spawn_blocking
2864    let path_clone = path.clone();
2865    let content_clone = content.clone();
2866    tokio::task::spawn_blocking(move || {
2867        // Create parent directories if they don't exist
2868        if let Some(parent) = path_clone.parent() {
2869            std::fs::create_dir_all(parent).map_err(|e| {
2870                Error::generic(format!("Failed to create directory {}: {}", parent.display(), e))
2871            })?;
2872        }
2873
2874        // Write the content to the file
2875        std::fs::write(&path_clone, &content_clone).map_err(|e| {
2876            Error::generic(format!("Failed to write file {}: {}", path_clone.display(), e))
2877        })?;
2878
2879        // Verify the file was written correctly
2880        let written_content = std::fs::read_to_string(&path_clone).map_err(|e| {
2881            Error::generic(format!("Failed to verify written file {}: {}", path_clone.display(), e))
2882        })?;
2883
2884        if written_content != content_clone {
2885            return Err(Error::generic(format!(
2886                "File content verification failed for {}",
2887                path_clone.display()
2888            )));
2889        }
2890
2891        Ok::<_, Error>(())
2892    })
2893    .await
2894    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2895
2896    tracing::info!("File saved successfully: {} ({} bytes)", path.display(), content.len());
2897    Ok(())
2898}
2899
2900/// Validate file path for security
2901fn validate_file_path(file_path: &str) -> Result<()> {
2902    // Check for path traversal attacks
2903    if file_path.contains("..") {
2904        return Err(Error::generic("Path traversal detected in file path".to_string()));
2905    }
2906
2907    // Check for absolute paths that might be outside allowed directories
2908    let path = std::path::Path::new(file_path);
2909    if path.is_absolute() {
2910        // For absolute paths, ensure they're within allowed directories
2911        let allowed_dirs = [
2912            std::env::current_dir().unwrap_or_default(),
2913            std::path::PathBuf::from("."),
2914            std::path::PathBuf::from("fixtures"),
2915            std::path::PathBuf::from("config"),
2916        ];
2917
2918        let mut is_allowed = false;
2919        for allowed_dir in &allowed_dirs {
2920            if path.starts_with(allowed_dir) {
2921                is_allowed = true;
2922                break;
2923            }
2924        }
2925
2926        if !is_allowed {
2927            return Err(Error::generic("File path is outside allowed directories".to_string()));
2928        }
2929    }
2930
2931    // Check for dangerous file extensions or names
2932    let dangerous_extensions = ["exe", "bat", "cmd", "sh", "ps1", "scr", "com"];
2933    if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
2934        if dangerous_extensions.contains(&extension.to_lowercase().as_str()) {
2935            return Err(Error::generic(format!(
2936                "Dangerous file extension not allowed: {}",
2937                extension
2938            )));
2939        }
2940    }
2941
2942    Ok(())
2943}
2944
2945/// Validate file content for security
2946fn validate_file_content(content: &str) -> Result<()> {
2947    // Check for reasonable file size (prevent DoS)
2948    if content.len() > 10 * 1024 * 1024 {
2949        // 10MB limit
2950        return Err(Error::generic("File content too large (max 10MB)".to_string()));
2951    }
2952
2953    // Check for null bytes (potential security issue)
2954    if content.contains('\0') {
2955        return Err(Error::generic("File content contains null bytes".to_string()));
2956    }
2957
2958    Ok(())
2959}
2960
/// Fixture delete request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureDeleteRequest {
    /// ID of the fixture to delete
    pub fixture_id: String,
}
2966
/// Environment variable update
///
/// Applied to the running process only; changes are not persisted (see
/// `update_env_var`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvVarUpdate {
    /// Environment variable name
    pub key: String,
    /// New value to assign
    pub value: String,
}
2973
/// Fixture bulk delete request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteRequest {
    /// IDs of all fixtures to delete
    pub fixture_ids: Vec<String>,
}
2979
/// Fixture bulk delete result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteResult {
    /// Number of fixtures successfully deleted
    pub deleted_count: usize,
    /// Number of fixtures the caller asked to delete
    pub total_requested: usize,
    /// One message per fixture that could not be deleted
    pub errors: Vec<String>,
}
2987
/// Fixture rename request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureRenameRequest {
    /// Target file name (a ".json" extension is appended if missing)
    pub new_name: String,
}
2993
/// Fixture move request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureMoveRequest {
    /// Target path, interpreted relative to the fixtures directory
    /// (a leading '/' is also treated as fixtures-root-relative)
    pub new_path: String,
}
2999
/// File content request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContentRequest {
    /// Path of the file to read (must pass `validate_file_path`)
    pub file_path: String,
    /// File type declared by the client; not consulted by `get_file_content`
    pub file_type: String,
}
3006
/// File save request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileSaveRequest {
    /// Path of the file to write (must pass `validate_file_path`)
    pub file_path: String,
    /// Full new content of the file (max 10MB, no NUL bytes)
    pub content: String,
}
3013
3014/// Get smoke tests
3015pub async fn get_smoke_tests(
3016    State(state): State<AdminState>,
3017) -> Json<ApiResponse<Vec<SmokeTestResult>>> {
3018    let results = state.get_smoke_test_results().await;
3019    Json(ApiResponse::success(results))
3020}
3021
3022/// Run smoke tests endpoint
3023pub async fn run_smoke_tests_endpoint(
3024    State(state): State<AdminState>,
3025) -> Json<ApiResponse<String>> {
3026    tracing::info!("Starting smoke test execution");
3027
3028    // Spawn smoke test execution in background to avoid blocking
3029    let state_clone = state.clone();
3030    tokio::spawn(async move {
3031        if let Err(e) = execute_smoke_tests(&state_clone).await {
3032            tracing::error!("Smoke test execution failed: {}", e);
3033        } else {
3034            tracing::info!("Smoke test execution completed successfully");
3035        }
3036    });
3037
3038    Json(ApiResponse::success(
3039        "Smoke tests started. Check results in the smoke tests section.".to_string(),
3040    ))
3041}
3042
3043/// Execute smoke tests against fixtures
3044async fn execute_smoke_tests(state: &AdminState) -> Result<()> {
3045    // Get base URL from environment or use default
3046    let base_url =
3047        std::env::var("MOCKFORGE_BASE_URL").unwrap_or_else(|_| "http://localhost:3000".to_string());
3048
3049    let context = SmokeTestContext {
3050        base_url,
3051        timeout_seconds: 30,
3052        parallel: true,
3053    };
3054
3055    // Get all fixtures to create smoke tests from
3056    let fixtures = scan_fixtures_directory()?;
3057
3058    // Filter for HTTP fixtures only (smoke tests are typically HTTP)
3059    let http_fixtures: Vec<&FixtureInfo> =
3060        fixtures.iter().filter(|f| f.protocol == "http").collect();
3061
3062    if http_fixtures.is_empty() {
3063        tracing::warn!("No HTTP fixtures found for smoke testing");
3064        return Ok(());
3065    }
3066
3067    tracing::info!("Running smoke tests for {} HTTP fixtures", http_fixtures.len());
3068
3069    // Create smoke test results from fixtures
3070    let mut test_results = Vec::new();
3071
3072    for fixture in http_fixtures {
3073        let test_result = create_smoke_test_from_fixture(fixture);
3074        test_results.push(test_result);
3075    }
3076
3077    // Execute tests
3078    let mut executed_results = Vec::new();
3079    for mut test_result in test_results {
3080        // Update status to running
3081        test_result.status = "running".to_string();
3082        state.update_smoke_test_result(test_result.clone()).await;
3083
3084        // Execute the test
3085        let start_time = std::time::Instant::now();
3086        match execute_single_smoke_test(&test_result, &context).await {
3087            Ok((status_code, response_time_ms)) => {
3088                test_result.status = "passed".to_string();
3089                test_result.status_code = Some(status_code);
3090                test_result.response_time_ms = Some(response_time_ms);
3091                test_result.error_message = None;
3092            }
3093            Err(e) => {
3094                test_result.status = "failed".to_string();
3095                test_result.error_message = Some(e.to_string());
3096                test_result.status_code = None;
3097                test_result.response_time_ms = None;
3098            }
3099        }
3100
3101        let duration = start_time.elapsed();
3102        test_result.duration_seconds = Some(duration.as_secs_f64());
3103        test_result.last_run = Some(Utc::now());
3104
3105        executed_results.push(test_result.clone());
3106        state.update_smoke_test_result(test_result).await;
3107    }
3108
3109    tracing::info!("Smoke test execution completed: {} tests run", executed_results.len());
3110    Ok(())
3111}
3112
3113/// Create a smoke test result from a fixture
3114fn create_smoke_test_from_fixture(fixture: &FixtureInfo) -> SmokeTestResult {
3115    let test_name = format!("{} {}", fixture.method, fixture.path);
3116    let description = format!("Smoke test for {} endpoint", fixture.path);
3117
3118    SmokeTestResult {
3119        id: format!("smoke_{}", fixture.id),
3120        name: test_name,
3121        method: fixture.method.clone(),
3122        path: fixture.path.clone(),
3123        description,
3124        last_run: None,
3125        status: "pending".to_string(),
3126        response_time_ms: None,
3127        error_message: None,
3128        status_code: None,
3129        duration_seconds: None,
3130    }
3131}
3132
3133/// Execute a single smoke test
3134async fn execute_single_smoke_test(
3135    test: &SmokeTestResult,
3136    context: &SmokeTestContext,
3137) -> Result<(u16, u64)> {
3138    let url = format!("{}{}", context.base_url, test.path);
3139    let client = reqwest::Client::builder()
3140        .timeout(Duration::from_secs(context.timeout_seconds))
3141        .build()
3142        .map_err(|e| Error::generic(format!("Failed to create HTTP client: {}", e)))?;
3143
3144    let start_time = std::time::Instant::now();
3145
3146    let response = match test.method.as_str() {
3147        "GET" => client.get(&url).send().await,
3148        "POST" => client.post(&url).send().await,
3149        "PUT" => client.put(&url).send().await,
3150        "DELETE" => client.delete(&url).send().await,
3151        "PATCH" => client.patch(&url).send().await,
3152        "HEAD" => client.head(&url).send().await,
3153        "OPTIONS" => client.request(reqwest::Method::OPTIONS, &url).send().await,
3154        _ => {
3155            return Err(Error::generic(format!("Unsupported HTTP method: {}", test.method)));
3156        }
3157    };
3158
3159    let response_time = start_time.elapsed();
3160    let response_time_ms = response_time.as_millis() as u64;
3161
3162    match response {
3163        Ok(resp) => {
3164            let status_code = resp.status().as_u16();
3165            if (200..400).contains(&status_code) {
3166                Ok((status_code, response_time_ms))
3167            } else {
3168                Err(Error::generic(format!(
3169                    "HTTP error: {} {}",
3170                    status_code,
3171                    resp.status().canonical_reason().unwrap_or("Unknown")
3172                )))
3173            }
3174        }
3175        Err(e) => Err(Error::generic(format!("Request failed: {}", e))),
3176    }
3177}
3178
/// Request body for installing a plugin via the admin API.
#[derive(Debug, Deserialize)]
pub struct PluginInstallRequest {
    /// Plugin source: local path, URL, git repo, or registry spec
    pub source: String,
    /// Force flag forwarded to the install flow (defaults to false)
    #[serde(default)]
    pub force: bool,
    /// Skip validation — rejected by the admin install flow (defaults to false)
    #[serde(default)]
    pub skip_validation: bool,
    /// Disable signature verification — rejected by the admin install flow
    /// (defaults to false)
    #[serde(default)]
    pub no_verify: bool,
    /// Optional checksum verified when downloading the plugin from a URL
    pub checksum: Option<String>,
}
3190
/// Request body for validating a plugin source without installing it.
#[derive(Debug, Deserialize)]
pub struct PluginValidateRequest {
    /// Plugin source to validate (path, URL, git repo, or registry spec)
    pub source: String,
}
3195
/// Locate a plugin root directory by its `plugin.yaml` manifest.
///
/// Returns `path` itself when it contains a manifest; otherwise returns the
/// first immediate subdirectory that does. `None` when neither exists or the
/// directory cannot be read.
fn find_plugin_directory(path: &std::path::Path) -> Option<std::path::PathBuf> {
    // A plugin root is identified by the presence of a `plugin.yaml` manifest.
    if path.join("plugin.yaml").exists() {
        return Some(path.to_path_buf());
    }

    // Otherwise look exactly one level down for the first subdirectory
    // carrying a manifest; unreadable dirs and entries are skipped.
    std::fs::read_dir(path)
        .ok()?
        .filter_map(|entry| entry.ok())
        .map(|entry| entry.path())
        .find(|child| child.is_dir() && child.join("plugin.yaml").exists())
}
3211
3212async fn resolve_plugin_source_path(
3213    source: PluginSource,
3214    checksum: Option<&str>,
3215) -> std::result::Result<std::path::PathBuf, String> {
3216    match source {
3217        PluginSource::Local(path) => Ok(path),
3218        PluginSource::Url { url, .. } => {
3219            let loader = RemotePluginLoader::new(RemotePluginConfig::default())
3220                .map_err(|e| format!("Failed to initialize remote plugin loader: {}", e))?;
3221            loader
3222                .download_with_checksum(&url, checksum)
3223                .await
3224                .map_err(|e| format!("Failed to download plugin from URL: {}", e))
3225        }
3226        PluginSource::Git(git_source) => {
3227            let loader = GitPluginLoader::new(GitPluginConfig::default())
3228                .map_err(|e| format!("Failed to initialize git plugin loader: {}", e))?;
3229            loader
3230                .clone_from_git(&git_source)
3231                .await
3232                .map_err(|e| format!("Failed to clone plugin from git: {}", e))
3233        }
3234        PluginSource::Registry { name, version } => Err(format!(
3235            "Registry plugin installation is not yet supported from the admin API (requested {}@{})",
3236            name,
3237            version.unwrap_or_else(|| "latest".to_string())
3238        )),
3239    }
3240}
3241
/// Install a plugin from a path, URL, or Git source and register it in-process.
///
/// Flow: resolve the source (downloading/cloning if remote), validate the
/// plugin manifest, load the plugin through a fresh loader, then register the
/// resulting instance in the shared admin plugin registry. Responds with a
/// JSON envelope: `{"success": true, "data": {...}}` on success or
/// `{"success": false, "error": "..."}` on failure.
///
/// Restrictions enforced here: `skip_validation` and `no_verify` are rejected
/// outright, and reinstalling an already-registered plugin requires
/// `force=true`.
pub async fn install_plugin(
    State(state): State<AdminState>,
    Json(request): Json<PluginInstallRequest>,
) -> impl IntoResponse {
    let source = request.source.trim().to_string();
    if source.is_empty() {
        return Json(json!({
            "success": false,
            "error": "Plugin source is required"
        }));
    }

    if request.skip_validation {
        return Json(json!({
            "success": false,
            "error": "Skipping validation is not supported in admin install flow."
        }));
    }

    if request.no_verify {
        return Json(json!({
            "success": false,
            "error": "Disabling signature verification is not supported in admin install flow."
        }));
    }

    // Clone everything the blocking task needs up front, since it must be
    // `'static` + `Send`.
    let force = request.force;
    let checksum = request.checksum.clone();
    let state_for_install = state.clone();

    // The install uses `blocking_write` on the registry lock, which must not
    // run on an async worker thread, so the whole install runs on a dedicated
    // blocking thread. The loader APIs are async, so a small current-thread
    // runtime is built inside that thread to drive them.
    let install_result = tokio::task::spawn_blocking(
        move || -> std::result::Result<(String, String, String), String> {
            let runtime = tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .map_err(|e| format!("Failed to initialize install runtime: {}", e))?;

            // Async phase: resolve source -> validate manifest -> load instance.
            let (plugin_instance, plugin_id, plugin_name, plugin_version) =
                runtime.block_on(async move {
                    let parsed_source = PluginSource::parse(&source)
                        .map_err(|e| format!("Invalid plugin source: {}", e))?;

                    let source_path =
                        resolve_plugin_source_path(parsed_source, checksum.as_deref()).await?;

                    // Downloaded/cloned sources may nest the actual plugin in a
                    // subdirectory; fall back to the resolved path itself.
                    let plugin_root = if source_path.is_dir() {
                        find_plugin_directory(&source_path).unwrap_or(source_path.clone())
                    } else {
                        source_path.clone()
                    };

                    if !plugin_root.exists() || !plugin_root.is_dir() {
                        return Err(format!(
                            "Resolved plugin path is not a directory: {}",
                            plugin_root.display()
                        ));
                    }

                    // Validation also yields the manifest metadata we report back.
                    let loader = PluginLoader::new(PluginLoaderConfig::default());
                    let manifest = loader
                        .validate_plugin(&plugin_root)
                        .await
                        .map_err(|e| format!("Failed to validate plugin: {}", e))?;

                    let plugin_id = manifest.info.id.clone();
                    let plugin_name = manifest.info.name.clone();
                    let plugin_version = manifest.info.version.to_string();

                    // The runtime loader discovers plugins by directory, so point
                    // it at the plugin root's parent.
                    let parent_dir = plugin_root
                        .parent()
                        .unwrap_or_else(|| std::path::Path::new("."))
                        .to_string_lossy()
                        .to_string();

                    let runtime_loader = PluginLoader::new(PluginLoaderConfig {
                        plugin_dirs: vec![parent_dir],
                        ..PluginLoaderConfig::default()
                    });

                    runtime_loader
                        .load_plugin(&plugin_id)
                        .await
                        .map_err(|e| format!("Failed to load plugin into runtime: {}", e))?;

                    let plugin_instance =
                        runtime_loader.get_plugin(&plugin_id).await.ok_or_else(|| {
                            "Plugin loaded but instance was not retrievable from loader".to_string()
                        })?;

                    Ok::<_, String>((
                        plugin_instance,
                        plugin_id.to_string(),
                        plugin_name,
                        plugin_version,
                    ))
                })?;

            // Registration phase (sync): runs on this blocking thread, hence
            // `blocking_write` rather than the async lock API.
            let mut registry = state_for_install.plugin_registry.blocking_write();

            if let Some(existing_id) =
                registry.list_plugins().into_iter().find(|id| id.as_str() == plugin_id)
            {
                if force {
                    registry.remove_plugin(&existing_id).map_err(|e| {
                        format!("Failed to remove existing plugin before reinstall: {}", e)
                    })?;
                } else {
                    return Err(format!(
                        "Plugin '{}' is already installed. Use force=true to reinstall.",
                        plugin_id
                    ));
                }
            }

            registry
                .add_plugin(plugin_instance)
                .map_err(|e| format!("Failed to register plugin in admin registry: {}", e))?;

            Ok((plugin_id, plugin_name, plugin_version))
        },
    )
    .await;

    // Outer Err = the blocking task itself panicked/was cancelled; inner Err =
    // a normal install failure reported as a message string.
    let (plugin_id, plugin_name, plugin_version) = match install_result {
        Ok(Ok(result)) => result,
        Ok(Err(err)) => {
            return Json(json!({
                "success": false,
                "error": err
            }))
        }
        Err(err) => {
            return Json(json!({
                "success": false,
                "error": format!("Plugin installation task failed: {}", err)
            }))
        }
    };

    Json(json!({
        "success": true,
        "data": {
            "plugin_id": plugin_id,
            "name": plugin_name,
            "version": plugin_version
        },
        "message": "Plugin installed and registered in runtime."
    }))
}
3392
3393/// Validate a plugin source without installing it.
3394pub async fn validate_plugin(Json(request): Json<PluginValidateRequest>) -> impl IntoResponse {
3395    let source = request.source.trim();
3396    if source.is_empty() {
3397        return Json(json!({
3398            "success": false,
3399            "error": "Plugin source is required"
3400        }));
3401    }
3402
3403    let source = match PluginSource::parse(source) {
3404        Ok(source) => source,
3405        Err(e) => {
3406            return Json(json!({
3407                "success": false,
3408                "error": format!("Invalid plugin source: {}", e)
3409            }));
3410        }
3411    };
3412
3413    let path = match source.clone() {
3414        PluginSource::Local(path) => path,
3415        PluginSource::Url { .. } | PluginSource::Git(_) => {
3416            match resolve_plugin_source_path(source, None).await {
3417                Ok(path) => path,
3418                Err(err) => {
3419                    return Json(json!({
3420                        "success": false,
3421                        "error": err
3422                    }))
3423                }
3424            }
3425        }
3426        PluginSource::Registry { .. } => {
3427            return Json(json!({
3428                "success": false,
3429                "error": "Registry plugin validation is not yet supported from the admin API."
3430            }))
3431        }
3432    };
3433
3434    let plugin_root = if path.is_dir() {
3435        find_plugin_directory(&path).unwrap_or(path.clone())
3436    } else {
3437        path
3438    };
3439
3440    let loader = PluginLoader::new(PluginLoaderConfig::default());
3441    match loader.validate_plugin(&plugin_root).await {
3442        Ok(manifest) => Json(json!({
3443            "success": true,
3444            "data": {
3445                "valid": true,
3446                "id": manifest.info.id.to_string(),
3447                "name": manifest.info.name,
3448                "version": manifest.info.version.to_string()
3449            }
3450        })),
3451        Err(e) => Json(json!({
3452            "success": false,
3453            "data": { "valid": false },
3454            "error": format!("Plugin validation failed: {}", e)
3455        })),
3456    }
3457}
3458
// Handlers referenced by routes.rs; several below are placeholder implementations pending full support.
3460pub async fn update_traffic_shaping(
3461    State(state): State<AdminState>,
3462    Json(config): Json<TrafficShapingConfig>,
3463) -> Json<ApiResponse<String>> {
3464    if config.burst_loss.burst_probability > 1.0
3465        || config.burst_loss.loss_rate_during_burst > 1.0
3466        || config.burst_loss.burst_probability < 0.0
3467        || config.burst_loss.loss_rate_during_burst < 0.0
3468    {
3469        return Json(ApiResponse::error(
3470            "Burst loss probabilities must be between 0.0 and 1.0".to_string(),
3471        ));
3472    }
3473
3474    {
3475        let mut cfg = state.config.write().await;
3476        cfg.traffic_shaping = config.clone();
3477    }
3478
3479    if let Some(ref chaos_api_state) = state.chaos_api_state {
3480        let mut chaos_config = chaos_api_state.config.write().await;
3481        chaos_config.traffic_shaping = Some(mockforge_chaos::config::TrafficShapingConfig {
3482            enabled: config.enabled,
3483            bandwidth_limit_bps: config.bandwidth.max_bytes_per_sec,
3484            packet_loss_percent: config.burst_loss.loss_rate_during_burst * 100.0,
3485            max_connections: 0,
3486            connection_timeout_ms: 30000,
3487        });
3488    }
3489
3490    Json(ApiResponse::success("Traffic shaping updated".to_string()))
3491}
3492
/// Import a Postman collection and persist it as a new workspace.
///
/// Parses the collection from the raw JSON request body (`content`, optional
/// `filename`/`environment`/`base_url`), converts the parsed routes into a
/// workspace, saves it, and appends an [`ImportHistoryEntry`] describing the
/// outcome — failures are recorded in the history too. Returns a
/// human-readable status message in the [`ApiResponse`] envelope.
pub async fn import_postman(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use mockforge_core::workspace_import::{import_postman_to_workspace, WorkspaceImportConfig};
    use uuid::Uuid;

    // All fields are optional; a missing "content" degrades to an empty
    // string, which the importer rejects with a parse error below.
    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Import the collection
    let import_result = match mockforge_core::import::import_postman_collection(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            // Record failed import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Postman import failed: {}", e)));
        }
    };

    // Create workspace from imported routes; the workspace name is the
    // filename stem when a filename was provided.
    let workspace_name = filename
        .and_then(|f| f.split('.').next())
        .unwrap_or("Imported Postman Collection");

    let config = WorkspaceImportConfig {
        create_folders: true,
        base_folder_name: None,
        preserve_hierarchy: true,
        max_depth: 5,
    };

    // Convert MockForgeRoute to ImportRoute (the workspace importer's type).
    let routes: Vec<ImportRoute> = import_result
        .routes
        .into_iter()
        .map(|route| ImportRoute {
            method: route.method,
            path: route.path,
            headers: route.headers,
            body: route.body,
            response: ImportResponse {
                status: route.response.status,
                headers: route.response.headers,
                body: route.response.body,
            },
        })
        .collect();

    match import_postman_to_workspace(routes, workspace_name.to_string(), config) {
        Ok(workspace_result) => {
            // Save the workspace to persistent storage
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            // Record successful import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: import_result.variables.len(),
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            // Record failed import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}
3616
3617pub async fn import_insomnia(
3618    State(state): State<AdminState>,
3619    Json(request): Json<serde_json::Value>,
3620) -> Json<ApiResponse<String>> {
3621    use uuid::Uuid;
3622
3623    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3624    let filename = request.get("filename").and_then(|v| v.as_str());
3625    let environment = request.get("environment").and_then(|v| v.as_str());
3626    let base_url = request.get("base_url").and_then(|v| v.as_str());
3627
3628    // Import the export
3629    let import_result = match mockforge_core::import::import_insomnia_export(content, environment) {
3630        Ok(result) => result,
3631        Err(e) => {
3632            // Record failed import
3633            let entry = ImportHistoryEntry {
3634                id: Uuid::new_v4().to_string(),
3635                format: "insomnia".to_string(),
3636                timestamp: Utc::now(),
3637                routes_count: 0,
3638                variables_count: 0,
3639                warnings_count: 0,
3640                success: false,
3641                filename: filename.map(|s| s.to_string()),
3642                environment: environment.map(|s| s.to_string()),
3643                base_url: base_url.map(|s| s.to_string()),
3644                error_message: Some(e.clone()),
3645            };
3646            let mut history = state.import_history.write().await;
3647            history.push(entry);
3648
3649            return Json(ApiResponse::error(format!("Insomnia import failed: {}", e)));
3650        }
3651    };
3652
3653    // Create workspace from imported routes
3654    let workspace_name = filename
3655        .and_then(|f| f.split('.').next())
3656        .unwrap_or("Imported Insomnia Collection");
3657
3658    let _config = WorkspaceImportConfig {
3659        create_folders: true,
3660        base_folder_name: None,
3661        preserve_hierarchy: true,
3662        max_depth: 5,
3663    };
3664
3665    // Extract variables count before moving import_result
3666    let variables_count = import_result.variables.len();
3667
3668    match mockforge_core::workspace_import::create_workspace_from_insomnia(
3669        import_result,
3670        Some(workspace_name.to_string()),
3671    ) {
3672        Ok(workspace_result) => {
3673            // Save the workspace to persistent storage
3674            if let Err(e) =
3675                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3676            {
3677                tracing::error!("Failed to save workspace: {}", e);
3678                return Json(ApiResponse::error(format!(
3679                    "Import succeeded but failed to save workspace: {}",
3680                    e
3681                )));
3682            }
3683
3684            // Record successful import
3685            let entry = ImportHistoryEntry {
3686                id: Uuid::new_v4().to_string(),
3687                format: "insomnia".to_string(),
3688                timestamp: Utc::now(),
3689                routes_count: workspace_result.request_count,
3690                variables_count,
3691                warnings_count: workspace_result.warnings.len(),
3692                success: true,
3693                filename: filename.map(|s| s.to_string()),
3694                environment: environment.map(|s| s.to_string()),
3695                base_url: base_url.map(|s| s.to_string()),
3696                error_message: None,
3697            };
3698            let mut history = state.import_history.write().await;
3699            history.push(entry);
3700
3701            Json(ApiResponse::success(format!(
3702                "Successfully imported {} routes into workspace '{}'",
3703                workspace_result.request_count, workspace_name
3704            )))
3705        }
3706        Err(e) => {
3707            // Record failed import
3708            let entry = ImportHistoryEntry {
3709                id: Uuid::new_v4().to_string(),
3710                format: "insomnia".to_string(),
3711                timestamp: Utc::now(),
3712                routes_count: 0,
3713                variables_count: 0,
3714                warnings_count: 0,
3715                success: false,
3716                filename: filename.map(|s| s.to_string()),
3717                environment: environment.map(|s| s.to_string()),
3718                base_url: base_url.map(|s| s.to_string()),
3719                error_message: Some(e.to_string()),
3720            };
3721            let mut history = state.import_history.write().await;
3722            history.push(entry);
3723
3724            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3725        }
3726    }
3727}
3728
3729pub async fn import_openapi(
3730    State(_state): State<AdminState>,
3731    Json(_request): Json<serde_json::Value>,
3732) -> Json<ApiResponse<String>> {
3733    Json(ApiResponse::success("OpenAPI import completed".to_string()))
3734}
3735
/// Import a set of curl commands and persist them as a new workspace.
///
/// Parses the commands from the raw JSON request body (`content`, optional
/// `filename`/`base_url`), creates a workspace from the parsed routes, saves
/// it, and appends an [`ImportHistoryEntry`] describing the outcome —
/// failures are recorded in the history too. Returns a human-readable status
/// message in the [`ApiResponse`] envelope.
pub async fn import_curl(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use uuid::Uuid;

    // All fields are optional; a missing "content" degrades to an empty
    // string, which the importer rejects with a parse error below.
    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Import the commands
    let import_result = match mockforge_core::import::import_curl_commands(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            // Record failed import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Curl import failed: {}", e)));
        }
    };

    // Create workspace from imported routes; the workspace name is the
    // filename stem when a filename was provided.
    let workspace_name =
        filename.and_then(|f| f.split('.').next()).unwrap_or("Imported Curl Commands");

    match mockforge_core::workspace_import::create_workspace_from_curl(
        import_result,
        Some(workspace_name.to_string()),
    ) {
        Ok(workspace_result) => {
            // Save the workspace to persistent storage
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            // Record successful import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: 0, // Curl doesn't have variables
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            // Record failed import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}
3835
3836pub async fn preview_import(
3837    State(_state): State<AdminState>,
3838    Json(request): Json<serde_json::Value>,
3839) -> Json<ApiResponse<serde_json::Value>> {
3840    use mockforge_core::import::{
3841        import_curl_commands, import_insomnia_export, import_postman_collection,
3842    };
3843
3844    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3845    let filename = request.get("filename").and_then(|v| v.as_str());
3846    let environment = request.get("environment").and_then(|v| v.as_str());
3847    let base_url = request.get("base_url").and_then(|v| v.as_str());
3848
3849    // Detect format from filename or content
3850    let format = if let Some(fname) = filename {
3851        if fname.to_lowercase().contains("postman")
3852            || fname.to_lowercase().ends_with(".postman_collection")
3853        {
3854            "postman"
3855        } else if fname.to_lowercase().contains("insomnia")
3856            || fname.to_lowercase().ends_with(".insomnia")
3857        {
3858            "insomnia"
3859        } else if fname.to_lowercase().contains("curl")
3860            || fname.to_lowercase().ends_with(".sh")
3861            || fname.to_lowercase().ends_with(".curl")
3862        {
3863            "curl"
3864        } else {
3865            "unknown"
3866        }
3867    } else {
3868        "unknown"
3869    };
3870
3871    match format {
3872        "postman" => match import_postman_collection(content, base_url) {
3873            Ok(import_result) => {
3874                let routes: Vec<serde_json::Value> = import_result
3875                    .routes
3876                    .into_iter()
3877                    .map(|route| {
3878                        serde_json::json!({
3879                            "method": route.method,
3880                            "path": route.path,
3881                            "headers": route.headers,
3882                            "body": route.body,
3883                            "status_code": route.response.status,
3884                            "response": serde_json::json!({
3885                                "status": route.response.status,
3886                                "headers": route.response.headers,
3887                                "body": route.response.body
3888                            })
3889                        })
3890                    })
3891                    .collect();
3892
3893                let response = serde_json::json!({
3894                    "routes": routes,
3895                    "variables": import_result.variables,
3896                    "warnings": import_result.warnings
3897                });
3898
3899                Json(ApiResponse::success(response))
3900            }
3901            Err(e) => Json(ApiResponse::error(format!("Postman import failed: {}", e))),
3902        },
3903        "insomnia" => match import_insomnia_export(content, environment) {
3904            Ok(import_result) => {
3905                let routes: Vec<serde_json::Value> = import_result
3906                    .routes
3907                    .into_iter()
3908                    .map(|route| {
3909                        serde_json::json!({
3910                            "method": route.method,
3911                            "path": route.path,
3912                            "headers": route.headers,
3913                            "body": route.body,
3914                            "status_code": route.response.status,
3915                            "response": serde_json::json!({
3916                                "status": route.response.status,
3917                                "headers": route.response.headers,
3918                                "body": route.response.body
3919                            })
3920                        })
3921                    })
3922                    .collect();
3923
3924                let response = serde_json::json!({
3925                    "routes": routes,
3926                    "variables": import_result.variables,
3927                    "warnings": import_result.warnings
3928                });
3929
3930                Json(ApiResponse::success(response))
3931            }
3932            Err(e) => Json(ApiResponse::error(format!("Insomnia import failed: {}", e))),
3933        },
3934        "curl" => match import_curl_commands(content, base_url) {
3935            Ok(import_result) => {
3936                let routes: Vec<serde_json::Value> = import_result
3937                    .routes
3938                    .into_iter()
3939                    .map(|route| {
3940                        serde_json::json!({
3941                            "method": route.method,
3942                            "path": route.path,
3943                            "headers": route.headers,
3944                            "body": route.body,
3945                            "status_code": route.response.status,
3946                            "response": serde_json::json!({
3947                                "status": route.response.status,
3948                                "headers": route.response.headers,
3949                                "body": route.response.body
3950                            })
3951                        })
3952                    })
3953                    .collect();
3954
3955                let response = serde_json::json!({
3956                    "routes": routes,
3957                    "variables": serde_json::json!({}),
3958                    "warnings": import_result.warnings
3959                });
3960
3961                Json(ApiResponse::success(response))
3962            }
3963            Err(e) => Json(ApiResponse::error(format!("Curl import failed: {}", e))),
3964        },
3965        _ => Json(ApiResponse::error("Unsupported import format".to_string())),
3966    }
3967}
3968
3969pub async fn get_import_history(
3970    State(state): State<AdminState>,
3971) -> Json<ApiResponse<serde_json::Value>> {
3972    let history = state.import_history.read().await;
3973    let total = history.len();
3974
3975    let imports: Vec<serde_json::Value> = history
3976        .iter()
3977        .rev()
3978        .take(50)
3979        .map(|entry| {
3980            serde_json::json!({
3981                "id": entry.id,
3982                "format": entry.format,
3983                "timestamp": entry.timestamp.to_rfc3339(),
3984                "routes_count": entry.routes_count,
3985                "variables_count": entry.variables_count,
3986                "warnings_count": entry.warnings_count,
3987                "success": entry.success,
3988                "filename": entry.filename,
3989                "environment": entry.environment,
3990                "base_url": entry.base_url,
3991                "error_message": entry.error_message
3992            })
3993        })
3994        .collect();
3995
3996    let response = serde_json::json!({
3997        "imports": imports,
3998        "total": total
3999    });
4000
4001    Json(ApiResponse::success(response))
4002}
4003
4004pub async fn get_admin_api_state(
4005    State(_state): State<AdminState>,
4006) -> Json<ApiResponse<serde_json::Value>> {
4007    Json(ApiResponse::success(serde_json::json!({
4008        "status": "active"
4009    })))
4010}
4011
4012pub async fn get_admin_api_replay(
4013    State(_state): State<AdminState>,
4014) -> Json<ApiResponse<serde_json::Value>> {
4015    Json(ApiResponse::success(serde_json::json!({
4016        "replay": []
4017    })))
4018}
4019
4020pub async fn get_sse_status(
4021    State(_state): State<AdminState>,
4022) -> Json<ApiResponse<serde_json::Value>> {
4023    Json(ApiResponse::success(serde_json::json!({
4024        "available": true,
4025        "endpoint": "/sse",
4026        "config": {
4027            "event_type": "status",
4028            "interval_ms": 1000,
4029            "data_template": "{}"
4030        }
4031    })))
4032}
4033
4034pub async fn get_sse_connections(
4035    State(_state): State<AdminState>,
4036) -> Json<ApiResponse<serde_json::Value>> {
4037    Json(ApiResponse::success(serde_json::json!({
4038        "active_connections": 0
4039    })))
4040}
4041
4042// Workspace management functions
4043pub async fn get_workspaces(
4044    State(_state): State<AdminState>,
4045) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4046    Json(ApiResponse::success(vec![]))
4047}
4048
4049pub async fn create_workspace(
4050    State(_state): State<AdminState>,
4051    Json(_request): Json<serde_json::Value>,
4052) -> Json<ApiResponse<String>> {
4053    Json(ApiResponse::success("Workspace created".to_string()))
4054}
4055
4056pub async fn open_workspace_from_directory(
4057    State(_state): State<AdminState>,
4058    Json(_request): Json<serde_json::Value>,
4059) -> Json<ApiResponse<String>> {
4060    Json(ApiResponse::success("Workspace opened from directory".to_string()))
4061}
4062
4063// Reality Slider API handlers
4064
4065/// Get current reality level
4066pub async fn get_reality_level(
4067    State(state): State<AdminState>,
4068) -> Json<ApiResponse<serde_json::Value>> {
4069    let engine = state.reality_engine.read().await;
4070    let level = engine.get_level().await;
4071    let config = engine.get_config().await;
4072
4073    Json(ApiResponse::success(serde_json::json!({
4074        "level": level.value(),
4075        "level_name": level.name(),
4076        "description": level.description(),
4077        "chaos": {
4078            "enabled": config.chaos.enabled,
4079            "error_rate": config.chaos.error_rate,
4080            "delay_rate": config.chaos.delay_rate,
4081        },
4082        "latency": {
4083            "base_ms": config.latency.base_ms,
4084            "jitter_ms": config.latency.jitter_ms,
4085        },
4086        "mockai": {
4087            "enabled": config.mockai.enabled,
4088        },
4089    })))
4090}
4091
/// Request body for [`set_reality_level`].
#[derive(Deserialize)]
pub struct SetRealityLevelRequest {
    /// Desired reality level; the handler rejects values outside 1-5.
    level: u8,
}
4097
/// Set the global reality level and hot-reload dependent subsystems.
///
/// Validates the requested level (1-5), updates the reality engine, then
/// pushes the derived configuration into the chaos API state, the latency
/// injector, and MockAI where those are configured. Subsystem update
/// failures do not fail the request; they are reported back to the client
/// in a `warnings` field of the success response.
pub async fn set_reality_level(
    State(state): State<AdminState>,
    Json(request): Json<SetRealityLevelRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    // Reject out-of-range levels up front.
    let level = match mockforge_core::RealityLevel::from_value(request.level) {
        Some(l) => l,
        None => {
            return Json(ApiResponse::error(format!(
                "Invalid reality level: {}. Must be between 1 and 5.",
                request.level
            )));
        }
    };

    // Update reality engine
    let engine = state.reality_engine.write().await;
    engine.set_level(level).await;
    let config = engine.get_config().await;
    drop(engine); // Release lock early

    // Apply hot-reload updates to subsystems
    let mut update_errors = Vec::new();

    // Update chaos config if available
    if let Some(ref chaos_api_state) = state.chaos_api_state {
        let mut chaos_config = chaos_api_state.config.write().await;

        // Convert reality config to chaos config using helper function
        // This ensures proper mapping of all fields
        use mockforge_chaos::config::{FaultInjectionConfig, LatencyConfig};

        // Only build a latency config when a base delay is configured.
        let latency_config = if config.latency.base_ms > 0 {
            Some(LatencyConfig {
                enabled: true,
                fixed_delay_ms: Some(config.latency.base_ms),
                random_delay_range_ms: config
                    .latency
                    .max_ms
                    .map(|max| (config.latency.min_ms, max)),
                // Jitter is expressed as a fraction of the base delay, capped at
                // 100%. Division is safe here: base_ms > 0 in this branch.
                jitter_percent: if config.latency.jitter_ms > 0 {
                    (config.latency.jitter_ms as f64 / config.latency.base_ms as f64).min(1.0)
                } else {
                    0.0
                },
                probability: 1.0,
            })
        } else {
            None
        };

        // Fault injection mirrors the reality chaos settings; corruption and
        // partial-response features are deliberately left disabled.
        let fault_injection_config = if config.chaos.enabled {
            Some(FaultInjectionConfig {
                enabled: true,
                http_errors: config.chaos.status_codes.clone(),
                http_error_probability: config.chaos.error_rate,
                connection_errors: false,
                connection_error_probability: 0.0,
                timeout_errors: config.chaos.inject_timeouts,
                timeout_ms: config.chaos.timeout_ms,
                timeout_probability: if config.chaos.inject_timeouts {
                    config.chaos.error_rate
                } else {
                    0.0
                },
                partial_responses: false,
                partial_response_probability: 0.0,
                payload_corruption: false,
                payload_corruption_probability: 0.0,
                corruption_type: mockforge_chaos::config::CorruptionType::None,
                error_pattern: Some(mockforge_chaos::config::ErrorPattern::Random {
                    probability: config.chaos.error_rate,
                }),
                mockai_enabled: false,
            })
        } else {
            None
        };

        // Update chaos config from converted config
        chaos_config.enabled = config.chaos.enabled;
        chaos_config.latency = latency_config;
        chaos_config.fault_injection = fault_injection_config;

        drop(chaos_config);
        tracing::info!("✅ Updated chaos config for reality level {}", level.value());

        // Update middleware injectors if middleware is accessible
        // Note: The middleware reads from shared config, so injectors will be updated
        // on next request, but we can also trigger an update if middleware is stored
        // For now, the middleware reads config directly, so this is sufficient
    }

    // Update latency injector if available
    if let Some(ref latency_injector) = state.latency_injector {
        match mockforge_core::latency::LatencyInjector::update_profile_async(
            latency_injector,
            config.latency.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated latency injector for reality level {}", level.value());
            }
            Err(e) => {
                // Collected rather than returned: level change already succeeded.
                let error_msg = format!("Failed to update latency injector: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // Update MockAI if available
    if let Some(ref mockai) = state.mockai {
        match mockforge_core::intelligent_behavior::MockAI::update_config_async(
            mockai,
            config.mockai.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated MockAI config for reality level {}", level.value());
            }
            Err(e) => {
                let error_msg = format!("Failed to update MockAI: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // Build response
    let mut response = serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": {
            "enabled": config.chaos.enabled,
            "error_rate": config.chaos.error_rate,
            "delay_rate": config.chaos.delay_rate,
        },
        "latency": {
            "base_ms": config.latency.base_ms,
            "jitter_ms": config.latency.jitter_ms,
        },
        "mockai": {
            "enabled": config.mockai.enabled,
        },
    });

    // Add warnings if any updates failed
    if !update_errors.is_empty() {
        response["warnings"] = serde_json::json!(update_errors);
        tracing::warn!(
            "Reality level updated to {} but some subsystems failed to update: {:?}",
            level.value(),
            update_errors
        );
    } else {
        tracing::info!(
            "✅ Reality level successfully updated to {} (hot-reload applied)",
            level.value()
        );
    }

    Json(ApiResponse::success(response))
}
4264
4265/// List all available reality presets
4266pub async fn list_reality_presets(
4267    State(state): State<AdminState>,
4268) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4269    let persistence = &state.workspace_persistence;
4270    match persistence.list_reality_presets().await {
4271        Ok(preset_paths) => {
4272            let presets: Vec<serde_json::Value> = preset_paths
4273                .iter()
4274                .map(|path| {
4275                    serde_json::json!({
4276                        "id": path.file_name().and_then(|n| n.to_str()).unwrap_or("unknown"),
4277                        "path": path.to_string_lossy(),
4278                        "name": path.file_stem().and_then(|n| n.to_str()).unwrap_or("unknown"),
4279                    })
4280                })
4281                .collect();
4282            Json(ApiResponse::success(presets))
4283        }
4284        Err(e) => Json(ApiResponse::error(format!("Failed to list presets: {}", e))),
4285    }
4286}
4287
/// Request body for [`import_reality_preset`].
#[derive(Deserialize)]
pub struct ImportPresetRequest {
    /// Filesystem path of the preset file to import.
    path: String,
}
4293
4294pub async fn import_reality_preset(
4295    State(state): State<AdminState>,
4296    Json(request): Json<ImportPresetRequest>,
4297) -> Json<ApiResponse<serde_json::Value>> {
4298    let persistence = &state.workspace_persistence;
4299    let path = std::path::Path::new(&request.path);
4300
4301    match persistence.import_reality_preset(path).await {
4302        Ok(preset) => {
4303            // Apply the preset to the reality engine
4304            let engine = state.reality_engine.write().await;
4305            engine.apply_preset(preset.clone()).await;
4306
4307            Json(ApiResponse::success(serde_json::json!({
4308                "name": preset.name,
4309                "description": preset.description,
4310                "level": preset.config.level.value(),
4311                "level_name": preset.config.level.name(),
4312            })))
4313        }
4314        Err(e) => Json(ApiResponse::error(format!("Failed to import preset: {}", e))),
4315    }
4316}
4317
/// Request body for [`export_reality_preset`].
#[derive(Deserialize)]
pub struct ExportPresetRequest {
    /// Preset name; also used to derive the output file name.
    name: String,
    /// Optional human-readable description stored with the preset.
    description: Option<String>,
}
4324
4325pub async fn export_reality_preset(
4326    State(state): State<AdminState>,
4327    Json(request): Json<ExportPresetRequest>,
4328) -> Json<ApiResponse<serde_json::Value>> {
4329    let engine = state.reality_engine.read().await;
4330    let preset = engine.create_preset(request.name.clone(), request.description.clone()).await;
4331
4332    let persistence = &state.workspace_persistence;
4333    let presets_dir = persistence.presets_dir();
4334    let filename = format!("{}.json", request.name.replace(' ', "_").to_lowercase());
4335    let output_path = presets_dir.join(&filename);
4336
4337    match persistence.export_reality_preset(&preset, &output_path).await {
4338        Ok(_) => Json(ApiResponse::success(serde_json::json!({
4339            "name": preset.name,
4340            "description": preset.description,
4341            "path": output_path.to_string_lossy(),
4342            "level": preset.config.level.value(),
4343        }))),
4344        Err(e) => Json(ApiResponse::error(format!("Failed to export preset: {}", e))),
4345    }
4346}
4347
4348// Reality Continuum API handlers
4349
4350/// Get current blend ratio for a path
4351pub async fn get_continuum_ratio(
4352    State(state): State<AdminState>,
4353    Query(params): Query<HashMap<String, String>>,
4354) -> Json<ApiResponse<serde_json::Value>> {
4355    let path = params.get("path").cloned().unwrap_or_else(|| "/".to_string());
4356    let engine = state.continuum_engine.read().await;
4357    let ratio = engine.get_blend_ratio(&path).await;
4358    let config = engine.get_config().await;
4359    let enabled = engine.is_enabled().await;
4360
4361    Json(ApiResponse::success(serde_json::json!({
4362        "path": path,
4363        "blend_ratio": ratio,
4364        "enabled": enabled,
4365        "transition_mode": format!("{:?}", config.transition_mode),
4366        "merge_strategy": format!("{:?}", config.merge_strategy),
4367        "default_ratio": config.default_ratio,
4368    })))
4369}
4370
/// Request body for [`set_continuum_ratio`].
#[derive(Deserialize)]
pub struct SetContinuumRatioRequest {
    /// Route path whose blend ratio should change.
    path: String,
    /// Desired blend ratio; clamped to [0.0, 1.0] by the handler.
    ratio: f64,
}
4377
4378pub async fn set_continuum_ratio(
4379    State(state): State<AdminState>,
4380    Json(request): Json<SetContinuumRatioRequest>,
4381) -> Json<ApiResponse<serde_json::Value>> {
4382    let ratio = request.ratio.clamp(0.0, 1.0);
4383    let engine = state.continuum_engine.read().await;
4384    engine.set_blend_ratio(&request.path, ratio).await;
4385
4386    Json(ApiResponse::success(serde_json::json!({
4387        "path": request.path,
4388        "blend_ratio": ratio,
4389    })))
4390}
4391
4392/// Get time schedule
4393pub async fn get_continuum_schedule(
4394    State(state): State<AdminState>,
4395) -> Json<ApiResponse<serde_json::Value>> {
4396    let engine = state.continuum_engine.read().await;
4397    let schedule = engine.get_time_schedule().await;
4398
4399    match schedule {
4400        Some(s) => Json(ApiResponse::success(serde_json::json!({
4401            "start_time": s.start_time.to_rfc3339(),
4402            "end_time": s.end_time.to_rfc3339(),
4403            "start_ratio": s.start_ratio,
4404            "end_ratio": s.end_ratio,
4405            "curve": format!("{:?}", s.curve),
4406            "duration_days": s.duration().num_days(),
4407        }))),
4408        None => Json(ApiResponse::success(serde_json::json!(null))),
4409    }
4410}
4411
/// Request body for [`set_continuum_schedule`].
#[derive(Deserialize)]
pub struct SetContinuumScheduleRequest {
    /// RFC 3339 timestamp at which the transition begins.
    start_time: String,
    /// RFC 3339 timestamp at which the transition ends.
    end_time: String,
    /// Blend ratio at `start_time`; clamped to [0.0, 1.0] by the handler.
    start_ratio: f64,
    /// Blend ratio at `end_time`; clamped to [0.0, 1.0] by the handler.
    end_ratio: f64,
    /// Optional curve name ("linear", "exponential", "sigmoid"); anything
    /// else, or omission, falls back to linear.
    curve: Option<String>,
}
4421
4422pub async fn set_continuum_schedule(
4423    State(state): State<AdminState>,
4424    Json(request): Json<SetContinuumScheduleRequest>,
4425) -> Json<ApiResponse<serde_json::Value>> {
4426    let start_time = chrono::DateTime::parse_from_rfc3339(&request.start_time)
4427        .map_err(|e| format!("Invalid start_time: {}", e))
4428        .map(|dt| dt.with_timezone(&Utc));
4429
4430    let end_time = chrono::DateTime::parse_from_rfc3339(&request.end_time)
4431        .map_err(|e| format!("Invalid end_time: {}", e))
4432        .map(|dt| dt.with_timezone(&Utc));
4433
4434    match (start_time, end_time) {
4435        (Ok(start), Ok(end)) => {
4436            let curve = request
4437                .curve
4438                .as_deref()
4439                .map(|c| match c {
4440                    "linear" => mockforge_core::TransitionCurve::Linear,
4441                    "exponential" => mockforge_core::TransitionCurve::Exponential,
4442                    "sigmoid" => mockforge_core::TransitionCurve::Sigmoid,
4443                    _ => mockforge_core::TransitionCurve::Linear,
4444                })
4445                .unwrap_or(mockforge_core::TransitionCurve::Linear);
4446
4447            let schedule = mockforge_core::TimeSchedule::with_curve(
4448                start,
4449                end,
4450                request.start_ratio.clamp(0.0, 1.0),
4451                request.end_ratio.clamp(0.0, 1.0),
4452                curve,
4453            );
4454
4455            let engine = state.continuum_engine.read().await;
4456            engine.set_time_schedule(schedule.clone()).await;
4457
4458            Json(ApiResponse::success(serde_json::json!({
4459                "start_time": schedule.start_time.to_rfc3339(),
4460                "end_time": schedule.end_time.to_rfc3339(),
4461                "start_ratio": schedule.start_ratio,
4462                "end_ratio": schedule.end_ratio,
4463                "curve": format!("{:?}", schedule.curve),
4464            })))
4465        }
4466        (Err(e), _) | (_, Err(e)) => Json(ApiResponse::error(e)),
4467    }
4468}
4469
/// Request body for [`advance_continuum_ratio`].
#[derive(Deserialize)]
pub struct AdvanceContinuumRatioRequest {
    /// Step to advance by; the handler defaults this to 0.1 when omitted.
    increment: Option<f64>,
}
4475
4476pub async fn advance_continuum_ratio(
4477    State(state): State<AdminState>,
4478    Json(request): Json<AdvanceContinuumRatioRequest>,
4479) -> Json<ApiResponse<serde_json::Value>> {
4480    let increment = request.increment.unwrap_or(0.1);
4481    let engine = state.continuum_engine.read().await;
4482    engine.advance_ratio(increment).await;
4483    let config = engine.get_config().await;
4484
4485    Json(ApiResponse::success(serde_json::json!({
4486        "default_ratio": config.default_ratio,
4487        "increment": increment,
4488    })))
4489}
4490
/// Request body for [`set_continuum_enabled`].
#[derive(Deserialize)]
pub struct SetContinuumEnabledRequest {
    /// Whether the continuum engine should be active.
    enabled: bool,
}
4496
4497pub async fn set_continuum_enabled(
4498    State(state): State<AdminState>,
4499    Json(request): Json<SetContinuumEnabledRequest>,
4500) -> Json<ApiResponse<serde_json::Value>> {
4501    let engine = state.continuum_engine.read().await;
4502    engine.set_enabled(request.enabled).await;
4503
4504    Json(ApiResponse::success(serde_json::json!({
4505        "enabled": request.enabled,
4506    })))
4507}
4508
4509/// Get all manual overrides
4510pub async fn get_continuum_overrides(
4511    State(state): State<AdminState>,
4512) -> Json<ApiResponse<serde_json::Value>> {
4513    let engine = state.continuum_engine.read().await;
4514    let overrides = engine.get_manual_overrides().await;
4515
4516    Json(ApiResponse::success(serde_json::json!(overrides)))
4517}
4518
4519/// Clear all manual overrides
4520pub async fn clear_continuum_overrides(
4521    State(state): State<AdminState>,
4522) -> Json<ApiResponse<serde_json::Value>> {
4523    let engine = state.continuum_engine.read().await;
4524    engine.clear_manual_overrides().await;
4525
4526    Json(ApiResponse::success(serde_json::json!({
4527        "message": "All manual overrides cleared",
4528    })))
4529}
4530
4531pub async fn get_workspace(
4532    State(_state): State<AdminState>,
4533    Path(workspace_id): Path<String>,
4534) -> Json<ApiResponse<serde_json::Value>> {
4535    Json(ApiResponse::success(serde_json::json!({
4536        "workspace": {
4537            "summary": {
4538                "id": workspace_id,
4539                "name": "Mock Workspace",
4540                "description": "A mock workspace"
4541            },
4542            "folders": [],
4543            "requests": []
4544        }
4545    })))
4546}
4547
4548pub async fn delete_workspace(
4549    State(_state): State<AdminState>,
4550    Path(_workspace_id): Path<String>,
4551) -> Json<ApiResponse<String>> {
4552    Json(ApiResponse::success("Workspace deleted".to_string()))
4553}
4554
4555pub async fn set_active_workspace(
4556    State(_state): State<AdminState>,
4557    Path(_workspace_id): Path<String>,
4558) -> Json<ApiResponse<String>> {
4559    Json(ApiResponse::success("Workspace activated".to_string()))
4560}
4561
4562pub async fn create_folder(
4563    State(_state): State<AdminState>,
4564    Path(_workspace_id): Path<String>,
4565    Json(_request): Json<serde_json::Value>,
4566) -> Json<ApiResponse<String>> {
4567    Json(ApiResponse::success("Folder created".to_string()))
4568}
4569
4570pub async fn create_request(
4571    State(_state): State<AdminState>,
4572    Path(_workspace_id): Path<String>,
4573    Json(_request): Json<serde_json::Value>,
4574) -> Json<ApiResponse<String>> {
4575    Json(ApiResponse::success("Request created".to_string()))
4576}
4577
4578pub async fn execute_workspace_request(
4579    State(_state): State<AdminState>,
4580    Path((_workspace_id, _request_id)): Path<(String, String)>,
4581    Json(_request): Json<serde_json::Value>,
4582) -> Json<ApiResponse<serde_json::Value>> {
4583    Json(ApiResponse::success(serde_json::json!({
4584        "status": "executed",
4585        "response": {}
4586    })))
4587}
4588
4589pub async fn get_request_history(
4590    State(_state): State<AdminState>,
4591    Path((_workspace_id, _request_id)): Path<(String, String)>,
4592) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4593    Json(ApiResponse::success(vec![]))
4594}
4595
4596pub async fn get_folder(
4597    State(_state): State<AdminState>,
4598    Path((_workspace_id, folder_id)): Path<(String, String)>,
4599) -> Json<ApiResponse<serde_json::Value>> {
4600    Json(ApiResponse::success(serde_json::json!({
4601        "folder": {
4602            "summary": {
4603                "id": folder_id,
4604                "name": "Mock Folder",
4605                "description": "A mock folder"
4606            },
4607            "requests": []
4608        }
4609    })))
4610}
4611
4612pub async fn import_to_workspace(
4613    State(_state): State<AdminState>,
4614    Path(_workspace_id): Path<String>,
4615    Json(_request): Json<serde_json::Value>,
4616) -> Json<ApiResponse<String>> {
4617    Json(ApiResponse::success("Import to workspace completed".to_string()))
4618}
4619
4620pub async fn export_workspaces(
4621    State(_state): State<AdminState>,
4622    Json(_request): Json<serde_json::Value>,
4623) -> Json<ApiResponse<String>> {
4624    Json(ApiResponse::success("Workspaces exported".to_string()))
4625}
4626
4627// Environment management functions
4628pub async fn get_environments(
4629    State(_state): State<AdminState>,
4630    Path(_workspace_id): Path<String>,
4631) -> Json<ApiResponse<serde_json::Value>> {
4632    // Return a default global environment
4633    let environments = vec![serde_json::json!({
4634        "id": "global",
4635        "name": "Global",
4636        "description": "Global environment variables",
4637        "variable_count": 0,
4638        "is_global": true,
4639        "active": true,
4640        "order": 0
4641    })];
4642
4643    Json(ApiResponse::success(serde_json::json!({
4644        "environments": environments,
4645        "total": 1
4646    })))
4647}
4648
4649pub async fn create_environment(
4650    State(_state): State<AdminState>,
4651    Path(_workspace_id): Path<String>,
4652    Json(_request): Json<serde_json::Value>,
4653) -> Json<ApiResponse<String>> {
4654    Json(ApiResponse::success("Environment created".to_string()))
4655}
4656
4657pub async fn update_environment(
4658    State(_state): State<AdminState>,
4659    Path((_workspace_id, _environment_id)): Path<(String, String)>,
4660    Json(_request): Json<serde_json::Value>,
4661) -> Json<ApiResponse<String>> {
4662    Json(ApiResponse::success("Environment updated".to_string()))
4663}
4664
4665pub async fn delete_environment(
4666    State(_state): State<AdminState>,
4667    Path((_workspace_id, _environment_id)): Path<(String, String)>,
4668) -> Json<ApiResponse<String>> {
4669    Json(ApiResponse::success("Environment deleted".to_string()))
4670}
4671
4672pub async fn set_active_environment(
4673    State(_state): State<AdminState>,
4674    Path((_workspace_id, _environment_id)): Path<(String, String)>,
4675) -> Json<ApiResponse<String>> {
4676    Json(ApiResponse::success("Environment activated".to_string()))
4677}
4678
4679pub async fn update_environments_order(
4680    State(_state): State<AdminState>,
4681    Path(_workspace_id): Path<String>,
4682    Json(_request): Json<serde_json::Value>,
4683) -> Json<ApiResponse<String>> {
4684    Json(ApiResponse::success("Environment order updated".to_string()))
4685}
4686
4687pub async fn get_environment_variables(
4688    State(_state): State<AdminState>,
4689    Path((_workspace_id, _environment_id)): Path<(String, String)>,
4690) -> Json<ApiResponse<serde_json::Value>> {
4691    Json(ApiResponse::success(serde_json::json!({
4692        "variables": []
4693    })))
4694}
4695
4696pub async fn set_environment_variable(
4697    State(_state): State<AdminState>,
4698    Path((_workspace_id, _environment_id)): Path<(String, String)>,
4699    Json(_request): Json<serde_json::Value>,
4700) -> Json<ApiResponse<String>> {
4701    Json(ApiResponse::success("Environment variable set".to_string()))
4702}
4703
4704pub async fn remove_environment_variable(
4705    State(_state): State<AdminState>,
4706    Path((_workspace_id, _environment_id, _variable_name)): Path<(String, String, String)>,
4707) -> Json<ApiResponse<String>> {
4708    Json(ApiResponse::success("Environment variable removed".to_string()))
4709}
4710
4711// Autocomplete functions
4712pub async fn get_autocomplete_suggestions(
4713    State(_state): State<AdminState>,
4714    Path(_workspace_id): Path<String>,
4715    Json(_request): Json<serde_json::Value>,
4716) -> Json<ApiResponse<serde_json::Value>> {
4717    Json(ApiResponse::success(serde_json::json!({
4718        "suggestions": [],
4719        "start_position": 0,
4720        "end_position": 0
4721    })))
4722}
4723
4724// Sync management functions
4725pub async fn get_sync_status(
4726    State(_state): State<AdminState>,
4727    Path(_workspace_id): Path<String>,
4728) -> Json<ApiResponse<serde_json::Value>> {
4729    Json(ApiResponse::success(serde_json::json!({
4730        "status": "disabled"
4731    })))
4732}
4733
4734pub async fn configure_sync(
4735    State(_state): State<AdminState>,
4736    Path(_workspace_id): Path<String>,
4737    Json(_request): Json<serde_json::Value>,
4738) -> Json<ApiResponse<String>> {
4739    Json(ApiResponse::success("Sync configured".to_string()))
4740}
4741
4742pub async fn disable_sync(
4743    State(_state): State<AdminState>,
4744    Path(_workspace_id): Path<String>,
4745) -> Json<ApiResponse<String>> {
4746    Json(ApiResponse::success("Sync disabled".to_string()))
4747}
4748
4749pub async fn trigger_sync(
4750    State(_state): State<AdminState>,
4751    Path(_workspace_id): Path<String>,
4752) -> Json<ApiResponse<String>> {
4753    Json(ApiResponse::success("Sync triggered".to_string()))
4754}
4755
4756pub async fn get_sync_changes(
4757    State(_state): State<AdminState>,
4758    Path(_workspace_id): Path<String>,
4759) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4760    Json(ApiResponse::success(vec![]))
4761}
4762
4763pub async fn confirm_sync_changes(
4764    State(_state): State<AdminState>,
4765    Path(_workspace_id): Path<String>,
4766    Json(_request): Json<serde_json::Value>,
4767) -> Json<ApiResponse<String>> {
4768    Json(ApiResponse::success("Sync changes confirmed".to_string()))
4769}
4770
4771// Missing functions that routes.rs expects
4772pub async fn clear_import_history(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
4773    let mut history = state.import_history.write().await;
4774    history.clear();
4775    Json(ApiResponse::success("Import history cleared".to_string()))
4776}
4777
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: RequestMetrics can be constructed field-by-field and
    // round-trips the values assigned.
    #[test]
    fn test_request_metrics_creation() {
        use std::collections::HashMap;

        let metrics = RequestMetrics {
            total_requests: 100,
            active_connections: 5,
            requests_by_endpoint: HashMap::new(),
            response_times: vec![10, 20, 30],
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint: HashMap::new(),
            last_request_by_endpoint: HashMap::new(),
        };

        assert_eq!(metrics.total_requests, 100);
        assert_eq!(metrics.active_connections, 5);
        assert_eq!(metrics.response_times.len(), 3);
    }

    // Smoke test: SystemMetrics construction and field access.
    #[test]
    fn test_system_metrics_creation() {
        let metrics = SystemMetrics {
            cpu_usage_percent: 45.5,
            memory_usage_mb: 100,
            active_threads: 10,
        };

        assert_eq!(metrics.active_threads, 10);
        assert!(metrics.cpu_usage_percent > 0.0);
        assert_eq!(metrics.memory_usage_mb, 100);
    }

    // Smoke test: a TimeSeriesPoint stores its value unchanged.
    #[test]
    fn test_time_series_point() {
        let point = TimeSeriesPoint {
            timestamp: Utc::now(),
            value: 42.5,
        };

        assert_eq!(point.value, 42.5);
    }

    // Smoke test: RestartStatus fields for an in-progress manual restart.
    #[test]
    fn test_restart_status() {
        let status = RestartStatus {
            in_progress: true,
            initiated_at: Some(Utc::now()),
            reason: Some("Manual restart".to_string()),
            success: None,
        };

        assert!(status.in_progress);
        assert!(status.reason.is_some());
    }

    // Smoke test: a fully-populated ConfigurationState with all subsystems
    // disabled; verifies a few representative fields after construction.
    #[test]
    fn test_configuration_state() {
        use std::collections::HashMap;

        let state = ConfigurationState {
            latency_profile: LatencyProfile {
                name: "default".to_string(),
                base_ms: 100,
                jitter_ms: 10,
                tag_overrides: HashMap::new(),
            },
            fault_config: FaultConfig {
                enabled: false,
                failure_rate: 0.0,
                status_codes: vec![],
                active_failures: 0,
            },
            proxy_config: ProxyConfig {
                enabled: false,
                upstream_url: None,
                timeout_seconds: 30,
                requests_proxied: 0,
            },
            validation_settings: ValidationSettings {
                mode: "off".to_string(),
                aggregate_errors: false,
                validate_responses: false,
                overrides: HashMap::new(),
            },
            traffic_shaping: TrafficShapingConfig {
                enabled: false,
                bandwidth: crate::models::BandwidthConfig {
                    enabled: false,
                    max_bytes_per_sec: 1_048_576,
                    burst_capacity_bytes: 10_485_760,
                    tag_overrides: HashMap::new(),
                },
                burst_loss: crate::models::BurstLossConfig {
                    enabled: false,
                    burst_probability: 0.1,
                    burst_duration_ms: 5000,
                    loss_rate_during_burst: 0.5,
                    recovery_time_ms: 30000,
                    tag_overrides: HashMap::new(),
                },
            },
        };

        assert_eq!(state.latency_profile.name, "default");
        assert!(!state.fault_config.enabled);
        assert!(!state.proxy_config.enabled);
    }

    // Smoke test: AdminState::new wires through the HTTP address, the
    // api_enabled flag, and the admin port.
    #[test]
    fn test_admin_state_new() {
        let http_addr: std::net::SocketAddr = "127.0.0.1:3000".parse().unwrap();
        let state = AdminState::new(
            Some(http_addr),
            None,
            None,
            None,
            true,
            8080,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
        );

        assert_eq!(state.http_server_addr, Some(http_addr));
        assert!(state.api_enabled);
        assert_eq!(state.admin_port, 8080);
    }
}