// mockforge_ui/handlers.rs
//! Request handlers for the admin UI
//!
//! This module has been refactored into sub-modules for better organization,
//! for example:
//! - assets: Static asset serving
//! - admin: Admin dashboard and server management
//! - workspaces: Workspace management operations
//! - plugin: Plugin management operations
//! - analytics / analytics_stream / analytics_v2: Analytics endpoints
//! - health: Health check endpoints
//!
//! See the `pub mod` declarations below for the complete list.
11
12use axum::{
13    extract::{Path, Query, State},
14    http::{self, StatusCode},
15    response::{
16        sse::{Event, Sse},
17        Html, IntoResponse, Json,
18    },
19};
20use chrono::Utc;
21use futures_util::stream::{self, Stream};
22use mockforge_core::{Error, Result};
23use mockforge_plugin_loader::PluginRegistry;
24use serde::{Deserialize, Serialize};
25use serde_json::json;
26use std::collections::HashMap;
27use std::convert::Infallible;
28use std::process::Command;
29use std::process::Stdio;
30use std::sync::Arc;
31use std::time::Duration;
32use sysinfo::System;
33use tokio::sync::RwLock;
34
35// Import all types from models
36use crate::models::{
37    ApiResponse, ConfigUpdate, DashboardData, DashboardSystemInfo, FaultConfig, HealthCheck,
38    LatencyProfile, LogFilter, MetricsData, ProxyConfig, RequestLog, RouteInfo, ServerInfo,
39    ServerStatus, SimpleMetricsData, SystemInfo, TrafficShapingConfig, ValidationSettings,
40    ValidationUpdate,
41};
42
43// Import import types from core
44use mockforge_core::workspace_import::{ImportResponse, ImportRoute};
45use mockforge_plugin_loader::{
46    GitPluginConfig, GitPluginLoader, PluginLoader, PluginLoaderConfig, PluginSource,
47    RemotePluginConfig, RemotePluginLoader,
48};
49
50// Handler sub-modules
51pub mod admin;
52pub mod ai_studio;
53pub mod analytics;
54pub mod analytics_stream;
55pub mod analytics_v2;
56pub mod assets;
57pub mod behavioral_cloning;
58pub mod chains;
59pub mod community;
60pub mod contract_diff;
61pub mod coverage_metrics;
62pub mod failure_analysis;
63pub mod graph;
64pub mod health;
65pub mod migration;
66pub mod pillar_analytics;
67pub mod playground;
68pub mod plugin;
69pub mod promotions;
70pub mod protocol_contracts;
71pub mod verification;
72pub mod voice;
73pub mod workspaces;
74
75// Re-export commonly used types
76pub use assets::*;
77pub use chains::*;
78pub use graph::*;
79pub use migration::*;
80pub use plugin::*;
81
82// Import workspace persistence
83use mockforge_core::workspace_import::WorkspaceImportConfig;
84use mockforge_core::workspace_persistence::WorkspacePersistence;
85
/// Request metrics for tracking
///
/// In-memory counters and rolling latency samples for requests served by the
/// mock servers. Updated by `AdminState::record_request`; all per-endpoint
/// maps are keyed by the string `"METHOD path"` (e.g. `"GET /users"`).
#[derive(Debug, Clone, Default)]
pub struct RequestMetrics {
    /// Total requests served
    pub total_requests: u64,
    /// Active connections
    pub active_connections: u64,
    /// Requests by endpoint (`"METHOD path"` -> count)
    pub requests_by_endpoint: HashMap<String, u64>,
    /// Response times in milliseconds (rolling window of the last N measurements)
    pub response_times: Vec<u64>,
    /// Response times in milliseconds by endpoint (rolling window of the last N per endpoint)
    pub response_times_by_endpoint: HashMap<String, Vec<u64>>,
    /// Error count by endpoint (responses with status code >= 400)
    pub errors_by_endpoint: HashMap<String, u64>,
    /// Last request timestamp (UTC) by endpoint
    pub last_request_by_endpoint: HashMap<String, chrono::DateTime<Utc>>,
}
104
/// System metrics
///
/// Snapshot of host resource usage, refreshed periodically by the background
/// task started in `AdminState::start_system_monitoring`.
///
/// Derives `Default` (all fields zero), matching the pre-first-refresh state
/// and the `Default` convention used by the sibling metric types
/// (`RequestMetrics`, `TimeSeriesData`).
#[derive(Debug, Clone, Default)]
pub struct SystemMetrics {
    /// Memory usage in MB
    pub memory_usage_mb: u64,
    /// CPU usage percentage
    pub cpu_usage_percent: f64,
    /// Active threads (approximated from the logical CPU count by the monitor)
    pub active_threads: u32,
}
115
/// Time series data point
///
/// A single timestamped sample; the unit of `value` depends on which series
/// in `TimeSeriesData` the point belongs to (MB, percent, count, or ms).
#[derive(Debug, Clone)]
pub struct TimeSeriesPoint {
    /// Timestamp (UTC) at which the sample was taken
    pub timestamp: chrono::DateTime<Utc>,
    /// Sampled value
    pub value: f64,
}
124
/// Time series data for tracking metrics over time
///
/// Each series is a rolling window: the writers in `AdminState` trim every
/// vector to the most recent 100 points to bound memory use.
#[derive(Debug, Clone, Default)]
pub struct TimeSeriesData {
    /// Memory usage over time (MB)
    pub memory_usage: Vec<TimeSeriesPoint>,
    /// CPU usage over time (percent)
    pub cpu_usage: Vec<TimeSeriesPoint>,
    /// Cumulative request count over time
    pub request_count: Vec<TimeSeriesPoint>,
    /// Response time over time (ms)
    pub response_time: Vec<TimeSeriesPoint>,
}
137
/// Restart status tracking
///
/// State machine for an admin-triggered server restart: `initiate_restart`
/// sets `in_progress`, and `complete_restart` clears it and records the
/// outcome in `success`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestartStatus {
    /// Whether a restart is currently in progress
    pub in_progress: bool,
    /// Timestamp (UTC) when the restart was initiated; `None` if never initiated
    pub initiated_at: Option<chrono::DateTime<Utc>>,
    /// Restart reason/message supplied by the caller
    pub reason: Option<String>,
    /// Whether the restart was successful; `None` while pending or in progress
    pub success: Option<bool>,
}
150
/// Fixture metadata
///
/// Describes a recorded request/response fixture stored on disk, as listed by
/// the fixture-management endpoints.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureInfo {
    /// Unique identifier for the fixture
    pub id: String,
    /// Protocol type (http, websocket, grpc)
    pub protocol: String,
    /// HTTP method or operation type
    pub method: String,
    /// Request path
    pub path: String,
    /// When the fixture was saved (UTC)
    pub saved_at: chrono::DateTime<Utc>,
    /// File size in bytes
    pub file_size: u64,
    /// File path relative to the fixtures directory
    pub file_path: String,
    /// Request fingerprint hash used to match incoming requests to this fixture
    pub fingerprint: String,
    /// Additional free-form metadata from the fixture file
    pub metadata: serde_json::Value,
}
173
/// Smoke test result
///
/// Outcome of a single smoke test run against a mocked endpoint; stored in
/// `AdminState::smoke_test_results` and updated in place by test ID.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SmokeTestResult {
    /// Test ID (used to match and replace earlier results for the same test)
    pub id: String,
    /// Test name
    pub name: String,
    /// HTTP method
    pub method: String,
    /// Request path
    pub path: String,
    /// Test description
    pub description: String,
    /// When the test was last run (UTC); `None` if never run
    pub last_run: Option<chrono::DateTime<Utc>>,
    /// Test status (passed, failed, running, pending)
    pub status: String,
    /// Response time in milliseconds
    pub response_time_ms: Option<u64>,
    /// Error message if the test failed
    pub error_message: Option<String>,
    /// HTTP status code received
    pub status_code: Option<u16>,
    /// Test duration in seconds
    pub duration_seconds: Option<f64>,
}
200
/// Smoke test execution context
///
/// Parameters controlling how a batch of smoke tests is executed.
#[derive(Debug, Clone)]
pub struct SmokeTestContext {
    /// Base URL for the service being tested
    pub base_url: String,
    /// Timeout for individual tests, in seconds
    pub timeout_seconds: u64,
    /// Whether to run tests in parallel
    pub parallel: bool,
}
211
/// Configuration state
///
/// Runtime-mutable server configuration, held behind `AdminState::config`
/// and updated by the `update_*_config` methods.
#[derive(Debug, Clone, Serialize)]
pub struct ConfigurationState {
    /// Latency profile (base delay, jitter, per-tag overrides)
    pub latency_profile: LatencyProfile,
    /// Fault injection configuration
    pub fault_config: FaultConfig,
    /// Upstream proxy configuration
    pub proxy_config: ProxyConfig,
    /// Request/response validation settings
    pub validation_settings: ValidationSettings,
    /// Traffic shaping settings (bandwidth limits, burst loss)
    pub traffic_shaping: TrafficShapingConfig,
}
226
/// Import history entry
///
/// Record of a single data-import operation (e.g. a Postman collection),
/// kept in `AdminState::import_history` for display in the admin UI.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportHistoryEntry {
    /// Unique ID for the import
    pub id: String,
    /// Import format (postman, insomnia, curl)
    pub format: String,
    /// Timestamp of the import (UTC)
    pub timestamp: chrono::DateTime<Utc>,
    /// Number of routes imported
    pub routes_count: usize,
    /// Number of variables imported
    pub variables_count: usize,
    /// Number of warnings produced during the import
    pub warnings_count: usize,
    /// Whether the import was successful
    pub success: bool,
    /// Filename of the imported file, if known
    pub filename: Option<String>,
    /// Environment used for the import, if any
    pub environment: Option<String>,
    /// Base URL used for the import, if any
    pub base_url: Option<String>,
    /// Error message if the import failed
    pub error_message: Option<String>,
}
253
/// Shared state for the admin UI
///
/// Cloning is cheap: every mutable piece of state is behind an `Arc`
/// (usually `Arc<RwLock<_>>`), so clones share the same underlying data.
/// This is relied on by `start_system_monitoring`, which clones `self` into
/// a background task.
#[derive(Clone)]
pub struct AdminState {
    /// HTTP server address
    pub http_server_addr: Option<std::net::SocketAddr>,
    /// WebSocket server address
    pub ws_server_addr: Option<std::net::SocketAddr>,
    /// gRPC server address
    pub grpc_server_addr: Option<std::net::SocketAddr>,
    /// GraphQL server address
    pub graphql_server_addr: Option<std::net::SocketAddr>,
    /// Whether API endpoints are enabled
    pub api_enabled: bool,
    /// Admin server port
    pub admin_port: u16,
    /// Start time (UTC); used to compute uptime
    pub start_time: chrono::DateTime<Utc>,
    /// Request metrics (protected by RwLock)
    pub metrics: Arc<RwLock<RequestMetrics>>,
    /// System metrics (protected by RwLock)
    pub system_metrics: Arc<RwLock<SystemMetrics>>,
    /// Configuration (protected by RwLock)
    pub config: Arc<RwLock<ConfigurationState>>,
    /// Request logs, most recent last, capped at 1000 entries (protected by RwLock)
    pub logs: Arc<RwLock<Vec<RequestLog>>>,
    /// Time series data (protected by RwLock)
    pub time_series: Arc<RwLock<TimeSeriesData>>,
    /// Restart status (protected by RwLock)
    pub restart_status: Arc<RwLock<RestartStatus>>,
    /// Smoke test results, capped at 100 entries (protected by RwLock)
    pub smoke_test_results: Arc<RwLock<Vec<SmokeTestResult>>>,
    /// Import history (protected by RwLock)
    pub import_history: Arc<RwLock<Vec<ImportHistoryEntry>>>,
    /// Workspace persistence
    pub workspace_persistence: Arc<WorkspacePersistence>,
    /// Plugin registry (protected by RwLock)
    pub plugin_registry: Arc<RwLock<PluginRegistry>>,
    /// Reality engine for managing realism levels
    pub reality_engine: Arc<RwLock<mockforge_core::RealityEngine>>,
    /// Reality Continuum engine for blending mock and real data sources
    pub continuum_engine: Arc<RwLock<mockforge_core::RealityContinuumEngine>>,
    /// Chaos API state for hot-reload support (optional)
    /// Contains config that can be updated at runtime
    pub chaos_api_state: Option<Arc<mockforge_chaos::api::ChaosApiState>>,
    /// Latency injector for HTTP middleware (optional)
    /// Allows updating latency profile at runtime
    pub latency_injector: Option<Arc<RwLock<mockforge_core::latency::LatencyInjector>>>,
    /// MockAI instance (optional)
    /// Allows updating MockAI configuration at runtime
    pub mockai: Option<Arc<RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
}
305
impl AdminState {
    /// Start system monitoring background task
    ///
    /// Spawns a detached Tokio task that samples CPU usage, used memory, and
    /// the logical CPU count via `sysinfo` every 10 seconds, feeding the
    /// results into [`Self::update_system_metrics`]. The loop never exits,
    /// so the task lives for the remainder of the process.
    pub async fn start_system_monitoring(&self) {
        // Cheap clone: all mutable state is behind Arc, so the task shares it.
        let state_clone = self.clone();
        tokio::spawn(async move {
            let mut sys = System::new_all();
            let mut refresh_count = 0u64;

            tracing::info!("Starting system monitoring background task");

            loop {
                // Refresh system information
                sys.refresh_all();

                // Get CPU usage
                let cpu_usage = sys.global_cpu_usage();

                // Get memory usage
                // NOTE(review): assumes sysinfo reports used_memory in bytes
                // (hence the /1024/1024 to get MB) — confirm against the
                // pinned sysinfo version, which changed units across releases.
                let _total_memory = sys.total_memory() as f64;
                let used_memory = sys.used_memory() as f64;
                let memory_usage_mb = used_memory / 1024.0 / 1024.0;

                // Get thread count (use available CPU cores as approximate measure)
                let active_threads = sys.cpus().len() as u32;

                // Update system metrics
                let memory_mb_u64 = memory_usage_mb as u64;

                // Only log every 10 refreshes to avoid spam
                if refresh_count.is_multiple_of(10) {
                    tracing::debug!(
                        "System metrics updated: CPU={:.1}%, Mem={}MB, Threads={}",
                        cpu_usage,
                        memory_mb_u64,
                        active_threads
                    );
                }

                state_clone
                    .update_system_metrics(memory_mb_u64, cpu_usage as f64, active_threads)
                    .await;

                refresh_count += 1;

                // Sleep for 10 seconds between updates
                tokio::time::sleep(Duration::from_secs(10)).await;
            }
        });
    }

    /// Create new admin state
    ///
    /// All metric/log/config containers start empty or zeroed; configuration
    /// defaults (50ms base latency, faults disabled, "enforce" validation,
    /// traffic shaping disabled) are set inline below.
    ///
    /// # Arguments
    /// * `http_server_addr` - HTTP server address
    /// * `ws_server_addr` - WebSocket server address
    /// * `grpc_server_addr` - gRPC server address
    /// * `graphql_server_addr` - GraphQL server address
    /// * `api_enabled` - Whether API endpoints are enabled
    /// * `admin_port` - Admin server port
    /// * `chaos_api_state` - Optional chaos API state for hot-reload support
    /// * `latency_injector` - Optional latency injector for hot-reload support
    /// * `mockai` - Optional MockAI instance for hot-reload support
    /// * `continuum_config` - Optional Reality Continuum configuration
    /// * `virtual_clock` - Optional virtual clock for time-based progression
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        http_server_addr: Option<std::net::SocketAddr>,
        ws_server_addr: Option<std::net::SocketAddr>,
        grpc_server_addr: Option<std::net::SocketAddr>,
        graphql_server_addr: Option<std::net::SocketAddr>,
        api_enabled: bool,
        admin_port: u16,
        chaos_api_state: Option<Arc<mockforge_chaos::api::ChaosApiState>>,
        latency_injector: Option<Arc<RwLock<mockforge_core::latency::LatencyInjector>>>,
        mockai: Option<Arc<RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
        continuum_config: Option<mockforge_core::ContinuumConfig>,
        virtual_clock: Option<Arc<mockforge_core::VirtualClock>>,
    ) -> Self {
        let start_time = Utc::now();

        Self {
            http_server_addr,
            ws_server_addr,
            grpc_server_addr,
            graphql_server_addr,
            api_enabled,
            admin_port,
            start_time,
            metrics: Arc::new(RwLock::new(RequestMetrics::default())),
            system_metrics: Arc::new(RwLock::new(SystemMetrics {
                memory_usage_mb: 0,
                cpu_usage_percent: 0.0,
                active_threads: 0,
            })),
            config: Arc::new(RwLock::new(ConfigurationState {
                latency_profile: LatencyProfile {
                    name: "default".to_string(),
                    base_ms: 50,
                    jitter_ms: 20,
                    tag_overrides: HashMap::new(),
                },
                fault_config: FaultConfig {
                    enabled: false,
                    failure_rate: 0.0,
                    status_codes: vec![500, 502, 503],
                    active_failures: 0,
                },
                proxy_config: ProxyConfig {
                    enabled: false,
                    upstream_url: None,
                    timeout_seconds: 30,
                    requests_proxied: 0,
                },
                validation_settings: ValidationSettings {
                    mode: "enforce".to_string(),
                    aggregate_errors: true,
                    validate_responses: false,
                    overrides: HashMap::new(),
                },
                traffic_shaping: TrafficShapingConfig {
                    enabled: false,
                    bandwidth: crate::models::BandwidthConfig {
                        enabled: false,
                        max_bytes_per_sec: 1_048_576,
                        burst_capacity_bytes: 10_485_760,
                        tag_overrides: HashMap::new(),
                    },
                    burst_loss: crate::models::BurstLossConfig {
                        enabled: false,
                        burst_probability: 0.1,
                        burst_duration_ms: 5000,
                        loss_rate_during_burst: 0.5,
                        recovery_time_ms: 30000,
                        tag_overrides: HashMap::new(),
                    },
                },
            })),
            logs: Arc::new(RwLock::new(Vec::new())),
            time_series: Arc::new(RwLock::new(TimeSeriesData::default())),
            restart_status: Arc::new(RwLock::new(RestartStatus {
                in_progress: false,
                initiated_at: None,
                reason: None,
                success: None,
            })),
            smoke_test_results: Arc::new(RwLock::new(Vec::new())),
            import_history: Arc::new(RwLock::new(Vec::new())),
            workspace_persistence: Arc::new(WorkspacePersistence::new("./workspaces")),
            plugin_registry: Arc::new(RwLock::new(PluginRegistry::new())),
            reality_engine: Arc::new(RwLock::new(mockforge_core::RealityEngine::new())),
            continuum_engine: Arc::new(RwLock::new({
                let config = continuum_config.unwrap_or_default();
                if let Some(clock) = virtual_clock {
                    mockforge_core::RealityContinuumEngine::with_virtual_clock(config, clock)
                } else {
                    mockforge_core::RealityContinuumEngine::new(config)
                }
            })),
            chaos_api_state,
            latency_injector,
            mockai,
        }
    }

    /// Record a request
    ///
    /// Updates aggregate counters, per-endpoint rolling response-time
    /// windows, the time-series samples, and the in-memory request log.
    /// Endpoints are keyed as `"METHOD path"`; status codes >= 400 count as
    /// errors. The metrics lock is released before the time-series and log
    /// locks are taken, so only one lock is held at a time.
    ///
    /// # Arguments
    /// * `method` - HTTP method (e.g. "GET")
    /// * `path` - Request path
    /// * `status_code` - Response status code
    /// * `response_time_ms` - Observed latency in milliseconds
    /// * `error` - Optional error message stored on the log entry
    pub async fn record_request(
        &self,
        method: &str,
        path: &str,
        status_code: u16,
        response_time_ms: u64,
        error: Option<String>,
    ) {
        let mut metrics = self.metrics.write().await;

        metrics.total_requests += 1;
        let endpoint = format!("{} {}", method, path);
        *metrics.requests_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;

        if status_code >= 400 {
            *metrics.errors_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
        }

        // Keep only last 100 response times globally
        metrics.response_times.push(response_time_ms);
        if metrics.response_times.len() > 100 {
            metrics.response_times.remove(0);
        }

        // Keep only last 50 response times per endpoint
        let endpoint_times = metrics
            .response_times_by_endpoint
            .entry(endpoint.clone())
            .or_insert_with(Vec::new);
        endpoint_times.push(response_time_ms);
        if endpoint_times.len() > 50 {
            endpoint_times.remove(0);
        }

        // Update last request timestamp for this endpoint
        metrics.last_request_by_endpoint.insert(endpoint, Utc::now());

        // Capture total_requests before releasing the lock
        let total_requests = metrics.total_requests;

        // Release metrics lock before acquiring other locks
        drop(metrics);

        // Update time series data for request count and response time
        self.update_time_series_on_request(response_time_ms, total_requests).await;

        // Record the log
        let mut logs = self.logs.write().await;
        let log_entry = RequestLog {
            // NOTE(review): id derives from the running total, so ids can
            // repeat across process restarts — confirm downstream consumers
            // treat them as session-local.
            id: format!("req_{}", total_requests),
            timestamp: Utc::now(),
            method: method.to_string(),
            path: path.to_string(),
            status_code,
            response_time_ms,
            client_ip: None,
            user_agent: None,
            headers: HashMap::new(),
            response_size_bytes: 0,
            error_message: error,
        };

        logs.push(log_entry);

        // Keep only last 1000 logs
        if logs.len() > 1000 {
            logs.remove(0);
        }
    }

    /// Get current metrics
    ///
    /// Returns a snapshot clone; the lock is held only for the copy.
    pub async fn get_metrics(&self) -> RequestMetrics {
        self.metrics.read().await.clone()
    }

    /// Update system metrics
    ///
    /// Overwrites the current system metrics and appends new time-series
    /// samples. Note: the `system_metrics` write guard is held across the
    /// `update_time_series_data` await below; this is safe with
    /// `tokio::sync::RwLock`, and the two methods never lock each other's
    /// data in the opposite order.
    pub async fn update_system_metrics(&self, memory_mb: u64, cpu_percent: f64, threads: u32) {
        let mut system_metrics = self.system_metrics.write().await;
        system_metrics.memory_usage_mb = memory_mb;
        system_metrics.cpu_usage_percent = cpu_percent;
        system_metrics.active_threads = threads;

        // Update time series data
        self.update_time_series_data(memory_mb as f64, cpu_percent).await;
    }

    /// Update time series data with new metrics
    ///
    /// Appends one sample to each of the four series (memory, CPU, request
    /// count, average response time) with a shared timestamp, then trims
    /// every series to the last 100 points. Acquires `time_series` (write)
    /// and then `metrics` (read).
    async fn update_time_series_data(&self, memory_mb: f64, cpu_percent: f64) {
        let now = Utc::now();
        let mut time_series = self.time_series.write().await;

        // Add memory usage data point
        time_series.memory_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: memory_mb,
        });

        // Add CPU usage data point
        time_series.cpu_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: cpu_percent,
        });

        // Add request count data point (from current metrics)
        let metrics = self.metrics.read().await;
        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: metrics.total_requests as f64,
        });

        // Add average response time data point
        let avg_response_time = if !metrics.response_times.is_empty() {
            metrics.response_times.iter().sum::<u64>() as f64 / metrics.response_times.len() as f64
        } else {
            0.0
        };
        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: avg_response_time,
        });

        // Keep only last 100 data points for each metric to prevent memory bloat
        const MAX_POINTS: usize = 100;
        if time_series.memory_usage.len() > MAX_POINTS {
            time_series.memory_usage.remove(0);
        }
        if time_series.cpu_usage.len() > MAX_POINTS {
            time_series.cpu_usage.remove(0);
        }
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    /// Get system metrics
    ///
    /// Returns a snapshot clone of the latest monitored values.
    pub async fn get_system_metrics(&self) -> SystemMetrics {
        self.system_metrics.read().await.clone()
    }

    /// Get time series data
    ///
    /// Returns a snapshot clone of all four rolling series.
    pub async fn get_time_series_data(&self) -> TimeSeriesData {
        self.time_series.read().await.clone()
    }

    /// Get restart status
    pub async fn get_restart_status(&self) -> RestartStatus {
        self.restart_status.read().await.clone()
    }

    /// Initiate server restart
    ///
    /// Marks a restart as in progress, recording the reason and initiation
    /// time, and resets the previous outcome.
    ///
    /// # Errors
    /// Returns an error if a restart is already in progress.
    pub async fn initiate_restart(&self, reason: String) -> Result<()> {
        let mut status = self.restart_status.write().await;

        if status.in_progress {
            return Err(Error::generic("Restart already in progress".to_string()));
        }

        status.in_progress = true;
        status.initiated_at = Some(Utc::now());
        status.reason = Some(reason);
        status.success = None;

        Ok(())
    }

    /// Complete restart (success or failure)
    ///
    /// Clears the in-progress flag and records the outcome.
    pub async fn complete_restart(&self, success: bool) {
        let mut status = self.restart_status.write().await;
        status.in_progress = false;
        status.success = Some(success);
    }

    /// Get smoke test results
    ///
    /// Returns a snapshot clone of all stored results.
    pub async fn get_smoke_test_results(&self) -> Vec<SmokeTestResult> {
        self.smoke_test_results.read().await.clone()
    }

    /// Update smoke test result
    ///
    /// Replaces an existing result with the same `id`, or appends a new one.
    /// The list is capped at 100 entries (oldest dropped first).
    pub async fn update_smoke_test_result(&self, result: SmokeTestResult) {
        let mut results = self.smoke_test_results.write().await;

        // Find existing result by ID and update, or add new one
        if let Some(existing) = results.iter_mut().find(|r| r.id == result.id) {
            *existing = result;
        } else {
            results.push(result);
        }

        // Keep only last 100 test results
        if results.len() > 100 {
            results.remove(0);
        }
    }

    /// Clear all smoke test results
    pub async fn clear_smoke_test_results(&self) {
        let mut results = self.smoke_test_results.write().await;
        results.clear();
    }

    /// Update time series data when a request is recorded
    ///
    /// Appends one request-count and one response-time sample, then trims
    /// both series to the last 100 points. Called from `record_request`
    /// after the metrics lock has been released.
    async fn update_time_series_on_request(&self, response_time_ms: u64, total_requests: u64) {
        let now = Utc::now();
        let mut time_series = self.time_series.write().await;

        // Add request count data point
        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: total_requests as f64,
        });

        // Add response time data point
        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: response_time_ms as f64,
        });

        // Keep only last 100 data points for each metric to prevent memory bloat
        const MAX_POINTS: usize = 100;
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    /// Get current configuration
    ///
    /// Returns a snapshot clone of the full runtime configuration.
    pub async fn get_config(&self) -> ConfigurationState {
        self.config.read().await.clone()
    }

    /// Update latency configuration
    ///
    /// Overwrites base delay, jitter, and per-tag overrides; the profile
    /// name is left unchanged.
    pub async fn update_latency_config(
        &self,
        base_ms: u64,
        jitter_ms: u64,
        tag_overrides: HashMap<String, u64>,
    ) {
        let mut config = self.config.write().await;
        config.latency_profile.base_ms = base_ms;
        config.latency_profile.jitter_ms = jitter_ms;
        config.latency_profile.tag_overrides = tag_overrides;
    }

    /// Update fault configuration
    ///
    /// Overwrites the enabled flag, failure rate, and injected status codes;
    /// `active_failures` is left unchanged.
    pub async fn update_fault_config(
        &self,
        enabled: bool,
        failure_rate: f64,
        status_codes: Vec<u16>,
    ) {
        let mut config = self.config.write().await;
        config.fault_config.enabled = enabled;
        config.fault_config.failure_rate = failure_rate;
        config.fault_config.status_codes = status_codes;
    }

    /// Update proxy configuration
    ///
    /// Overwrites the enabled flag, upstream URL, and timeout; the proxied
    /// request counter is left unchanged.
    pub async fn update_proxy_config(
        &self,
        enabled: bool,
        upstream_url: Option<String>,
        timeout_seconds: u64,
    ) {
        let mut config = self.config.write().await;
        config.proxy_config.enabled = enabled;
        config.proxy_config.upstream_url = upstream_url;
        config.proxy_config.timeout_seconds = timeout_seconds;
    }

    /// Update validation settings
    ///
    /// Overwrites all validation fields with the supplied values.
    pub async fn update_validation_config(
        &self,
        mode: String,
        aggregate_errors: bool,
        validate_responses: bool,
        overrides: HashMap<String, String>,
    ) {
        let mut config = self.config.write().await;
        config.validation_settings.mode = mode;
        config.validation_settings.aggregate_errors = aggregate_errors;
        config.validation_settings.validate_responses = validate_responses;
        config.validation_settings.overrides = overrides;
    }

    /// Get filtered logs
    ///
    /// Returns up to `filter.limit` (default 100) of the most recent logs,
    /// newest first, matching all of the optional criteria: exact method,
    /// path substring, and exact status code.
    pub async fn get_logs_filtered(&self, filter: &LogFilter) -> Vec<RequestLog> {
        let logs = self.logs.read().await;

        logs.iter()
            .rev() // Most recent first
            .filter(|log| {
                if let Some(ref method) = filter.method {
                    if log.method != *method {
                        return false;
                    }
                }
                if let Some(ref path_pattern) = filter.path_pattern {
                    if !log.path.contains(path_pattern) {
                        return false;
                    }
                }
                if let Some(status) = filter.status_code {
                    if log.status_code != status {
                        return false;
                    }
                }
                true
            })
            .take(filter.limit.unwrap_or(100))
            .cloned()
            .collect()
    }

    /// Clear all logs
    pub async fn clear_logs(&self) {
        let mut logs = self.logs.write().await;
        logs.clear();
    }
}
795
796/// Serve the main admin interface
797pub async fn serve_admin_html() -> Html<&'static str> {
798    Html(crate::get_admin_html())
799}
800
801/// Serve admin CSS
802pub async fn serve_admin_css() -> ([(http::HeaderName, &'static str); 1], &'static str) {
803    ([(http::header::CONTENT_TYPE, "text/css")], crate::get_admin_css())
804}
805
806/// Serve admin JavaScript
807pub async fn serve_admin_js() -> ([(http::HeaderName, &'static str); 1], &'static str) {
808    ([(http::header::CONTENT_TYPE, "application/javascript")], crate::get_admin_js())
809}
810
/// Get dashboard data
///
/// Assembles the complete admin-dashboard payload: build/server info, host
/// system stats, aggregate request metrics, per-protocol server status, and
/// the most recent request logs.
///
/// Request metrics are recomputed from the centralized request logger when it
/// is available (so this endpoint agrees with the logs endpoints); otherwise
/// the in-memory logs and metrics held in `AdminState` are used as a fallback.
pub async fn get_dashboard(State(state): State<AdminState>) -> Json<ApiResponse<DashboardData>> {
    // Whole-process uptime in seconds since the admin state was created.
    let uptime = Utc::now().signed_duration_since(state.start_time).num_seconds() as u64;

    // Get system metrics (memory/CPU/threads) from state.
    let system_metrics = state.get_system_metrics().await;
    // NOTE(review): the config is fetched but never used in this handler —
    // confirm whether the call is still needed.
    let _config = state.get_config().await;

    // Get recent logs and calculate metrics from centralized logger
    let (recent_logs, calculated_metrics): (Vec<RequestLog>, RequestMetrics) =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            // All logs are needed for the aggregates; only the newest 20 are
            // shown in the "recent" panel.
            let all_logs = global_logger.get_recent_logs(None).await;
            let recent_logs_subset = global_logger.get_recent_logs(Some(20)).await;

            // Calculate metrics from logs
            let total_requests = all_logs.len() as u64;
            let mut requests_by_endpoint = HashMap::new();
            let mut errors_by_endpoint = HashMap::new();
            let mut response_times = Vec::new();
            let mut last_request_by_endpoint = HashMap::new();

            // Single pass: count requests/errors per "METHOD path" endpoint
            // key and collect latencies and last-seen timestamps.
            for log in &all_logs {
                let endpoint_key = format!("{} {}", log.method, log.path);
                *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;

                // Any 4xx/5xx status counts as an error for the dashboard.
                if log.status_code >= 400 {
                    *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
                }

                response_times.push(log.response_time_ms);
                last_request_by_endpoint.insert(endpoint_key, log.timestamp);
            }

            let calculated_metrics = RequestMetrics {
                total_requests,
                active_connections: 0, // We don't track this from logs
                requests_by_endpoint,
                response_times,
                response_times_by_endpoint: HashMap::new(), // Simplified for now
                errors_by_endpoint,
                last_request_by_endpoint,
            };

            // Convert to RequestLog format for admin UI
            let recent_logs = recent_logs_subset
                .into_iter()
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            (recent_logs, calculated_metrics)
        } else {
            // Fallback to local logs if centralized logger not available.
            // NOTE(review): this path shows 10 recent logs vs. 20 on the
            // centralized path — confirm whether that is intentional.
            let logs = state.logs.read().await;
            let recent_logs = logs.iter().rev().take(10).cloned().collect();
            let metrics = state.get_metrics().await;
            (recent_logs, metrics)
        };

    let metrics = calculated_metrics;

    // Host/process stats shown in the dashboard header.
    let system_info = SystemInfo {
        version: env!("CARGO_PKG_VERSION").to_string(),
        uptime_seconds: uptime,
        memory_usage_mb: system_metrics.memory_usage_mb,
        cpu_usage_percent: system_metrics.cpu_usage_percent,
        active_threads: system_metrics.active_threads as usize,
        // "Routes" here means endpoints that have received at least one request.
        total_routes: metrics.requests_by_endpoint.len(),
        total_fixtures: count_fixtures().unwrap_or(0),
    };

    // Per-protocol server cards. A server is considered "running" iff its
    // bind address is known. The WebSocket/gRPC connection counts are rough
    // estimates derived from the single aggregate counter.
    let servers = vec![
        ServerStatus {
            server_type: "HTTP".to_string(),
            address: state.http_server_addr.map(|addr| addr.to_string()),
            running: state.http_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections,
            total_requests: count_requests_by_server_type(&metrics, "HTTP"),
        },
        ServerStatus {
            server_type: "WebSocket".to_string(),
            address: state.ws_server_addr.map(|addr| addr.to_string()),
            running: state.ws_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 2, // Estimate
            total_requests: count_requests_by_server_type(&metrics, "WebSocket"),
        },
        ServerStatus {
            server_type: "gRPC".to_string(),
            address: state.grpc_server_addr.map(|addr| addr.to_string()),
            running: state.grpc_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 3, // Estimate
            total_requests: count_requests_by_server_type(&metrics, "gRPC"),
        },
    ];

    // Build routes info from actual request metrics
    let mut routes = Vec::new();
    for (endpoint, count) in &metrics.requests_by_endpoint {
        // Endpoint keys are "METHOD path"; split back into the two parts.
        let parts: Vec<&str> = endpoint.splitn(2, ' ').collect();
        if parts.len() == 2 {
            let method = parts[0].to_string();
            let path = parts[1].to_string();
            let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);

            routes.push(RouteInfo {
                method: Some(method.clone()),
                path: path.clone(),
                priority: 0,
                has_fixtures: route_has_fixtures(&method, &path),
                latency_ms: calculate_endpoint_latency(&metrics, endpoint),
                request_count: *count,
                last_request: get_endpoint_last_request(&metrics, endpoint),
                error_count,
            });
        }
    }

    // NOTE(review): `routes` is assembled above but never included in
    // `DashboardData` below — either dead code or a missing field; confirm.
    let dashboard = DashboardData {
        server_info: ServerInfo {
            version: env!("CARGO_PKG_VERSION").to_string(),
            // Build metadata injected by vergen at compile time, when present.
            build_time: option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("unknown").to_string(),
            git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("unknown").to_string(),
            http_server: state.http_server_addr.map(|addr| addr.to_string()),
            ws_server: state.ws_server_addr.map(|addr| addr.to_string()),
            grpc_server: state.grpc_server_addr.map(|addr| addr.to_string()),
            graphql_server: state.graphql_server_addr.map(|addr| addr.to_string()),
            api_enabled: state.api_enabled,
            admin_port: state.admin_port,
        },
        system_info: DashboardSystemInfo {
            os: std::env::consts::OS.to_string(),
            arch: std::env::consts::ARCH.to_string(),
            uptime,
            memory_usage: system_metrics.memory_usage_mb * 1024 * 1024, // Convert MB to bytes
        },
        metrics: SimpleMetricsData {
            total_requests: metrics.requests_by_endpoint.values().sum(),
            active_requests: metrics.active_connections,
            // Mean latency in ms; 0.0 when no requests have been recorded.
            average_response_time: if metrics.response_times.is_empty() {
                0.0
            } else {
                metrics.response_times.iter().sum::<u64>() as f64
                    / metrics.response_times.len() as f64
            },
            // Fraction (0.0–1.0) of requests that produced a 4xx/5xx status.
            error_rate: {
                let total_requests = metrics.requests_by_endpoint.values().sum::<u64>();
                let total_errors = metrics.errors_by_endpoint.values().sum::<u64>();
                if total_requests == 0 {
                    0.0
                } else {
                    total_errors as f64 / total_requests as f64
                }
            },
        },
        servers,
        recent_logs,
        system: system_info,
    };

    Json(ApiResponse::success(dashboard))
}
990
991/// Get routes by proxying to HTTP server
992pub async fn get_routes(State(state): State<AdminState>) -> impl IntoResponse {
993    if let Some(http_addr) = state.http_server_addr {
994        // Try to fetch routes from the HTTP server
995        let url = format!("http://{}/__mockforge/routes", http_addr);
996        if let Ok(response) = reqwest::get(&url).await {
997            if response.status().is_success() {
998                if let Ok(body) = response.text().await {
999                    return (StatusCode::OK, [("content-type", "application/json")], body);
1000                }
1001            }
1002        }
1003    }
1004
1005    // Fallback: return empty routes
1006    (
1007        StatusCode::OK,
1008        [("content-type", "application/json")],
1009        r#"{"routes":[]}"#.to_string(),
1010    )
1011}
1012
1013/// Get server info (HTTP server address for API calls)
1014pub async fn get_server_info(State(state): State<AdminState>) -> Json<serde_json::Value> {
1015    Json(json!({
1016        "http_server": state.http_server_addr.map(|addr| addr.to_string()),
1017        "ws_server": state.ws_server_addr.map(|addr| addr.to_string()),
1018        "grpc_server": state.grpc_server_addr.map(|addr| addr.to_string()),
1019        "admin_port": state.admin_port
1020    }))
1021}
1022
1023/// Get health check status
1024pub async fn get_health() -> Json<HealthCheck> {
1025    Json(
1026        HealthCheck::healthy()
1027            .with_service("http".to_string(), "healthy".to_string())
1028            .with_service("websocket".to_string(), "healthy".to_string())
1029            .with_service("grpc".to_string(), "healthy".to_string()),
1030    )
1031}
1032
1033/// Get request logs with optional filtering
1034pub async fn get_logs(
1035    State(state): State<AdminState>,
1036    Query(params): Query<HashMap<String, String>>,
1037) -> Json<ApiResponse<Vec<RequestLog>>> {
1038    let mut filter = LogFilter::default();
1039
1040    if let Some(method) = params.get("method") {
1041        filter.method = Some(method.clone());
1042    }
1043    if let Some(path) = params.get("path") {
1044        filter.path_pattern = Some(path.clone());
1045    }
1046    if let Some(status) = params.get("status").and_then(|s| s.parse().ok()) {
1047        filter.status_code = Some(status);
1048    }
1049    if let Some(limit) = params.get("limit").and_then(|s| s.parse().ok()) {
1050        filter.limit = Some(limit);
1051    }
1052
1053    // Get logs from centralized logger (same as dashboard)
1054    let logs = if let Some(global_logger) = mockforge_core::get_global_logger() {
1055        // Get logs from centralized logger
1056        let centralized_logs = global_logger.get_recent_logs(filter.limit).await;
1057
1058        // Convert to RequestLog format and apply filters
1059        centralized_logs
1060            .into_iter()
1061            .filter(|log| {
1062                if let Some(ref method) = filter.method {
1063                    if log.method != *method {
1064                        return false;
1065                    }
1066                }
1067                if let Some(ref path_pattern) = filter.path_pattern {
1068                    if !log.path.contains(path_pattern) {
1069                        return false;
1070                    }
1071                }
1072                if let Some(status) = filter.status_code {
1073                    if log.status_code != status {
1074                        return false;
1075                    }
1076                }
1077                true
1078            })
1079            .map(|log| RequestLog {
1080                id: log.id,
1081                timestamp: log.timestamp,
1082                method: log.method,
1083                path: log.path,
1084                status_code: log.status_code,
1085                response_time_ms: log.response_time_ms,
1086                client_ip: log.client_ip,
1087                user_agent: log.user_agent,
1088                headers: log.headers,
1089                response_size_bytes: log.response_size_bytes,
1090                error_message: log.error_message,
1091            })
1092            .collect()
1093    } else {
1094        // Fallback to local logs if centralized logger not available
1095        state.get_logs_filtered(&filter).await
1096    };
1097
1098    Json(ApiResponse::success(logs))
1099}
1100
1101/// Get reality trace metadata for a specific request
1102///
1103/// GET /__mockforge/api/reality/trace/:request_id
1104pub async fn get_reality_trace(
1105    Path(request_id): Path<String>,
1106) -> Json<ApiResponse<Option<mockforge_core::request_logger::RealityTraceMetadata>>> {
1107    if let Some(global_logger) = mockforge_core::get_global_logger() {
1108        let logs = global_logger.get_recent_logs(None).await;
1109        if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
1110            Json(ApiResponse::success(log_entry.reality_metadata))
1111        } else {
1112            Json(ApiResponse::error(format!("Request {} not found", request_id)))
1113        }
1114    } else {
1115        Json(ApiResponse::error("Request logger not initialized".to_string()))
1116    }
1117}
1118
1119/// Get response generation trace for a specific request
1120///
1121/// GET /__mockforge/api/reality/response-trace/:request_id
1122pub async fn get_response_trace(
1123    Path(request_id): Path<String>,
1124) -> Json<ApiResponse<Option<serde_json::Value>>> {
1125    if let Some(global_logger) = mockforge_core::get_global_logger() {
1126        let logs = global_logger.get_recent_logs(None).await;
1127        if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
1128            // Response generation trace would be stored in metadata
1129            // For now, return the metadata as JSON
1130            let trace = log_entry
1131                .metadata
1132                .get("response_generation_trace")
1133                .and_then(|s| serde_json::from_str::<serde_json::Value>(s).ok());
1134            Json(ApiResponse::success(trace))
1135        } else {
1136            Json(ApiResponse::error(format!("Request {} not found", request_id)))
1137        }
1138    } else {
1139        Json(ApiResponse::error("Request logger not initialized".to_string()))
1140    }
1141}
1142
1143// Configuration for recent logs display
1144const RECENT_LOGS_LIMIT: usize = 20;
1145const RECENT_LOGS_TTL_MINUTES: i64 = 5;
1146
1147/// SSE endpoint for real-time log streaming
1148pub async fn logs_sse(
1149    State(_state): State<AdminState>,
1150) -> Sse<impl Stream<Item = std::result::Result<Event, Infallible>>> {
1151    tracing::info!("SSE endpoint /logs/sse accessed - starting real-time log streaming for recent requests only");
1152
1153    let stream = stream::unfold(std::collections::HashSet::new(), |mut seen_ids| async move {
1154        tokio::time::sleep(Duration::from_millis(500)).await;
1155
1156        // Get recent logs from centralized logger (limit to recent entries for dashboard)
1157        if let Some(global_logger) = mockforge_core::get_global_logger() {
1158            let centralized_logs = global_logger.get_recent_logs(Some(RECENT_LOGS_LIMIT)).await;
1159
1160            tracing::debug!(
1161                "SSE: Checking logs - total logs: {}, seen logs: {}",
1162                centralized_logs.len(),
1163                seen_ids.len()
1164            );
1165
1166            // Filter for recent logs within TTL
1167            let now = Utc::now();
1168            let ttl_cutoff = now - chrono::Duration::minutes(RECENT_LOGS_TTL_MINUTES);
1169
1170            // Find new logs that haven't been seen before
1171            let new_logs: Vec<RequestLog> = centralized_logs
1172                .into_iter()
1173                .filter(|log| {
1174                    // Only include logs from the last X minutes and not yet seen
1175                    log.timestamp > ttl_cutoff && !seen_ids.contains(&log.id)
1176                })
1177                .map(|log| RequestLog {
1178                    id: log.id,
1179                    timestamp: log.timestamp,
1180                    method: log.method,
1181                    path: log.path,
1182                    status_code: log.status_code,
1183                    response_time_ms: log.response_time_ms,
1184                    client_ip: log.client_ip,
1185                    user_agent: log.user_agent,
1186                    headers: log.headers,
1187                    response_size_bytes: log.response_size_bytes,
1188                    error_message: log.error_message,
1189                })
1190                .collect();
1191
1192            // Add new log IDs to the seen set
1193            for log in &new_logs {
1194                seen_ids.insert(log.id.clone());
1195            }
1196
1197            // Send new logs if any
1198            if !new_logs.is_empty() {
1199                tracing::info!("SSE: Sending {} new logs to client", new_logs.len());
1200
1201                let event_data = serde_json::to_string(&new_logs).unwrap_or_default();
1202                let event = Ok(Event::default().event("new_logs").data(event_data));
1203
1204                return Some((event, seen_ids));
1205            }
1206        }
1207
1208        // Send keep-alive
1209        let event = Ok(Event::default().event("keep_alive").data(""));
1210        Some((event, seen_ids))
1211    });
1212
1213    Sse::new(stream).keep_alive(
1214        axum::response::sse::KeepAlive::new()
1215            .interval(Duration::from_secs(15))
1216            .text("keep-alive-text"),
1217    )
1218}
1219
1220/// Get metrics data
1221pub async fn get_metrics(State(state): State<AdminState>) -> Json<ApiResponse<MetricsData>> {
1222    // Get metrics from global logger (same as get_dashboard)
1223    let metrics = if let Some(global_logger) = mockforge_core::get_global_logger() {
1224        let all_logs = global_logger.get_recent_logs(None).await;
1225
1226        let total_requests = all_logs.len() as u64;
1227        let mut requests_by_endpoint = HashMap::new();
1228        let mut errors_by_endpoint = HashMap::new();
1229        let mut response_times = Vec::new();
1230        let mut last_request_by_endpoint = HashMap::new();
1231
1232        for log in &all_logs {
1233            let endpoint_key = format!("{} {}", log.method, log.path);
1234            *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1235
1236            if log.status_code >= 400 {
1237                *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1238            }
1239
1240            response_times.push(log.response_time_ms);
1241            last_request_by_endpoint.insert(endpoint_key, log.timestamp);
1242        }
1243
1244        RequestMetrics {
1245            total_requests,
1246            active_connections: 0,
1247            requests_by_endpoint,
1248            response_times,
1249            response_times_by_endpoint: HashMap::new(),
1250            errors_by_endpoint,
1251            last_request_by_endpoint,
1252        }
1253    } else {
1254        state.get_metrics().await
1255    };
1256
1257    let system_metrics = state.get_system_metrics().await;
1258    let time_series = state.get_time_series_data().await;
1259
1260    // Helper function to calculate percentile from sorted array
1261    fn calculate_percentile(sorted_data: &[u64], percentile: f64) -> u64 {
1262        if sorted_data.is_empty() {
1263            return 0;
1264        }
1265        let idx = ((sorted_data.len() as f64) * percentile).ceil() as usize;
1266        let idx = idx.min(sorted_data.len().saturating_sub(1));
1267        sorted_data[idx]
1268    }
1269
1270    // Calculate percentiles from response times (p50, p75, p90, p95, p99, p99.9)
1271    let mut response_times = metrics.response_times.clone();
1272    response_times.sort();
1273
1274    let p50 = calculate_percentile(&response_times, 0.50);
1275    let p75 = calculate_percentile(&response_times, 0.75);
1276    let p90 = calculate_percentile(&response_times, 0.90);
1277    let p95 = calculate_percentile(&response_times, 0.95);
1278    let p99 = calculate_percentile(&response_times, 0.99);
1279    let p999 = calculate_percentile(&response_times, 0.999);
1280
1281    // Calculate per-endpoint percentiles for detailed analysis
1282    let mut response_times_by_endpoint: HashMap<String, Vec<u64>> = HashMap::new();
1283    if let Some(global_logger) = mockforge_core::get_global_logger() {
1284        let all_logs = global_logger.get_recent_logs(None).await;
1285        for log in &all_logs {
1286            let endpoint_key = format!("{} {}", log.method, log.path);
1287            response_times_by_endpoint
1288                .entry(endpoint_key)
1289                .or_default()
1290                .push(log.response_time_ms);
1291        }
1292    }
1293
1294    // Calculate percentiles for each endpoint
1295    let mut endpoint_percentiles: HashMap<String, HashMap<String, u64>> = HashMap::new();
1296    for (endpoint, times) in &mut response_times_by_endpoint {
1297        times.sort();
1298        if !times.is_empty() {
1299            endpoint_percentiles.insert(
1300                endpoint.clone(),
1301                HashMap::from([
1302                    ("p50".to_string(), calculate_percentile(times, 0.50)),
1303                    ("p75".to_string(), calculate_percentile(times, 0.75)),
1304                    ("p90".to_string(), calculate_percentile(times, 0.90)),
1305                    ("p95".to_string(), calculate_percentile(times, 0.95)),
1306                    ("p99".to_string(), calculate_percentile(times, 0.99)),
1307                    ("p999".to_string(), calculate_percentile(times, 0.999)),
1308                ]),
1309            );
1310        }
1311    }
1312
1313    // Calculate error rates
1314    let mut error_rate_by_endpoint = HashMap::new();
1315    for (endpoint, total_count) in &metrics.requests_by_endpoint {
1316        let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
1317        let error_rate = if *total_count > 0 {
1318            error_count as f64 / *total_count as f64
1319        } else {
1320            0.0
1321        };
1322        error_rate_by_endpoint.insert(endpoint.clone(), error_rate);
1323    }
1324
1325    // Convert time series data to the format expected by the frontend
1326    // If no time series data exists yet, use current system metrics as a fallback
1327    let memory_usage_over_time = if time_series.memory_usage.is_empty() {
1328        vec![(Utc::now(), system_metrics.memory_usage_mb)]
1329    } else {
1330        time_series
1331            .memory_usage
1332            .iter()
1333            .map(|point| (point.timestamp, point.value as u64))
1334            .collect()
1335    };
1336
1337    let cpu_usage_over_time = if time_series.cpu_usage.is_empty() {
1338        vec![(Utc::now(), system_metrics.cpu_usage_percent)]
1339    } else {
1340        time_series
1341            .cpu_usage
1342            .iter()
1343            .map(|point| (point.timestamp, point.value))
1344            .collect()
1345    };
1346
1347    // Build time-series latency data (last 100 data points)
1348    let latency_over_time: Vec<(chrono::DateTime<Utc>, u64)> =
1349        if let Some(global_logger) = mockforge_core::get_global_logger() {
1350            let all_logs = global_logger.get_recent_logs(Some(100)).await;
1351            all_logs.iter().map(|log| (log.timestamp, log.response_time_ms)).collect()
1352        } else {
1353            Vec::new()
1354        };
1355
1356    let metrics_data = MetricsData {
1357        requests_by_endpoint: metrics.requests_by_endpoint,
1358        response_time_percentiles: HashMap::from([
1359            ("p50".to_string(), p50),
1360            ("p75".to_string(), p75),
1361            ("p90".to_string(), p90),
1362            ("p95".to_string(), p95),
1363            ("p99".to_string(), p99),
1364            ("p999".to_string(), p999),
1365        ]),
1366        endpoint_percentiles: Some(endpoint_percentiles),
1367        latency_over_time: Some(latency_over_time),
1368        error_rate_by_endpoint,
1369        memory_usage_over_time,
1370        cpu_usage_over_time,
1371    };
1372
1373    Json(ApiResponse::success(metrics_data))
1374}
1375
1376/// Update latency profile
1377pub async fn update_latency(
1378    State(state): State<AdminState>,
1379    headers: http::HeaderMap,
1380    Json(update): Json<ConfigUpdate>,
1381) -> Json<ApiResponse<String>> {
1382    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
1383    use crate::rbac::{extract_user_context, get_default_user_context};
1384
1385    if update.config_type != "latency" {
1386        return Json(ApiResponse::error("Invalid config type".to_string()));
1387    }
1388
1389    // Extract latency configuration from the update data
1390    let base_ms = update.data.get("base_ms").and_then(|v| v.as_u64()).unwrap_or(50);
1391    let jitter_ms = update.data.get("jitter_ms").and_then(|v| v.as_u64()).unwrap_or(20);
1392
1393    let tag_overrides: HashMap<String, u64> = update
1394        .data
1395        .get("tag_overrides")
1396        .and_then(|v| v.as_object())
1397        .map(|obj| obj.iter().filter_map(|(k, v)| v.as_u64().map(|val| (k.clone(), val))).collect())
1398        .unwrap_or_default();
1399
1400    // Update the actual configuration
1401    state.update_latency_config(base_ms, jitter_ms, tag_overrides.clone()).await;
1402
1403    // Record audit log with user context
1404    if let Some(audit_store) = get_global_audit_store() {
1405        let metadata = serde_json::json!({
1406            "base_ms": base_ms,
1407            "jitter_ms": jitter_ms,
1408            "tag_overrides": tag_overrides,
1409        });
1410        let mut audit_log = create_audit_log(
1411            AdminActionType::ConfigLatencyUpdated,
1412            format!("Latency profile updated: base_ms={}, jitter_ms={}", base_ms, jitter_ms),
1413            None,
1414            true,
1415            None,
1416            Some(metadata),
1417        );
1418
1419        // Extract user context from headers
1420        if let Some(user_ctx) = extract_user_context(&headers).or_else(get_default_user_context) {
1421            audit_log.user_id = Some(user_ctx.user_id);
1422            audit_log.username = Some(user_ctx.username);
1423        }
1424
1425        // Extract IP address from headers
1426        if let Some(ip) = headers
1427            .get("x-forwarded-for")
1428            .or_else(|| headers.get("x-real-ip"))
1429            .and_then(|h| h.to_str().ok())
1430        {
1431            audit_log.ip_address = Some(ip.to_string());
1432        }
1433
1434        // Extract user agent
1435        if let Some(ua) = headers.get("user-agent").and_then(|h| h.to_str().ok()) {
1436            audit_log.user_agent = Some(ua.to_string());
1437        }
1438
1439        audit_store.record(audit_log).await;
1440    }
1441
1442    tracing::info!("Updated latency profile: base_ms={}, jitter_ms={}", base_ms, jitter_ms);
1443
1444    Json(ApiResponse::success("Latency profile updated".to_string()))
1445}
1446
1447/// Update fault injection configuration
1448pub async fn update_faults(
1449    State(state): State<AdminState>,
1450    Json(update): Json<ConfigUpdate>,
1451) -> Json<ApiResponse<String>> {
1452    if update.config_type != "faults" {
1453        return Json(ApiResponse::error("Invalid config type".to_string()));
1454    }
1455
1456    // Extract fault configuration from the update data
1457    let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1458
1459    let failure_rate = update.data.get("failure_rate").and_then(|v| v.as_f64()).unwrap_or(0.0);
1460
1461    let status_codes = update
1462        .data
1463        .get("status_codes")
1464        .and_then(|v| v.as_array())
1465        .map(|arr| arr.iter().filter_map(|v| v.as_u64().map(|n| n as u16)).collect())
1466        .unwrap_or_else(|| vec![500, 502, 503]);
1467
1468    // Update the actual configuration
1469    state.update_fault_config(enabled, failure_rate, status_codes).await;
1470
1471    tracing::info!(
1472        "Updated fault configuration: enabled={}, failure_rate={}",
1473        enabled,
1474        failure_rate
1475    );
1476
1477    Json(ApiResponse::success("Fault configuration updated".to_string()))
1478}
1479
1480/// Update proxy configuration
1481pub async fn update_proxy(
1482    State(state): State<AdminState>,
1483    Json(update): Json<ConfigUpdate>,
1484) -> Json<ApiResponse<String>> {
1485    if update.config_type != "proxy" {
1486        return Json(ApiResponse::error("Invalid config type".to_string()));
1487    }
1488
1489    // Extract proxy configuration from the update data
1490    let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1491
1492    let upstream_url =
1493        update.data.get("upstream_url").and_then(|v| v.as_str()).map(|s| s.to_string());
1494
1495    let timeout_seconds = update.data.get("timeout_seconds").and_then(|v| v.as_u64()).unwrap_or(30);
1496
1497    // Update the actual configuration
1498    state.update_proxy_config(enabled, upstream_url.clone(), timeout_seconds).await;
1499
1500    tracing::info!(
1501        "Updated proxy configuration: enabled={}, upstream_url={:?}",
1502        enabled,
1503        upstream_url
1504    );
1505
1506    Json(ApiResponse::success("Proxy configuration updated".to_string()))
1507}
1508
1509/// Clear request logs
1510pub async fn clear_logs(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
1511    // Clear the actual logs from state
1512    state.clear_logs().await;
1513    tracing::info!("Cleared all request logs");
1514
1515    Json(ApiResponse::success("Logs cleared".to_string()))
1516}
1517
1518/// Restart servers
1519pub async fn restart_servers(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
1520    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
1521    // Check if restart is already in progress
1522    let current_status = state.get_restart_status().await;
1523    if current_status.in_progress {
1524        return Json(ApiResponse::error("Server restart already in progress".to_string()));
1525    }
1526
1527    // Initiate restart status
1528    let restart_result = state
1529        .initiate_restart("Manual restart requested via admin UI".to_string())
1530        .await;
1531
1532    let success = restart_result.is_ok();
1533    let error_msg = restart_result.as_ref().err().map(|e| format!("{}", e));
1534
1535    // Record audit log
1536    if let Some(audit_store) = get_global_audit_store() {
1537        let audit_log = create_audit_log(
1538            AdminActionType::ServerRestarted,
1539            "Server restart initiated via admin UI".to_string(),
1540            None,
1541            success,
1542            error_msg.clone(),
1543            None,
1544        );
1545        audit_store.record(audit_log).await;
1546    }
1547
1548    if let Err(e) = restart_result {
1549        return Json(ApiResponse::error(format!("Failed to initiate restart: {}", e)));
1550    }
1551
1552    // Spawn restart task to avoid blocking the response
1553    let state_clone = state.clone();
1554    tokio::spawn(async move {
1555        if let Err(e) = perform_server_restart(&state_clone).await {
1556            tracing::error!("Server restart failed: {}", e);
1557            state_clone.complete_restart(false).await;
1558        } else {
1559            tracing::info!("Server restart completed successfully");
1560            state_clone.complete_restart(true).await;
1561        }
1562    });
1563
1564    tracing::info!("Server restart initiated via admin UI");
1565    Json(ApiResponse::success(
1566        "Server restart initiated. Please wait for completion.".to_string(),
1567    ))
1568}
1569
1570/// Perform the actual server restart
1571async fn perform_server_restart(_state: &AdminState) -> Result<()> {
1572    // Get the current process ID
1573    let current_pid = std::process::id();
1574    tracing::info!("Initiating restart for process PID: {}", current_pid);
1575
1576    // Try to find the parent process (MockForge CLI)
1577    let parent_pid = get_parent_process_id(current_pid).await?;
1578    tracing::info!("Found parent process PID: {}", parent_pid);
1579
1580    // Method 1: Try to restart via parent process signal
1581    if let Ok(()) = restart_via_parent_signal(parent_pid).await {
1582        tracing::info!("Restart initiated via parent process signal");
1583        return Ok(());
1584    }
1585
1586    // Method 2: Fallback to process replacement
1587    if let Ok(()) = restart_via_process_replacement().await {
1588        tracing::info!("Restart initiated via process replacement");
1589        return Ok(());
1590    }
1591
1592    // Method 3: Last resort - graceful shutdown with restart script
1593    restart_via_script().await
1594}
1595
1596/// Get parent process ID
1597async fn get_parent_process_id(pid: u32) -> Result<u32> {
1598    // Try to read from /proc/pid/stat on Linux
1599    #[cfg(target_os = "linux")]
1600    {
1601        // Read /proc filesystem using spawn_blocking
1602        let stat_path = format!("/proc/{}/stat", pid);
1603        if let Ok(ppid) = tokio::task::spawn_blocking(move || -> Result<u32> {
1604            let content = std::fs::read_to_string(&stat_path)
1605                .map_err(|e| Error::generic(format!("Failed to read {}: {}", stat_path, e)))?;
1606
1607            let fields: Vec<&str> = content.split_whitespace().collect();
1608            if fields.len() > 3 {
1609                fields[3]
1610                    .parse::<u32>()
1611                    .map_err(|e| Error::generic(format!("Failed to parse PPID: {}", e)))
1612            } else {
1613                Err(Error::generic("Insufficient fields in /proc/pid/stat".to_string()))
1614            }
1615        })
1616        .await
1617        {
1618            return ppid;
1619        }
1620    }
1621
1622    // Fallback: assume we're running under a shell/process manager
1623    Ok(1) // PID 1 as fallback
1624}
1625
1626/// Restart via parent process signal
1627async fn restart_via_parent_signal(parent_pid: u32) -> Result<()> {
1628    #[cfg(unix)]
1629    {
1630        use std::process::Command;
1631
1632        // Send SIGTERM to parent process to trigger restart
1633        let output = Command::new("kill")
1634            .args(["-TERM", &parent_pid.to_string()])
1635            .output()
1636            .map_err(|e| Error::generic(format!("Failed to send signal: {}", e)))?;
1637
1638        if !output.status.success() {
1639            return Err(Error::generic(
1640                "Failed to send restart signal to parent process".to_string(),
1641            ));
1642        }
1643
1644        // Wait a moment for the signal to be processed
1645        tokio::time::sleep(Duration::from_millis(100)).await;
1646        Ok(())
1647    }
1648
1649    #[cfg(not(unix))]
1650    {
1651        Err(Error::generic(
1652            "Signal-based restart not supported on this platform".to_string(),
1653        ))
1654    }
1655}
1656
/// Restart via process replacement
///
/// Spawns a fresh copy of the current executable with the same command-line
/// arguments, inheriting stdout/stderr, then checks on it after 500 ms:
/// - child still running: the handover worked — this function terminates the
///   current process with `std::process::exit(0)` and therefore DOES NOT
///   return in the success path;
/// - child already exited successfully: reports `Ok` (NOTE(review): a
///   long-running server exiting within 500 ms is suspicious — confirm this
///   branch is intended to count as success);
/// - child exited with an error, or its status cannot be read: reports `Err`.
async fn restart_via_process_replacement() -> Result<()> {
    // Get the current executable path
    let current_exe = std::env::current_exe()
        .map_err(|e| Error::generic(format!("Failed to get current executable: {}", e)))?;

    // Get current command line arguments
    let args: Vec<String> = std::env::args().collect();

    tracing::info!("Restarting with command: {:?}", args);

    // Start new process
    let mut child = Command::new(&current_exe)
        .args(&args[1..]) // Skip the program name
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .spawn()
        .map_err(|e| Error::generic(format!("Failed to start new process: {}", e)))?;

    // Give the new process a moment to start
    tokio::time::sleep(Duration::from_millis(500)).await;

    // Check if the new process is still running (try_wait is non-blocking)
    match child.try_wait() {
        Ok(Some(status)) => {
            if status.success() {
                tracing::info!("New process started successfully");
                Ok(())
            } else {
                Err(Error::generic("New process exited with error".to_string()))
            }
        }
        Ok(None) => {
            tracing::info!("New process is running, exiting current process");
            // Exit current process — control never returns past this point.
            std::process::exit(0);
        }
        Err(e) => Err(Error::generic(format!("Failed to check new process status: {}", e))),
    }
}
1697
1698/// Restart via external script
1699async fn restart_via_script() -> Result<()> {
1700    // Look for restart script in common locations
1701    let script_paths = ["./scripts/restart.sh", "./restart.sh", "restart.sh"];
1702
1703    for script_path in &script_paths {
1704        if std::path::Path::new(script_path).exists() {
1705            tracing::info!("Using restart script: {}", script_path);
1706
1707            let output = Command::new("bash")
1708                .arg(script_path)
1709                .output()
1710                .map_err(|e| Error::generic(format!("Failed to execute restart script: {}", e)))?;
1711
1712            if output.status.success() {
1713                return Ok(());
1714            } else {
1715                tracing::warn!(
1716                    "Restart script failed: {}",
1717                    String::from_utf8_lossy(&output.stderr)
1718                );
1719            }
1720        }
1721    }
1722
1723    // If no script found, try to use the clear-ports script as a fallback
1724    let clear_script = "./scripts/clear-ports.sh";
1725    if std::path::Path::new(clear_script).exists() {
1726        tracing::info!("Using clear-ports script as fallback");
1727
1728        let _ = Command::new("bash").arg(clear_script).output();
1729    }
1730
1731    Err(Error::generic(
1732        "No restart mechanism available. Please restart manually.".to_string(),
1733    ))
1734}
1735
1736/// Get restart status
1737pub async fn get_restart_status(
1738    State(state): State<AdminState>,
1739) -> Json<ApiResponse<RestartStatus>> {
1740    let status = state.get_restart_status().await;
1741    Json(ApiResponse::success(status))
1742}
1743
1744/// Get audit logs
1745pub async fn get_audit_logs(
1746    Query(params): Query<HashMap<String, String>>,
1747) -> Json<ApiResponse<Vec<crate::audit::AdminAuditLog>>> {
1748    use crate::audit::{get_global_audit_store, AdminActionType};
1749
1750    let action_type_str = params.get("action_type");
1751    let user_id = params.get("user_id").map(|s| s.as_str());
1752    let limit = params.get("limit").and_then(|s| s.parse::<usize>().ok());
1753    let offset = params.get("offset").and_then(|s| s.parse::<usize>().ok());
1754
1755    // Parse action type if provided
1756    let action_type = action_type_str.and_then(|s| {
1757        // Simple string matching - could be enhanced with proper parsing
1758        match s.as_str() {
1759            "config_latency_updated" => Some(AdminActionType::ConfigLatencyUpdated),
1760            "config_faults_updated" => Some(AdminActionType::ConfigFaultsUpdated),
1761            "server_restarted" => Some(AdminActionType::ServerRestarted),
1762            "logs_cleared" => Some(AdminActionType::LogsCleared),
1763            _ => None,
1764        }
1765    });
1766
1767    if let Some(audit_store) = get_global_audit_store() {
1768        let logs = audit_store.get_logs(action_type, user_id, limit, offset).await;
1769        Json(ApiResponse::success(logs))
1770    } else {
1771        Json(ApiResponse::error("Audit logging not initialized".to_string()))
1772    }
1773}
1774
1775/// Get audit log statistics
1776pub async fn get_audit_stats() -> Json<ApiResponse<crate::audit::AuditLogStats>> {
1777    use crate::audit::get_global_audit_store;
1778
1779    if let Some(audit_store) = get_global_audit_store() {
1780        let stats = audit_store.get_stats().await;
1781        Json(ApiResponse::success(stats))
1782    } else {
1783        Json(ApiResponse::error("Audit logging not initialized".to_string()))
1784    }
1785}
1786
1787/// Get server configuration
1788pub async fn get_config(State(state): State<AdminState>) -> Json<ApiResponse<serde_json::Value>> {
1789    let config_state = state.get_config().await;
1790
1791    let config = json!({
1792        "latency": {
1793            "enabled": true,
1794            "base_ms": config_state.latency_profile.base_ms,
1795            "jitter_ms": config_state.latency_profile.jitter_ms,
1796            "tag_overrides": config_state.latency_profile.tag_overrides
1797        },
1798        "faults": {
1799            "enabled": config_state.fault_config.enabled,
1800            "failure_rate": config_state.fault_config.failure_rate,
1801            "status_codes": config_state.fault_config.status_codes
1802        },
1803        "proxy": {
1804            "enabled": config_state.proxy_config.enabled,
1805            "upstream_url": config_state.proxy_config.upstream_url,
1806            "timeout_seconds": config_state.proxy_config.timeout_seconds
1807        },
1808        "traffic_shaping": {
1809            "enabled": config_state.traffic_shaping.enabled,
1810            "bandwidth": config_state.traffic_shaping.bandwidth,
1811            "burst_loss": config_state.traffic_shaping.burst_loss
1812        },
1813        "validation": {
1814            "mode": config_state.validation_settings.mode,
1815            "aggregate_errors": config_state.validation_settings.aggregate_errors,
1816            "validate_responses": config_state.validation_settings.validate_responses,
1817            "overrides": config_state.validation_settings.overrides
1818        }
1819    });
1820
1821    Json(ApiResponse::success(config))
1822}
1823
1824/// Count total fixtures in the fixtures directory
1825pub fn count_fixtures() -> Result<usize> {
1826    // Get the fixtures directory from environment or use default
1827    let fixtures_dir =
1828        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1829    let fixtures_path = std::path::Path::new(&fixtures_dir);
1830
1831    if !fixtures_path.exists() {
1832        return Ok(0);
1833    }
1834
1835    let mut total_count = 0;
1836
1837    // Count HTTP fixtures
1838    let http_fixtures_path = fixtures_path.join("http");
1839    if http_fixtures_path.exists() {
1840        total_count += count_fixtures_in_directory(&http_fixtures_path)?;
1841    }
1842
1843    // Count WebSocket fixtures
1844    let ws_fixtures_path = fixtures_path.join("websocket");
1845    if ws_fixtures_path.exists() {
1846        total_count += count_fixtures_in_directory(&ws_fixtures_path)?;
1847    }
1848
1849    // Count gRPC fixtures
1850    let grpc_fixtures_path = fixtures_path.join("grpc");
1851    if grpc_fixtures_path.exists() {
1852        total_count += count_fixtures_in_directory(&grpc_fixtures_path)?;
1853    }
1854
1855    Ok(total_count)
1856}
1857
1858/// Helper function to count JSON files in a directory recursively (blocking version)
1859fn count_fixtures_in_directory(dir_path: &std::path::Path) -> Result<usize> {
1860    let mut count = 0;
1861
1862    if let Ok(entries) = std::fs::read_dir(dir_path) {
1863        for entry in entries {
1864            let entry = entry
1865                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
1866            let path = entry.path();
1867
1868            if path.is_dir() {
1869                // Recursively count fixtures in subdirectories
1870                count += count_fixtures_in_directory(&path)?;
1871            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
1872                // Count JSON files as fixtures
1873                count += 1;
1874            }
1875        }
1876    }
1877
1878    Ok(count)
1879}
1880
/// Check if a specific route has fixtures
///
/// Looks for JSON fixture files under `http/<method>/<encoded-path>/`,
/// and additionally under `websocket/<encoded-path>/` when the method is
/// "WS". Route paths are encoded by mapping '/' and ':' to '_'.
pub fn route_has_fixtures(method: &str, path: &str) -> bool {
    // Resolve the fixtures root from the environment, defaulting to "fixtures".
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return false;
    }

    // Fixture directories encode the route path with '/' and ':' mapped to '_'.
    let encoded_path = path.replace(['/', ':'], "_");

    // HTTP fixtures live under http/<lowercase-method>/<encoded-path>/.
    let http_dir = fixtures_path.join("http").join(method.to_lowercase()).join(&encoded_path);
    if dir_contains_json(&http_dir) {
        return true;
    }

    // WebSocket fixtures live under websocket/<encoded-path>/.
    if method.to_uppercase() == "WS" {
        let ws_dir = fixtures_path.join("websocket").join(&encoded_path);
        if dir_contains_json(&ws_dir) {
            return true;
        }
    }

    false
}

/// Returns true when `dir` exists and holds at least one `.json` file
/// directly inside it (non-recursive).
fn dir_contains_json(dir: &std::path::Path) -> bool {
    std::fs::read_dir(dir)
        .map(|entries| {
            entries
                .flatten()
                .any(|entry| entry.path().extension().and_then(|s| s.to_str()) == Some("json"))
        })
        .unwrap_or(false)
}
1925
1926/// Calculate average latency for a specific endpoint
1927fn calculate_endpoint_latency(metrics: &RequestMetrics, endpoint: &str) -> Option<u64> {
1928    metrics.response_times_by_endpoint.get(endpoint).and_then(|times| {
1929        if times.is_empty() {
1930            None
1931        } else {
1932            let sum: u64 = times.iter().sum();
1933            Some(sum / times.len() as u64)
1934        }
1935    })
1936}
1937
1938/// Get the last request timestamp for a specific endpoint
1939fn get_endpoint_last_request(
1940    metrics: &RequestMetrics,
1941    endpoint: &str,
1942) -> Option<chrono::DateTime<Utc>> {
1943    metrics.last_request_by_endpoint.get(endpoint).copied()
1944}
1945
1946/// Count total requests for a specific server type
1947fn count_requests_by_server_type(metrics: &RequestMetrics, server_type: &str) -> u64 {
1948    match server_type {
1949        "HTTP" => {
1950            // Count all HTTP requests (GET, POST, PUT, DELETE, etc.)
1951            metrics
1952                .requests_by_endpoint
1953                .iter()
1954                .filter(|(endpoint, _)| {
1955                    let method = endpoint.split(' ').next().unwrap_or("");
1956                    matches!(
1957                        method,
1958                        "GET" | "POST" | "PUT" | "DELETE" | "PATCH" | "HEAD" | "OPTIONS"
1959                    )
1960                })
1961                .map(|(_, count)| count)
1962                .sum()
1963        }
1964        "WebSocket" => {
1965            // Count WebSocket requests (WS method)
1966            metrics
1967                .requests_by_endpoint
1968                .iter()
1969                .filter(|(endpoint, _)| {
1970                    let method = endpoint.split(' ').next().unwrap_or("");
1971                    method == "WS"
1972                })
1973                .map(|(_, count)| count)
1974                .sum()
1975        }
1976        "gRPC" => {
1977            // Count gRPC requests (gRPC method)
1978            metrics
1979                .requests_by_endpoint
1980                .iter()
1981                .filter(|(endpoint, _)| {
1982                    let method = endpoint.split(' ').next().unwrap_or("");
1983                    method == "gRPC"
1984                })
1985                .map(|(_, count)| count)
1986                .sum()
1987        }
1988        _ => 0,
1989    }
1990}
1991
1992/// Get fixtures/replay data
1993pub async fn get_fixtures() -> Json<ApiResponse<Vec<FixtureInfo>>> {
1994    match scan_fixtures_directory() {
1995        Ok(fixtures) => Json(ApiResponse::success(fixtures)),
1996        Err(e) => {
1997            tracing::error!("Failed to scan fixtures directory: {}", e);
1998            Json(ApiResponse::error(format!("Failed to load fixtures: {}", e)))
1999        }
2000    }
2001}
2002
2003/// Scan the fixtures directory and return all fixture information
2004fn scan_fixtures_directory() -> Result<Vec<FixtureInfo>> {
2005    let fixtures_dir =
2006        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2007    let fixtures_path = std::path::Path::new(&fixtures_dir);
2008
2009    if !fixtures_path.exists() {
2010        tracing::warn!("Fixtures directory does not exist: {}", fixtures_dir);
2011        return Ok(Vec::new());
2012    }
2013
2014    let mut all_fixtures = Vec::new();
2015
2016    // Scan HTTP fixtures
2017    let http_fixtures = scan_protocol_fixtures(fixtures_path, "http")?;
2018    all_fixtures.extend(http_fixtures);
2019
2020    // Scan WebSocket fixtures
2021    let ws_fixtures = scan_protocol_fixtures(fixtures_path, "websocket")?;
2022    all_fixtures.extend(ws_fixtures);
2023
2024    // Scan gRPC fixtures
2025    let grpc_fixtures = scan_protocol_fixtures(fixtures_path, "grpc")?;
2026    all_fixtures.extend(grpc_fixtures);
2027
2028    // Sort by saved_at timestamp (newest first)
2029    all_fixtures.sort_by(|a, b| b.saved_at.cmp(&a.saved_at));
2030
2031    tracing::info!("Found {} fixtures in directory: {}", all_fixtures.len(), fixtures_dir);
2032    Ok(all_fixtures)
2033}
2034
2035/// Scan fixtures for a specific protocol
2036fn scan_protocol_fixtures(
2037    fixtures_path: &std::path::Path,
2038    protocol: &str,
2039) -> Result<Vec<FixtureInfo>> {
2040    let protocol_path = fixtures_path.join(protocol);
2041    let mut fixtures = Vec::new();
2042
2043    if !protocol_path.exists() {
2044        return Ok(fixtures);
2045    }
2046
2047    // Walk through the protocol directory recursively
2048    if let Ok(entries) = std::fs::read_dir(&protocol_path) {
2049        for entry in entries {
2050            let entry = entry
2051                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2052            let path = entry.path();
2053
2054            if path.is_dir() {
2055                // Recursively scan subdirectories
2056                let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2057                fixtures.extend(sub_fixtures);
2058            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2059                // Process individual JSON fixture file
2060                if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2061                    fixtures.push(fixture);
2062                }
2063            }
2064        }
2065    }
2066
2067    Ok(fixtures)
2068}
2069
2070/// Recursively scan a directory for fixture files
2071fn scan_directory_recursive(
2072    dir_path: &std::path::Path,
2073    protocol: &str,
2074) -> Result<Vec<FixtureInfo>> {
2075    let mut fixtures = Vec::new();
2076
2077    if let Ok(entries) = std::fs::read_dir(dir_path) {
2078        for entry in entries {
2079            let entry = entry
2080                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2081            let path = entry.path();
2082
2083            if path.is_dir() {
2084                // Recursively scan subdirectories
2085                let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2086                fixtures.extend(sub_fixtures);
2087            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2088                // Process individual JSON fixture file
2089                if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2090                    fixtures.push(fixture);
2091                }
2092            }
2093        }
2094    }
2095
2096    Ok(fixtures)
2097}
2098
2099/// Parse a single fixture file and extract metadata (synchronous version)
2100fn parse_fixture_file_sync(file_path: &std::path::Path, protocol: &str) -> Result<FixtureInfo> {
2101    // Get file metadata
2102    let metadata = std::fs::metadata(file_path)
2103        .map_err(|e| Error::generic(format!("Failed to read file metadata: {}", e)))?;
2104
2105    let file_size = metadata.len();
2106    let modified_time = metadata
2107        .modified()
2108        .map_err(|e| Error::generic(format!("Failed to get file modification time: {}", e)))?;
2109
2110    let saved_at = chrono::DateTime::from(modified_time);
2111
2112    // Read and parse the fixture file (blocking - called from spawn_blocking context)
2113    let content = std::fs::read_to_string(file_path)
2114        .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
2115
2116    let fixture_data: serde_json::Value = serde_json::from_str(&content)
2117        .map_err(|e| Error::generic(format!("Failed to parse fixture JSON: {}", e)))?;
2118
2119    // Extract method and path from the fixture data
2120    let (method, path) = extract_method_and_path(&fixture_data, protocol)?;
2121
2122    // Generate a unique ID based on file path and content
2123    let id = generate_fixture_id(file_path, &content);
2124
2125    // Extract fingerprint from file path or fixture data
2126    let fingerprint = extract_fingerprint(file_path, &fixture_data)?;
2127
2128    // Get relative file path
2129    let fixtures_dir =
2130        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2131    let fixtures_path = std::path::Path::new(&fixtures_dir);
2132    let file_path_str = file_path
2133        .strip_prefix(fixtures_path)
2134        .unwrap_or(file_path)
2135        .to_string_lossy()
2136        .to_string();
2137
2138    Ok(FixtureInfo {
2139        id,
2140        protocol: protocol.to_string(),
2141        method,
2142        path,
2143        saved_at,
2144        file_size,
2145        file_path: file_path_str,
2146        fingerprint,
2147        metadata: fixture_data,
2148    })
2149}
2150
2151/// Extract method and path from fixture data
2152fn extract_method_and_path(
2153    fixture_data: &serde_json::Value,
2154    protocol: &str,
2155) -> Result<(String, String)> {
2156    match protocol {
2157        "http" => {
2158            // For HTTP fixtures, look for request.method and request.path
2159            let method = fixture_data
2160                .get("request")
2161                .and_then(|req| req.get("method"))
2162                .and_then(|m| m.as_str())
2163                .unwrap_or("UNKNOWN")
2164                .to_uppercase();
2165
2166            let path = fixture_data
2167                .get("request")
2168                .and_then(|req| req.get("path"))
2169                .and_then(|p| p.as_str())
2170                .unwrap_or("/unknown")
2171                .to_string();
2172
2173            Ok((method, path))
2174        }
2175        "websocket" => {
2176            // For WebSocket fixtures, use WS method and extract path from metadata
2177            let path = fixture_data
2178                .get("path")
2179                .and_then(|p| p.as_str())
2180                .or_else(|| {
2181                    fixture_data
2182                        .get("request")
2183                        .and_then(|req| req.get("path"))
2184                        .and_then(|p| p.as_str())
2185                })
2186                .unwrap_or("/ws")
2187                .to_string();
2188
2189            Ok(("WS".to_string(), path))
2190        }
2191        "grpc" => {
2192            // For gRPC fixtures, extract service and method
2193            let service =
2194                fixture_data.get("service").and_then(|s| s.as_str()).unwrap_or("UnknownService");
2195
2196            let method =
2197                fixture_data.get("method").and_then(|m| m.as_str()).unwrap_or("UnknownMethod");
2198
2199            let path = format!("/{}/{}", service, method);
2200            Ok(("gRPC".to_string(), path))
2201        }
2202        _ => {
2203            let path = fixture_data
2204                .get("path")
2205                .and_then(|p| p.as_str())
2206                .unwrap_or("/unknown")
2207                .to_string();
2208            Ok((protocol.to_uppercase(), path))
2209        }
2210    }
2211}
2212
/// Generate a unique fixture ID
///
/// Hashes the file path together with the file contents, so fixtures at
/// different locations or with different bodies get distinct IDs. Tuple
/// hashing feeds the same byte stream as hashing each part in sequence.
fn generate_fixture_id(file_path: &std::path::Path, content: &str) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut state = DefaultHasher::new();
    (file_path, content).hash(&mut state);
    format!("fixture_{:x}", state.finish())
}
2223
2224/// Extract fingerprint from file path or fixture data
2225fn extract_fingerprint(
2226    file_path: &std::path::Path,
2227    fixture_data: &serde_json::Value,
2228) -> Result<String> {
2229    // Try to extract from fixture data first
2230    if let Some(fingerprint) = fixture_data.get("fingerprint").and_then(|f| f.as_str()) {
2231        return Ok(fingerprint.to_string());
2232    }
2233
2234    // Try to extract from file path (common pattern: method_path_hash.json)
2235    if let Some(file_name) = file_path.file_stem().and_then(|s| s.to_str()) {
2236        // Look for hash pattern at the end of filename
2237        if let Some(hash) = file_name.split('_').next_back() {
2238            if hash.len() >= 8 && hash.chars().all(|c| c.is_alphanumeric()) {
2239                return Ok(hash.to_string());
2240            }
2241        }
2242    }
2243
2244    // Fallback: generate from file path
2245    use std::collections::hash_map::DefaultHasher;
2246    use std::hash::{Hash, Hasher};
2247
2248    let mut hasher = DefaultHasher::new();
2249    file_path.hash(&mut hasher);
2250    Ok(format!("{:x}", hasher.finish()))
2251}
2252
2253/// Delete a fixture
2254pub async fn delete_fixture(
2255    Json(payload): Json<FixtureDeleteRequest>,
2256) -> Json<ApiResponse<String>> {
2257    match delete_fixture_by_id(&payload.fixture_id).await {
2258        Ok(_) => {
2259            tracing::info!("Successfully deleted fixture: {}", payload.fixture_id);
2260            Json(ApiResponse::success("Fixture deleted successfully".to_string()))
2261        }
2262        Err(e) => {
2263            tracing::error!("Failed to delete fixture {}: {}", payload.fixture_id, e);
2264            Json(ApiResponse::error(format!("Failed to delete fixture: {}", e)))
2265        }
2266    }
2267}
2268
2269/// Delete multiple fixtures
2270pub async fn delete_fixtures_bulk(
2271    Json(payload): Json<FixtureBulkDeleteRequest>,
2272) -> Json<ApiResponse<FixtureBulkDeleteResult>> {
2273    let mut deleted_count = 0;
2274    let mut errors = Vec::new();
2275
2276    for fixture_id in &payload.fixture_ids {
2277        match delete_fixture_by_id(fixture_id).await {
2278            Ok(_) => {
2279                deleted_count += 1;
2280                tracing::info!("Successfully deleted fixture: {}", fixture_id);
2281            }
2282            Err(e) => {
2283                errors.push(format!("Failed to delete {}: {}", fixture_id, e));
2284                tracing::error!("Failed to delete fixture {}: {}", fixture_id, e);
2285            }
2286        }
2287    }
2288
2289    let result = FixtureBulkDeleteResult {
2290        deleted_count,
2291        total_requested: payload.fixture_ids.len(),
2292        errors: errors.clone(),
2293    };
2294
2295    if errors.is_empty() {
2296        Json(ApiResponse::success(result))
2297    } else {
2298        Json(ApiResponse::error(format!(
2299            "Partial success: {} deleted, {} errors",
2300            deleted_count,
2301            errors.len()
2302        )))
2303    }
2304}
2305
/// Delete a single fixture by ID
///
/// Locates the fixture's backing JSON file by re-deriving IDs during a
/// directory scan (robust against ID-format changes), removes the file on
/// the blocking thread pool, then prunes any parent directories left
/// empty (best-effort).
///
/// # Errors
/// Returns an error when the fixtures directory is missing, the ID
/// matches no file, the deletion fails, or the blocking task fails to
/// join.
async fn delete_fixture_by_id(fixture_id: &str) -> Result<()> {
    // First, try to find the fixture by scanning the fixtures directory
    // This is more robust than trying to parse the ID format
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    // Search for the fixture file by ID across all protocols
    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    // Delete the file using spawn_blocking (fs::remove_file is blocking I/O)
    let file_path_clone = file_path.clone();
    tokio::task::spawn_blocking(move || {
        if file_path_clone.exists() {
            std::fs::remove_file(&file_path_clone).map_err(|e| {
                Error::generic(format!(
                    "Failed to delete fixture file {}: {}",
                    file_path_clone.display(),
                    e
                ))
            })
        } else {
            Err(Error::generic(format!("Fixture file not found: {}", file_path_clone.display())))
        }
    })
    .await
    // Double `?`: the first unwraps the task-join Result (mapped to our
    // error type), the second unwraps the deletion Result from the closure.
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Deleted fixture file: {}", file_path.display());

    // Also try to remove empty parent directories
    cleanup_empty_directories(&file_path).await;

    Ok(())
}
2346
2347/// Find a fixture file by its ID across all protocols
2348fn find_fixture_file_by_id(
2349    fixtures_path: &std::path::Path,
2350    fixture_id: &str,
2351) -> Result<std::path::PathBuf> {
2352    // Search in all protocol directories
2353    let protocols = ["http", "websocket", "grpc"];
2354
2355    for protocol in &protocols {
2356        let protocol_path = fixtures_path.join(protocol);
2357        if let Ok(found_path) = search_fixture_in_directory(&protocol_path, fixture_id) {
2358            return Ok(found_path);
2359        }
2360    }
2361
2362    Err(Error::generic(format!(
2363        "Fixture with ID '{}' not found in any protocol directory",
2364        fixture_id
2365    )))
2366}
2367
2368/// Recursively search for a fixture file by ID in a directory
2369fn search_fixture_in_directory(
2370    dir_path: &std::path::Path,
2371    fixture_id: &str,
2372) -> Result<std::path::PathBuf> {
2373    if !dir_path.exists() {
2374        return Err(Error::generic(format!("Directory does not exist: {}", dir_path.display())));
2375    }
2376
2377    if let Ok(entries) = std::fs::read_dir(dir_path) {
2378        for entry in entries {
2379            let entry = entry
2380                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2381            let path = entry.path();
2382
2383            if path.is_dir() {
2384                // Recursively search subdirectories
2385                if let Ok(found_path) = search_fixture_in_directory(&path, fixture_id) {
2386                    return Ok(found_path);
2387                }
2388            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2389                // Check if this file matches the fixture ID
2390                if let Ok(fixture_info) = parse_fixture_file_sync(&path, "unknown") {
2391                    if fixture_info.id == fixture_id {
2392                        return Ok(path);
2393                    }
2394                }
2395            }
2396        }
2397    }
2398
2399    Err(Error::generic(format!(
2400        "Fixture not found in directory: {}",
2401        dir_path.display()
2402    )))
2403}
2404
2405/// Clean up empty directories after file deletion
2406async fn cleanup_empty_directories(file_path: &std::path::Path) {
2407    let file_path = file_path.to_path_buf();
2408
2409    // Use spawn_blocking for directory operations
2410    let _ = tokio::task::spawn_blocking(move || {
2411        if let Some(parent) = file_path.parent() {
2412            // Try to remove empty directories up to the protocol level
2413            let mut current = parent;
2414            let fixtures_dir =
2415                std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2416            let fixtures_path = std::path::Path::new(&fixtures_dir);
2417
2418            while current != fixtures_path && current.parent().is_some() {
2419                if let Ok(entries) = std::fs::read_dir(current) {
2420                    if entries.count() == 0 {
2421                        if let Err(e) = std::fs::remove_dir(current) {
2422                            tracing::debug!(
2423                                "Failed to remove empty directory {}: {}",
2424                                current.display(),
2425                                e
2426                            );
2427                            break;
2428                        } else {
2429                            tracing::debug!("Removed empty directory: {}", current.display());
2430                        }
2431                    } else {
2432                        break;
2433                    }
2434                } else {
2435                    break;
2436                }
2437
2438                if let Some(next_parent) = current.parent() {
2439                    current = next_parent;
2440                } else {
2441                    break;
2442                }
2443            }
2444        }
2445    })
2446    .await;
2447}
2448
2449/// Download a fixture file
2450pub async fn download_fixture(Path(fixture_id): Path<String>) -> impl IntoResponse {
2451    // Find and read the fixture file
2452    match download_fixture_by_id(&fixture_id).await {
2453        Ok((content, file_name)) => (
2454            StatusCode::OK,
2455            [
2456                (http::header::CONTENT_TYPE, "application/json".to_string()),
2457                (
2458                    http::header::CONTENT_DISPOSITION,
2459                    format!("attachment; filename=\"{}\"", file_name),
2460                ),
2461            ],
2462            content,
2463        )
2464            .into_response(),
2465        Err(e) => {
2466            tracing::error!("Failed to download fixture {}: {}", fixture_id, e);
2467            let error_response = format!(r#"{{"error": "Failed to download fixture: {}"}}"#, e);
2468            (
2469                StatusCode::NOT_FOUND,
2470                [(http::header::CONTENT_TYPE, "application/json".to_string())],
2471                error_response,
2472            )
2473                .into_response()
2474        }
2475    }
2476}
2477
2478/// Download a fixture file by ID
2479async fn download_fixture_by_id(fixture_id: &str) -> Result<(String, String)> {
2480    // Find the fixture file by ID
2481    let fixtures_dir =
2482        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2483    let fixtures_path = std::path::Path::new(&fixtures_dir);
2484
2485    if !fixtures_path.exists() {
2486        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2487    }
2488
2489    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2490
2491    // Read the file content using spawn_blocking
2492    let file_path_clone = file_path.clone();
2493    let (content, file_name) = tokio::task::spawn_blocking(move || {
2494        let content = std::fs::read_to_string(&file_path_clone)
2495            .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
2496
2497        let file_name = file_path_clone
2498            .file_name()
2499            .and_then(|name| name.to_str())
2500            .unwrap_or("fixture.json")
2501            .to_string();
2502
2503        Ok::<_, Error>((content, file_name))
2504    })
2505    .await
2506    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2507
2508    tracing::info!("Downloaded fixture file: {} ({} bytes)", file_path.display(), content.len());
2509    Ok((content, file_name))
2510}
2511
2512/// Rename a fixture
2513pub async fn rename_fixture(
2514    Path(fixture_id): Path<String>,
2515    Json(payload): Json<FixtureRenameRequest>,
2516) -> Json<ApiResponse<String>> {
2517    match rename_fixture_by_id(&fixture_id, &payload.new_name).await {
2518        Ok(new_path) => {
2519            tracing::info!("Successfully renamed fixture: {} -> {}", fixture_id, payload.new_name);
2520            Json(ApiResponse::success(format!("Fixture renamed successfully to: {}", new_path)))
2521        }
2522        Err(e) => {
2523            tracing::error!("Failed to rename fixture {}: {}", fixture_id, e);
2524            Json(ApiResponse::error(format!("Failed to rename fixture: {}", e)))
2525        }
2526    }
2527}
2528
2529/// Rename a fixture by ID
2530async fn rename_fixture_by_id(fixture_id: &str, new_name: &str) -> Result<String> {
2531    // Validate new name
2532    if new_name.is_empty() {
2533        return Err(Error::generic("New name cannot be empty".to_string()));
2534    }
2535
2536    // Ensure new name ends with .json
2537    let new_name = if new_name.ends_with(".json") {
2538        new_name.to_string()
2539    } else {
2540        format!("{}.json", new_name)
2541    };
2542
2543    // Find the fixture file
2544    let fixtures_dir =
2545        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2546    let fixtures_path = std::path::Path::new(&fixtures_dir);
2547
2548    if !fixtures_path.exists() {
2549        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2550    }
2551
2552    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2553
2554    // Get the parent directory and construct new path
2555    let parent = old_path
2556        .parent()
2557        .ok_or_else(|| Error::generic("Could not determine parent directory".to_string()))?;
2558
2559    let new_path = parent.join(&new_name);
2560
2561    // Check if target already exists
2562    if new_path.exists() {
2563        return Err(Error::generic(format!(
2564            "A fixture with name '{}' already exists in the same directory",
2565            new_name
2566        )));
2567    }
2568
2569    // Rename the file using spawn_blocking
2570    let old_path_clone = old_path.clone();
2571    let new_path_clone = new_path.clone();
2572    tokio::task::spawn_blocking(move || {
2573        std::fs::rename(&old_path_clone, &new_path_clone)
2574            .map_err(|e| Error::generic(format!("Failed to rename fixture file: {}", e)))
2575    })
2576    .await
2577    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2578
2579    tracing::info!("Renamed fixture file: {} -> {}", old_path.display(), new_path.display());
2580
2581    // Return relative path for display
2582    Ok(new_path
2583        .strip_prefix(fixtures_path)
2584        .unwrap_or(&new_path)
2585        .to_string_lossy()
2586        .to_string())
2587}
2588
2589/// Move a fixture to a new path
2590pub async fn move_fixture(
2591    Path(fixture_id): Path<String>,
2592    Json(payload): Json<FixtureMoveRequest>,
2593) -> Json<ApiResponse<String>> {
2594    match move_fixture_by_id(&fixture_id, &payload.new_path).await {
2595        Ok(new_location) => {
2596            tracing::info!("Successfully moved fixture: {} -> {}", fixture_id, payload.new_path);
2597            Json(ApiResponse::success(format!("Fixture moved successfully to: {}", new_location)))
2598        }
2599        Err(e) => {
2600            tracing::error!("Failed to move fixture {}: {}", fixture_id, e);
2601            Json(ApiResponse::error(format!("Failed to move fixture: {}", e)))
2602        }
2603    }
2604}
2605
2606/// Move a fixture by ID to a new path
2607async fn move_fixture_by_id(fixture_id: &str, new_path: &str) -> Result<String> {
2608    // Validate new path
2609    if new_path.is_empty() {
2610        return Err(Error::generic("New path cannot be empty".to_string()));
2611    }
2612
2613    // Find the fixture file
2614    let fixtures_dir =
2615        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2616    let fixtures_path = std::path::Path::new(&fixtures_dir);
2617
2618    if !fixtures_path.exists() {
2619        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2620    }
2621
2622    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2623
2624    // Construct the new path - can be either relative to fixtures_dir or absolute within it
2625    let new_full_path = if new_path.starts_with('/') {
2626        // Absolute path within fixtures directory
2627        fixtures_path.join(new_path.trim_start_matches('/'))
2628    } else {
2629        // Relative path from fixtures directory
2630        fixtures_path.join(new_path)
2631    };
2632
2633    // Ensure target ends with .json if it doesn't already
2634    let new_full_path = if new_full_path.extension().and_then(|s| s.to_str()) == Some("json") {
2635        new_full_path
2636    } else {
2637        // If the path is a directory or doesn't have .json extension, append the original filename
2638        if new_full_path.is_dir() || !new_path.contains('.') {
2639            let file_name = old_path.file_name().ok_or_else(|| {
2640                Error::generic("Could not determine original file name".to_string())
2641            })?;
2642            new_full_path.join(file_name)
2643        } else {
2644            new_full_path.with_extension("json")
2645        }
2646    };
2647
2648    // Check if target already exists
2649    if new_full_path.exists() {
2650        return Err(Error::generic(format!(
2651            "A fixture already exists at path: {}",
2652            new_full_path.display()
2653        )));
2654    }
2655
2656    // Create parent directories and move file using spawn_blocking
2657    let old_path_clone = old_path.clone();
2658    let new_full_path_clone = new_full_path.clone();
2659    tokio::task::spawn_blocking(move || {
2660        // Create parent directories if they don't exist
2661        if let Some(parent) = new_full_path_clone.parent() {
2662            std::fs::create_dir_all(parent)
2663                .map_err(|e| Error::generic(format!("Failed to create target directory: {}", e)))?;
2664        }
2665
2666        // Move the file
2667        std::fs::rename(&old_path_clone, &new_full_path_clone)
2668            .map_err(|e| Error::generic(format!("Failed to move fixture file: {}", e)))
2669    })
2670    .await
2671    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2672
2673    tracing::info!("Moved fixture file: {} -> {}", old_path.display(), new_full_path.display());
2674
2675    // Clean up empty directories from the old location
2676    cleanup_empty_directories(&old_path).await;
2677
2678    // Return relative path for display
2679    Ok(new_full_path
2680        .strip_prefix(fixtures_path)
2681        .unwrap_or(&new_full_path)
2682        .to_string_lossy()
2683        .to_string())
2684}
2685
2686/// Get current validation settings
2687pub async fn get_validation(
2688    State(state): State<AdminState>,
2689) -> Json<ApiResponse<ValidationSettings>> {
2690    // Get real validation settings from configuration
2691    let config_state = state.get_config().await;
2692
2693    Json(ApiResponse::success(config_state.validation_settings))
2694}
2695
2696/// Update validation settings
2697pub async fn update_validation(
2698    State(state): State<AdminState>,
2699    Json(update): Json<ValidationUpdate>,
2700) -> Json<ApiResponse<String>> {
2701    // Validate the mode
2702    match update.mode.as_str() {
2703        "enforce" | "warn" | "off" => {}
2704        _ => {
2705            return Json(ApiResponse::error(
2706                "Invalid validation mode. Must be 'enforce', 'warn', or 'off'".to_string(),
2707            ))
2708        }
2709    }
2710
2711    // Update the actual validation configuration
2712    let mode = update.mode.clone();
2713    state
2714        .update_validation_config(
2715            update.mode,
2716            update.aggregate_errors,
2717            update.validate_responses,
2718            update.overrides.unwrap_or_default(),
2719        )
2720        .await;
2721
2722    tracing::info!(
2723        "Updated validation settings: mode={}, aggregate_errors={}",
2724        mode,
2725        update.aggregate_errors
2726    );
2727
2728    Json(ApiResponse::success("Validation settings updated".to_string()))
2729}
2730
2731/// Get environment variables
2732pub async fn get_env_vars() -> Json<ApiResponse<HashMap<String, String>>> {
2733    // Get actual environment variables that are relevant to MockForge
2734    let mut env_vars = HashMap::new();
2735
2736    let relevant_vars = [
2737        // Core functionality
2738        "MOCKFORGE_LATENCY_ENABLED",
2739        "MOCKFORGE_FAILURES_ENABLED",
2740        "MOCKFORGE_PROXY_ENABLED",
2741        "MOCKFORGE_RECORD_ENABLED",
2742        "MOCKFORGE_REPLAY_ENABLED",
2743        "MOCKFORGE_LOG_LEVEL",
2744        "MOCKFORGE_CONFIG_FILE",
2745        "RUST_LOG",
2746        // HTTP server configuration
2747        "MOCKFORGE_HTTP_PORT",
2748        "MOCKFORGE_HTTP_HOST",
2749        "MOCKFORGE_HTTP_OPENAPI_SPEC",
2750        "MOCKFORGE_CORS_ENABLED",
2751        "MOCKFORGE_REQUEST_TIMEOUT_SECS",
2752        // WebSocket server configuration
2753        "MOCKFORGE_WS_PORT",
2754        "MOCKFORGE_WS_HOST",
2755        "MOCKFORGE_WS_REPLAY_FILE",
2756        "MOCKFORGE_WS_CONNECTION_TIMEOUT_SECS",
2757        // gRPC server configuration
2758        "MOCKFORGE_GRPC_PORT",
2759        "MOCKFORGE_GRPC_HOST",
2760        // Admin UI configuration
2761        "MOCKFORGE_ADMIN_ENABLED",
2762        "MOCKFORGE_ADMIN_PORT",
2763        "MOCKFORGE_ADMIN_HOST",
2764        "MOCKFORGE_ADMIN_MOUNT_PATH",
2765        "MOCKFORGE_ADMIN_API_ENABLED",
2766        // Template and validation
2767        "MOCKFORGE_RESPONSE_TEMPLATE_EXPAND",
2768        "MOCKFORGE_REQUEST_VALIDATION",
2769        "MOCKFORGE_AGGREGATE_ERRORS",
2770        "MOCKFORGE_RESPONSE_VALIDATION",
2771        "MOCKFORGE_VALIDATION_STATUS",
2772        // Data generation
2773        "MOCKFORGE_RAG_ENABLED",
2774        "MOCKFORGE_FAKE_TOKENS",
2775        // Other settings
2776        "MOCKFORGE_FIXTURES_DIR",
2777    ];
2778
2779    for var_name in &relevant_vars {
2780        if let Ok(value) = std::env::var(var_name) {
2781            env_vars.insert(var_name.to_string(), value);
2782        }
2783    }
2784
2785    Json(ApiResponse::success(env_vars))
2786}
2787
2788/// Update environment variable
2789pub async fn update_env_var(Json(update): Json<EnvVarUpdate>) -> Json<ApiResponse<String>> {
2790    // Set the environment variable (runtime only - not persisted)
2791    std::env::set_var(&update.key, &update.value);
2792
2793    tracing::info!("Updated environment variable: {}={}", update.key, update.value);
2794
2795    // Note: Environment variables set at runtime are not persisted
2796    // In a production system, you might want to write to a .env file or config file
2797    Json(ApiResponse::success(format!(
2798        "Environment variable {} updated to '{}'. Note: This change is not persisted and will be lost on restart.",
2799        update.key, update.value
2800    )))
2801}
2802
2803/// Get file content
2804pub async fn get_file_content(
2805    Json(request): Json<FileContentRequest>,
2806) -> Json<ApiResponse<String>> {
2807    // Validate the file path for security
2808    if let Err(e) = validate_file_path(&request.file_path) {
2809        return Json(ApiResponse::error(format!("Invalid file path: {}", e)));
2810    }
2811
2812    // Read the actual file content
2813    match tokio::fs::read_to_string(&request.file_path).await {
2814        Ok(content) => {
2815            // Validate the file content for security
2816            if let Err(e) = validate_file_content(&content) {
2817                return Json(ApiResponse::error(format!("Invalid file content: {}", e)));
2818            }
2819            Json(ApiResponse::success(content))
2820        }
2821        Err(e) => Json(ApiResponse::error(format!("Failed to read file: {}", e))),
2822    }
2823}
2824
2825/// Save file content
2826pub async fn save_file_content(Json(request): Json<FileSaveRequest>) -> Json<ApiResponse<String>> {
2827    match save_file_to_filesystem(&request.file_path, &request.content).await {
2828        Ok(_) => {
2829            tracing::info!("Successfully saved file: {}", request.file_path);
2830            Json(ApiResponse::success("File saved successfully".to_string()))
2831        }
2832        Err(e) => {
2833            tracing::error!("Failed to save file {}: {}", request.file_path, e);
2834            Json(ApiResponse::error(format!("Failed to save file: {}", e)))
2835        }
2836    }
2837}
2838
2839/// Save content to a file on the filesystem
2840async fn save_file_to_filesystem(file_path: &str, content: &str) -> Result<()> {
2841    // Validate the file path for security
2842    validate_file_path(file_path)?;
2843
2844    // Validate the file content for security
2845    validate_file_content(content)?;
2846
2847    // Convert to PathBuf and clone data for spawn_blocking
2848    let path = std::path::PathBuf::from(file_path);
2849    let content = content.to_string();
2850
2851    // Perform file operations using spawn_blocking
2852    let path_clone = path.clone();
2853    let content_clone = content.clone();
2854    tokio::task::spawn_blocking(move || {
2855        // Create parent directories if they don't exist
2856        if let Some(parent) = path_clone.parent() {
2857            std::fs::create_dir_all(parent).map_err(|e| {
2858                Error::generic(format!("Failed to create directory {}: {}", parent.display(), e))
2859            })?;
2860        }
2861
2862        // Write the content to the file
2863        std::fs::write(&path_clone, &content_clone).map_err(|e| {
2864            Error::generic(format!("Failed to write file {}: {}", path_clone.display(), e))
2865        })?;
2866
2867        // Verify the file was written correctly
2868        let written_content = std::fs::read_to_string(&path_clone).map_err(|e| {
2869            Error::generic(format!("Failed to verify written file {}: {}", path_clone.display(), e))
2870        })?;
2871
2872        if written_content != content_clone {
2873            return Err(Error::generic(format!(
2874                "File content verification failed for {}",
2875                path_clone.display()
2876            )));
2877        }
2878
2879        Ok::<_, Error>(())
2880    })
2881    .await
2882    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2883
2884    tracing::info!("File saved successfully: {} ({} bytes)", path.display(), content.len());
2885    Ok(())
2886}
2887
2888/// Validate file path for security
2889fn validate_file_path(file_path: &str) -> Result<()> {
2890    // Check for path traversal attacks
2891    if file_path.contains("..") {
2892        return Err(Error::generic("Path traversal detected in file path".to_string()));
2893    }
2894
2895    // Check for absolute paths that might be outside allowed directories
2896    let path = std::path::Path::new(file_path);
2897    if path.is_absolute() {
2898        // For absolute paths, ensure they're within allowed directories
2899        let allowed_dirs = [
2900            std::env::current_dir().unwrap_or_default(),
2901            std::path::PathBuf::from("."),
2902            std::path::PathBuf::from("fixtures"),
2903            std::path::PathBuf::from("config"),
2904        ];
2905
2906        let mut is_allowed = false;
2907        for allowed_dir in &allowed_dirs {
2908            if path.starts_with(allowed_dir) {
2909                is_allowed = true;
2910                break;
2911            }
2912        }
2913
2914        if !is_allowed {
2915            return Err(Error::generic("File path is outside allowed directories".to_string()));
2916        }
2917    }
2918
2919    // Check for dangerous file extensions or names
2920    let dangerous_extensions = ["exe", "bat", "cmd", "sh", "ps1", "scr", "com"];
2921    if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
2922        if dangerous_extensions.contains(&extension.to_lowercase().as_str()) {
2923            return Err(Error::generic(format!(
2924                "Dangerous file extension not allowed: {}",
2925                extension
2926            )));
2927        }
2928    }
2929
2930    Ok(())
2931}
2932
2933/// Validate file content for security
2934fn validate_file_content(content: &str) -> Result<()> {
2935    // Check for reasonable file size (prevent DoS)
2936    if content.len() > 10 * 1024 * 1024 {
2937        // 10MB limit
2938        return Err(Error::generic("File content too large (max 10MB)".to_string()));
2939    }
2940
2941    // Check for null bytes (potential security issue)
2942    if content.contains('\0') {
2943        return Err(Error::generic("File content contains null bytes".to_string()));
2944    }
2945
2946    Ok(())
2947}
2948
/// Fixture delete request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureDeleteRequest {
    /// ID of the fixture to delete.
    pub fixture_id: String,
}
2954
/// Environment variable update
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvVarUpdate {
    /// Environment variable name.
    pub key: String,
    /// New value; applied at runtime only (not persisted).
    pub value: String,
}
2961
/// Fixture bulk delete request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteRequest {
    /// IDs of the fixtures to delete.
    pub fixture_ids: Vec<String>,
}
2967
/// Fixture bulk delete result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteResult {
    /// Number of fixtures successfully deleted.
    pub deleted_count: usize,
    /// Number of fixture IDs in the original request.
    pub total_requested: usize,
    /// Per-fixture error messages for the deletions that failed.
    pub errors: Vec<String>,
}
2975
/// Fixture rename request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureRenameRequest {
    /// New file name; a ".json" extension is appended when missing.
    pub new_name: String,
}
2981
/// Fixture move request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureMoveRequest {
    /// Target path, interpreted relative to the fixtures directory.
    pub new_path: String,
}
2987
/// File content request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContentRequest {
    /// Path of the file to read (validated against traversal rules).
    pub file_path: String,
    /// Client-declared file type; not used by the read handler itself.
    pub file_type: String,
}
2994
/// File save request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileSaveRequest {
    /// Path of the file to write (validated against traversal rules).
    pub file_path: String,
    /// Content to write (max 10MB, no NUL bytes).
    pub content: String,
}
3001
3002/// Get smoke tests
3003pub async fn get_smoke_tests(
3004    State(state): State<AdminState>,
3005) -> Json<ApiResponse<Vec<SmokeTestResult>>> {
3006    let results = state.get_smoke_test_results().await;
3007    Json(ApiResponse::success(results))
3008}
3009
3010/// Run smoke tests endpoint
3011pub async fn run_smoke_tests_endpoint(
3012    State(state): State<AdminState>,
3013) -> Json<ApiResponse<String>> {
3014    tracing::info!("Starting smoke test execution");
3015
3016    // Spawn smoke test execution in background to avoid blocking
3017    let state_clone = state.clone();
3018    tokio::spawn(async move {
3019        if let Err(e) = execute_smoke_tests(&state_clone).await {
3020            tracing::error!("Smoke test execution failed: {}", e);
3021        } else {
3022            tracing::info!("Smoke test execution completed successfully");
3023        }
3024    });
3025
3026    Json(ApiResponse::success(
3027        "Smoke tests started. Check results in the smoke tests section.".to_string(),
3028    ))
3029}
3030
3031/// Execute smoke tests against fixtures
3032async fn execute_smoke_tests(state: &AdminState) -> Result<()> {
3033    // Get base URL from environment or use default
3034    let base_url =
3035        std::env::var("MOCKFORGE_BASE_URL").unwrap_or_else(|_| "http://localhost:3000".to_string());
3036
3037    let context = SmokeTestContext {
3038        base_url,
3039        timeout_seconds: 30,
3040        parallel: true,
3041    };
3042
3043    // Get all fixtures to create smoke tests from
3044    let fixtures = scan_fixtures_directory()?;
3045
3046    // Filter for HTTP fixtures only (smoke tests are typically HTTP)
3047    let http_fixtures: Vec<&FixtureInfo> =
3048        fixtures.iter().filter(|f| f.protocol == "http").collect();
3049
3050    if http_fixtures.is_empty() {
3051        tracing::warn!("No HTTP fixtures found for smoke testing");
3052        return Ok(());
3053    }
3054
3055    tracing::info!("Running smoke tests for {} HTTP fixtures", http_fixtures.len());
3056
3057    // Create smoke test results from fixtures
3058    let mut test_results = Vec::new();
3059
3060    for fixture in http_fixtures {
3061        let test_result = create_smoke_test_from_fixture(fixture);
3062        test_results.push(test_result);
3063    }
3064
3065    // Execute tests
3066    let mut executed_results = Vec::new();
3067    for mut test_result in test_results {
3068        // Update status to running
3069        test_result.status = "running".to_string();
3070        state.update_smoke_test_result(test_result.clone()).await;
3071
3072        // Execute the test
3073        let start_time = std::time::Instant::now();
3074        match execute_single_smoke_test(&test_result, &context).await {
3075            Ok((status_code, response_time_ms)) => {
3076                test_result.status = "passed".to_string();
3077                test_result.status_code = Some(status_code);
3078                test_result.response_time_ms = Some(response_time_ms);
3079                test_result.error_message = None;
3080            }
3081            Err(e) => {
3082                test_result.status = "failed".to_string();
3083                test_result.error_message = Some(e.to_string());
3084                test_result.status_code = None;
3085                test_result.response_time_ms = None;
3086            }
3087        }
3088
3089        let duration = start_time.elapsed();
3090        test_result.duration_seconds = Some(duration.as_secs_f64());
3091        test_result.last_run = Some(Utc::now());
3092
3093        executed_results.push(test_result.clone());
3094        state.update_smoke_test_result(test_result).await;
3095    }
3096
3097    tracing::info!("Smoke test execution completed: {} tests run", executed_results.len());
3098    Ok(())
3099}
3100
3101/// Create a smoke test result from a fixture
3102fn create_smoke_test_from_fixture(fixture: &FixtureInfo) -> SmokeTestResult {
3103    let test_name = format!("{} {}", fixture.method, fixture.path);
3104    let description = format!("Smoke test for {} endpoint", fixture.path);
3105
3106    SmokeTestResult {
3107        id: format!("smoke_{}", fixture.id),
3108        name: test_name,
3109        method: fixture.method.clone(),
3110        path: fixture.path.clone(),
3111        description,
3112        last_run: None,
3113        status: "pending".to_string(),
3114        response_time_ms: None,
3115        error_message: None,
3116        status_code: None,
3117        duration_seconds: None,
3118    }
3119}
3120
3121/// Execute a single smoke test
3122async fn execute_single_smoke_test(
3123    test: &SmokeTestResult,
3124    context: &SmokeTestContext,
3125) -> Result<(u16, u64)> {
3126    let url = format!("{}{}", context.base_url, test.path);
3127    let client = reqwest::Client::builder()
3128        .timeout(Duration::from_secs(context.timeout_seconds))
3129        .build()
3130        .map_err(|e| Error::generic(format!("Failed to create HTTP client: {}", e)))?;
3131
3132    let start_time = std::time::Instant::now();
3133
3134    let response = match test.method.as_str() {
3135        "GET" => client.get(&url).send().await,
3136        "POST" => client.post(&url).send().await,
3137        "PUT" => client.put(&url).send().await,
3138        "DELETE" => client.delete(&url).send().await,
3139        "PATCH" => client.patch(&url).send().await,
3140        "HEAD" => client.head(&url).send().await,
3141        "OPTIONS" => client.request(reqwest::Method::OPTIONS, &url).send().await,
3142        _ => {
3143            return Err(Error::generic(format!("Unsupported HTTP method: {}", test.method)));
3144        }
3145    };
3146
3147    let response_time = start_time.elapsed();
3148    let response_time_ms = response_time.as_millis() as u64;
3149
3150    match response {
3151        Ok(resp) => {
3152            let status_code = resp.status().as_u16();
3153            if (200..400).contains(&status_code) {
3154                Ok((status_code, response_time_ms))
3155            } else {
3156                Err(Error::generic(format!(
3157                    "HTTP error: {} {}",
3158                    status_code,
3159                    resp.status().canonical_reason().unwrap_or("Unknown")
3160                )))
3161            }
3162        }
3163        Err(e) => Err(Error::generic(format!("Request failed: {}", e))),
3164    }
3165}
3166
/// Request body for installing a plugin via the admin API.
#[derive(Debug, Deserialize)]
pub struct PluginInstallRequest {
    /// Plugin source: local path, URL, git reference, or registry spec
    /// (parsed by `PluginSource::parse`).
    pub source: String,
    /// Force installation even if the plugin already exists.
    #[serde(default)]
    pub force: bool,
    /// Skip manifest validation; the admin install flow rejects this.
    #[serde(default)]
    pub skip_validation: bool,
    /// Disable signature verification; the admin install flow rejects this.
    #[serde(default)]
    pub no_verify: bool,
    /// Optional checksum used to verify URL downloads.
    pub checksum: Option<String>,
}
3178
/// Request body for validating a plugin source via the admin API.
#[derive(Debug, Deserialize)]
pub struct PluginValidateRequest {
    /// Plugin source (path, URL, git reference, or registry spec).
    pub source: String,
}
3183
/// Locate the directory containing a `plugin.yaml` manifest.
///
/// Returns `path` itself when it holds the manifest directly; otherwise
/// returns the first immediate child directory that does. Returns `None`
/// when no manifest is found or the directory cannot be read.
fn find_plugin_directory(path: &std::path::Path) -> Option<std::path::PathBuf> {
    if path.join("plugin.yaml").exists() {
        return Some(path.to_path_buf());
    }

    // Only one level deep: scan immediate children for the manifest.
    std::fs::read_dir(path)
        .ok()?
        .filter_map(|entry| entry.ok())
        .map(|entry| entry.path())
        .find(|child| child.is_dir() && child.join("plugin.yaml").exists())
}
3199
/// Resolve a parsed plugin source to a local filesystem path.
///
/// Local sources pass through unchanged; URL sources are downloaded
/// (with optional checksum verification) and Git sources are cloned.
/// Registry sources are not yet supported from the admin API and return
/// an error. All failures are reported as human-readable strings.
async fn resolve_plugin_source_path(
    source: PluginSource,
    checksum: Option<&str>,
) -> std::result::Result<std::path::PathBuf, String> {
    match source {
        // Already on disk — nothing to fetch.
        PluginSource::Local(path) => Ok(path),
        PluginSource::Url { url, .. } => {
            // The remote loader verifies the checksum (when provided) as
            // part of the download.
            let loader = RemotePluginLoader::new(RemotePluginConfig::default())
                .map_err(|e| format!("Failed to initialize remote plugin loader: {}", e))?;
            loader
                .download_with_checksum(&url, checksum)
                .await
                .map_err(|e| format!("Failed to download plugin from URL: {}", e))
        }
        PluginSource::Git(git_source) => {
            let loader = GitPluginLoader::new(GitPluginConfig::default())
                .map_err(|e| format!("Failed to initialize git plugin loader: {}", e))?;
            loader
                .clone_from_git(&git_source)
                .await
                .map_err(|e| format!("Failed to clone plugin from git: {}", e))
        }
        PluginSource::Registry { name, version } => Err(format!(
            "Registry plugin installation is not yet supported from the admin API (requested {}@{})",
            name,
            version.unwrap_or_else(|| "latest".to_string())
        )),
    }
}
3229
/// Install a plugin from a path, URL, or Git source and register it in-process.
///
/// Flow:
/// 1. Reject empty sources and the unsupported `skip_validation` / `no_verify`
///    flags up front.
/// 2. On a blocking worker thread, resolve the source to a local directory,
///    validate its manifest, and load it via a dedicated `PluginLoader`.
/// 3. Register the loaded instance in the shared admin `PluginRegistry`
///    (replacing an existing plugin of the same id only when `force` is set).
///
/// Returns a JSON envelope with `success` and either `data`
/// (`plugin_id` / `name` / `version`) or `error`.
pub async fn install_plugin(
    State(state): State<AdminState>,
    Json(request): Json<PluginInstallRequest>,
) -> impl IntoResponse {
    let source = request.source.trim().to_string();
    if source.is_empty() {
        return Json(json!({
            "success": false,
            "error": "Plugin source is required"
        }));
    }

    // These request flags exist for CLI parity but are deliberately rejected
    // in the admin flow: installs here always validate and verify.
    if request.skip_validation {
        return Json(json!({
            "success": false,
            "error": "Skipping validation is not supported in admin install flow."
        }));
    }

    if request.no_verify {
        return Json(json!({
            "success": false,
            "error": "Disabling signature verification is not supported in admin install flow."
        }));
    }

    let force = request.force;
    let checksum = request.checksum.clone();
    // Clone the state handle so the blocking closure below can own it.
    let state_for_install = state.clone();

    // Plugin resolution/loading does blocking filesystem work, so it runs on a
    // blocking worker thread. The loader's async API is driven there by a
    // private single-threaded runtime, which also makes the synchronous
    // `blocking_write` on the registry safe (we are off the async runtime).
    let install_result = tokio::task::spawn_blocking(
        move || -> std::result::Result<(String, String, String), String> {
            let runtime = tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .map_err(|e| format!("Failed to initialize install runtime: {}", e))?;

            let (plugin_instance, plugin_id, plugin_name, plugin_version) =
                runtime.block_on(async move {
                    let parsed_source = PluginSource::parse(&source)
                        .map_err(|e| format!("Invalid plugin source: {}", e))?;

                    // Downloads/clones remote sources; local paths pass through.
                    let source_path =
                        resolve_plugin_source_path(parsed_source, checksum.as_deref()).await?;

                    // A directory source may nest the actual plugin one level
                    // down; fall back to the directory itself when not found.
                    let plugin_root = if source_path.is_dir() {
                        find_plugin_directory(&source_path).unwrap_or(source_path.clone())
                    } else {
                        source_path.clone()
                    };

                    if !plugin_root.exists() || !plugin_root.is_dir() {
                        return Err(format!(
                            "Resolved plugin path is not a directory: {}",
                            plugin_root.display()
                        ));
                    }

                    // Manifest validation happens before the plugin is loaded
                    // into a runtime loader.
                    let loader = PluginLoader::new(PluginLoaderConfig::default());
                    let manifest = loader
                        .validate_plugin(&plugin_root)
                        .await
                        .map_err(|e| format!("Failed to validate plugin: {}", e))?;

                    let plugin_id = manifest.info.id.clone();
                    let plugin_name = manifest.info.name.clone();
                    let plugin_version = manifest.info.version.to_string();

                    // The runtime loader discovers plugins by directory, so
                    // point it at the parent of the plugin root.
                    let parent_dir = plugin_root
                        .parent()
                        .unwrap_or_else(|| std::path::Path::new("."))
                        .to_string_lossy()
                        .to_string();

                    let runtime_loader = PluginLoader::new(PluginLoaderConfig {
                        plugin_dirs: vec![parent_dir],
                        ..PluginLoaderConfig::default()
                    });

                    runtime_loader
                        .load_plugin(&plugin_id)
                        .await
                        .map_err(|e| format!("Failed to load plugin into runtime: {}", e))?;

                    let plugin_instance =
                        runtime_loader.get_plugin(&plugin_id).await.ok_or_else(|| {
                            "Plugin loaded but instance was not retrievable from loader".to_string()
                        })?;

                    Ok::<_, String>((
                        plugin_instance,
                        plugin_id.to_string(),
                        plugin_name,
                        plugin_version,
                    ))
                })?;

            // Synchronous write lock: safe here because we are on a blocking
            // worker thread, not an async runtime thread.
            let mut registry = state_for_install.plugin_registry.blocking_write();

            // Duplicate-id handling: replace only when the caller forced it.
            if let Some(existing_id) =
                registry.list_plugins().into_iter().find(|id| id.as_str() == plugin_id)
            {
                if force {
                    registry.remove_plugin(&existing_id).map_err(|e| {
                        format!("Failed to remove existing plugin before reinstall: {}", e)
                    })?;
                } else {
                    return Err(format!(
                        "Plugin '{}' is already installed. Use force=true to reinstall.",
                        plugin_id
                    ));
                }
            }

            registry
                .add_plugin(plugin_instance)
                .map_err(|e| format!("Failed to register plugin in admin registry: {}", e))?;

            Ok((plugin_id, plugin_name, plugin_version))
        },
    )
    .await;

    // Outer Result: the spawn_blocking join (panic/cancel); inner Result: the
    // install flow itself. Both are reported as JSON errors.
    let (plugin_id, plugin_name, plugin_version) = match install_result {
        Ok(Ok(result)) => result,
        Ok(Err(err)) => {
            return Json(json!({
                "success": false,
                "error": err
            }))
        }
        Err(err) => {
            return Json(json!({
                "success": false,
                "error": format!("Plugin installation task failed: {}", err)
            }))
        }
    };

    Json(json!({
        "success": true,
        "data": {
            "plugin_id": plugin_id,
            "name": plugin_name,
            "version": plugin_version
        },
        "message": "Plugin installed and registered in runtime."
    }))
}
3380
3381/// Validate a plugin source without installing it.
3382pub async fn validate_plugin(Json(request): Json<PluginValidateRequest>) -> impl IntoResponse {
3383    let source = request.source.trim();
3384    if source.is_empty() {
3385        return Json(json!({
3386            "success": false,
3387            "error": "Plugin source is required"
3388        }));
3389    }
3390
3391    let source = match PluginSource::parse(source) {
3392        Ok(source) => source,
3393        Err(e) => {
3394            return Json(json!({
3395                "success": false,
3396                "error": format!("Invalid plugin source: {}", e)
3397            }));
3398        }
3399    };
3400
3401    let path = match source.clone() {
3402        PluginSource::Local(path) => path,
3403        PluginSource::Url { .. } | PluginSource::Git(_) => {
3404            match resolve_plugin_source_path(source, None).await {
3405                Ok(path) => path,
3406                Err(err) => {
3407                    return Json(json!({
3408                        "success": false,
3409                        "error": err
3410                    }))
3411                }
3412            }
3413        }
3414        PluginSource::Registry { .. } => {
3415            return Json(json!({
3416                "success": false,
3417                "error": "Registry plugin validation is not yet supported from the admin API."
3418            }))
3419        }
3420    };
3421
3422    let plugin_root = if path.is_dir() {
3423        find_plugin_directory(&path).unwrap_or(path.clone())
3424    } else {
3425        path
3426    };
3427
3428    let loader = PluginLoader::new(PluginLoaderConfig::default());
3429    match loader.validate_plugin(&plugin_root).await {
3430        Ok(manifest) => Json(json!({
3431            "success": true,
3432            "data": {
3433                "valid": true,
3434                "id": manifest.info.id.to_string(),
3435                "name": manifest.info.name,
3436                "version": manifest.info.version.to_string()
3437            }
3438        })),
3439        Err(e) => Json(json!({
3440            "success": false,
3441            "data": { "valid": false },
3442            "error": format!("Plugin validation failed: {}", e)
3443        })),
3444    }
3445}
3446
3447// Missing handler functions that routes.rs expects
3448pub async fn update_traffic_shaping(
3449    State(state): State<AdminState>,
3450    Json(config): Json<TrafficShapingConfig>,
3451) -> Json<ApiResponse<String>> {
3452    if config.burst_loss.burst_probability > 1.0
3453        || config.burst_loss.loss_rate_during_burst > 1.0
3454        || config.burst_loss.burst_probability < 0.0
3455        || config.burst_loss.loss_rate_during_burst < 0.0
3456    {
3457        return Json(ApiResponse::error(
3458            "Burst loss probabilities must be between 0.0 and 1.0".to_string(),
3459        ));
3460    }
3461
3462    {
3463        let mut cfg = state.config.write().await;
3464        cfg.traffic_shaping = config.clone();
3465    }
3466
3467    if let Some(ref chaos_api_state) = state.chaos_api_state {
3468        let mut chaos_config = chaos_api_state.config.write().await;
3469        chaos_config.traffic_shaping = Some(mockforge_chaos::config::TrafficShapingConfig {
3470            enabled: config.enabled,
3471            bandwidth_limit_bps: config.bandwidth.max_bytes_per_sec,
3472            packet_loss_percent: config.burst_loss.loss_rate_during_burst * 100.0,
3473            max_connections: 0,
3474            connection_timeout_ms: 30000,
3475        });
3476    }
3477
3478    Json(ApiResponse::success("Traffic shaping updated".to_string()))
3479}
3480
/// Import a Postman collection and materialize it as a new workspace.
///
/// Request body (JSON object):
/// - `content`     (string, required): raw Postman collection JSON
/// - `filename`    (string, optional): used to derive the workspace name
/// - `environment` (string, optional): recorded in import history
/// - `base_url`    (string, optional): forwarded to the importer
///
/// Every attempt — parse failure, workspace-creation failure, or success —
/// is appended to `state.import_history`.
pub async fn import_postman(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use mockforge_core::workspace_import::{import_postman_to_workspace, WorkspaceImportConfig};
    use uuid::Uuid;

    // Missing `content` degrades to an empty string; the importer reports the
    // resulting parse error rather than this handler rejecting up front.
    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Import the collection
    let import_result = match mockforge_core::import::import_postman_collection(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            // Record failed import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Postman import failed: {}", e)));
        }
    };

    // Create workspace from imported routes.
    // NOTE(review): `split('.').next()` always yields at least one piece, so
    // the fallback name only applies when `filename` is absent; a name like
    // ".json" produces an empty workspace name — confirm intended.
    let workspace_name = filename
        .and_then(|f| f.split('.').next())
        .unwrap_or("Imported Postman Collection");

    let config = WorkspaceImportConfig {
        create_folders: true,
        base_folder_name: None,
        preserve_hierarchy: true,
        max_depth: 5,
    };

    // Convert MockForgeRoute to ImportRoute
    let routes: Vec<ImportRoute> = import_result
        .routes
        .into_iter()
        .map(|route| ImportRoute {
            method: route.method,
            path: route.path,
            headers: route.headers,
            body: route.body,
            response: ImportResponse {
                status: route.response.status,
                headers: route.response.headers,
                body: route.response.body,
            },
        })
        .collect();

    match import_postman_to_workspace(routes, workspace_name.to_string(), config) {
        Ok(workspace_result) => {
            // Save the workspace to persistent storage
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                // Persistence failure is surfaced as an error and is NOT
                // recorded in import history (only parse/create outcomes are).
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            // Record successful import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: import_result.variables.len(),
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            // Record failed import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}
3604
3605pub async fn import_insomnia(
3606    State(state): State<AdminState>,
3607    Json(request): Json<serde_json::Value>,
3608) -> Json<ApiResponse<String>> {
3609    use uuid::Uuid;
3610
3611    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3612    let filename = request.get("filename").and_then(|v| v.as_str());
3613    let environment = request.get("environment").and_then(|v| v.as_str());
3614    let base_url = request.get("base_url").and_then(|v| v.as_str());
3615
3616    // Import the export
3617    let import_result = match mockforge_core::import::import_insomnia_export(content, environment) {
3618        Ok(result) => result,
3619        Err(e) => {
3620            // Record failed import
3621            let entry = ImportHistoryEntry {
3622                id: Uuid::new_v4().to_string(),
3623                format: "insomnia".to_string(),
3624                timestamp: Utc::now(),
3625                routes_count: 0,
3626                variables_count: 0,
3627                warnings_count: 0,
3628                success: false,
3629                filename: filename.map(|s| s.to_string()),
3630                environment: environment.map(|s| s.to_string()),
3631                base_url: base_url.map(|s| s.to_string()),
3632                error_message: Some(e.clone()),
3633            };
3634            let mut history = state.import_history.write().await;
3635            history.push(entry);
3636
3637            return Json(ApiResponse::error(format!("Insomnia import failed: {}", e)));
3638        }
3639    };
3640
3641    // Create workspace from imported routes
3642    let workspace_name = filename
3643        .and_then(|f| f.split('.').next())
3644        .unwrap_or("Imported Insomnia Collection");
3645
3646    let _config = WorkspaceImportConfig {
3647        create_folders: true,
3648        base_folder_name: None,
3649        preserve_hierarchy: true,
3650        max_depth: 5,
3651    };
3652
3653    // Extract variables count before moving import_result
3654    let variables_count = import_result.variables.len();
3655
3656    match mockforge_core::workspace_import::create_workspace_from_insomnia(
3657        import_result,
3658        Some(workspace_name.to_string()),
3659    ) {
3660        Ok(workspace_result) => {
3661            // Save the workspace to persistent storage
3662            if let Err(e) =
3663                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3664            {
3665                tracing::error!("Failed to save workspace: {}", e);
3666                return Json(ApiResponse::error(format!(
3667                    "Import succeeded but failed to save workspace: {}",
3668                    e
3669                )));
3670            }
3671
3672            // Record successful import
3673            let entry = ImportHistoryEntry {
3674                id: Uuid::new_v4().to_string(),
3675                format: "insomnia".to_string(),
3676                timestamp: Utc::now(),
3677                routes_count: workspace_result.request_count,
3678                variables_count,
3679                warnings_count: workspace_result.warnings.len(),
3680                success: true,
3681                filename: filename.map(|s| s.to_string()),
3682                environment: environment.map(|s| s.to_string()),
3683                base_url: base_url.map(|s| s.to_string()),
3684                error_message: None,
3685            };
3686            let mut history = state.import_history.write().await;
3687            history.push(entry);
3688
3689            Json(ApiResponse::success(format!(
3690                "Successfully imported {} routes into workspace '{}'",
3691                workspace_result.request_count, workspace_name
3692            )))
3693        }
3694        Err(e) => {
3695            // Record failed import
3696            let entry = ImportHistoryEntry {
3697                id: Uuid::new_v4().to_string(),
3698                format: "insomnia".to_string(),
3699                timestamp: Utc::now(),
3700                routes_count: 0,
3701                variables_count: 0,
3702                warnings_count: 0,
3703                success: false,
3704                filename: filename.map(|s| s.to_string()),
3705                environment: environment.map(|s| s.to_string()),
3706                base_url: base_url.map(|s| s.to_string()),
3707                error_message: Some(e.to_string()),
3708            };
3709            let mut history = state.import_history.write().await;
3710            history.push(entry);
3711
3712            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3713        }
3714    }
3715}
3716
/// Import an OpenAPI specification.
///
/// NOTE(review): this handler is currently a stub — it ignores the request
/// body and unconditionally reports success without importing anything or
/// recording import history. Callers see "OpenAPI import completed" even
/// though no workspace is created. Confirm whether this is intentional.
pub async fn import_openapi(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("OpenAPI import completed".to_string()))
}
3723
/// Import one or more curl commands and materialize them as a new workspace.
///
/// Request body (JSON object):
/// - `content`  (string, required): raw curl command text
/// - `filename` (string, optional): used to derive the workspace name
/// - `base_url` (string, optional): forwarded to the importer
///
/// Every attempt — parse failure, workspace-creation failure, or success —
/// is appended to `state.import_history` (with `environment` always `None`,
/// since curl imports have no environment concept).
pub async fn import_curl(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use uuid::Uuid;

    // Missing `content` degrades to an empty string; the importer reports the
    // resulting parse error rather than this handler rejecting up front.
    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Import the commands
    let import_result = match mockforge_core::import::import_curl_commands(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            // Record failed import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Curl import failed: {}", e)));
        }
    };

    // Create workspace from imported routes (name derived from filename stem).
    let workspace_name =
        filename.and_then(|f| f.split('.').next()).unwrap_or("Imported Curl Commands");

    match mockforge_core::workspace_import::create_workspace_from_curl(
        import_result,
        Some(workspace_name.to_string()),
    ) {
        Ok(workspace_result) => {
            // Save the workspace to persistent storage
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                // Persistence failure is surfaced as an error and is NOT
                // recorded in import history (only parse/create outcomes are).
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            // Record successful import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: 0, // Curl doesn't have variables
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            // Record failed import
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}
3823
3824pub async fn preview_import(
3825    State(_state): State<AdminState>,
3826    Json(request): Json<serde_json::Value>,
3827) -> Json<ApiResponse<serde_json::Value>> {
3828    use mockforge_core::import::{
3829        import_curl_commands, import_insomnia_export, import_postman_collection,
3830    };
3831
3832    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3833    let filename = request.get("filename").and_then(|v| v.as_str());
3834    let environment = request.get("environment").and_then(|v| v.as_str());
3835    let base_url = request.get("base_url").and_then(|v| v.as_str());
3836
3837    // Detect format from filename or content
3838    let format = if let Some(fname) = filename {
3839        if fname.to_lowercase().contains("postman")
3840            || fname.to_lowercase().ends_with(".postman_collection")
3841        {
3842            "postman"
3843        } else if fname.to_lowercase().contains("insomnia")
3844            || fname.to_lowercase().ends_with(".insomnia")
3845        {
3846            "insomnia"
3847        } else if fname.to_lowercase().contains("curl")
3848            || fname.to_lowercase().ends_with(".sh")
3849            || fname.to_lowercase().ends_with(".curl")
3850        {
3851            "curl"
3852        } else {
3853            "unknown"
3854        }
3855    } else {
3856        "unknown"
3857    };
3858
3859    match format {
3860        "postman" => match import_postman_collection(content, base_url) {
3861            Ok(import_result) => {
3862                let routes: Vec<serde_json::Value> = import_result
3863                    .routes
3864                    .into_iter()
3865                    .map(|route| {
3866                        serde_json::json!({
3867                            "method": route.method,
3868                            "path": route.path,
3869                            "headers": route.headers,
3870                            "body": route.body,
3871                            "status_code": route.response.status,
3872                            "response": serde_json::json!({
3873                                "status": route.response.status,
3874                                "headers": route.response.headers,
3875                                "body": route.response.body
3876                            })
3877                        })
3878                    })
3879                    .collect();
3880
3881                let response = serde_json::json!({
3882                    "routes": routes,
3883                    "variables": import_result.variables,
3884                    "warnings": import_result.warnings
3885                });
3886
3887                Json(ApiResponse::success(response))
3888            }
3889            Err(e) => Json(ApiResponse::error(format!("Postman import failed: {}", e))),
3890        },
3891        "insomnia" => match import_insomnia_export(content, environment) {
3892            Ok(import_result) => {
3893                let routes: Vec<serde_json::Value> = import_result
3894                    .routes
3895                    .into_iter()
3896                    .map(|route| {
3897                        serde_json::json!({
3898                            "method": route.method,
3899                            "path": route.path,
3900                            "headers": route.headers,
3901                            "body": route.body,
3902                            "status_code": route.response.status,
3903                            "response": serde_json::json!({
3904                                "status": route.response.status,
3905                                "headers": route.response.headers,
3906                                "body": route.response.body
3907                            })
3908                        })
3909                    })
3910                    .collect();
3911
3912                let response = serde_json::json!({
3913                    "routes": routes,
3914                    "variables": import_result.variables,
3915                    "warnings": import_result.warnings
3916                });
3917
3918                Json(ApiResponse::success(response))
3919            }
3920            Err(e) => Json(ApiResponse::error(format!("Insomnia import failed: {}", e))),
3921        },
3922        "curl" => match import_curl_commands(content, base_url) {
3923            Ok(import_result) => {
3924                let routes: Vec<serde_json::Value> = import_result
3925                    .routes
3926                    .into_iter()
3927                    .map(|route| {
3928                        serde_json::json!({
3929                            "method": route.method,
3930                            "path": route.path,
3931                            "headers": route.headers,
3932                            "body": route.body,
3933                            "status_code": route.response.status,
3934                            "response": serde_json::json!({
3935                                "status": route.response.status,
3936                                "headers": route.response.headers,
3937                                "body": route.response.body
3938                            })
3939                        })
3940                    })
3941                    .collect();
3942
3943                let response = serde_json::json!({
3944                    "routes": routes,
3945                    "variables": serde_json::json!({}),
3946                    "warnings": import_result.warnings
3947                });
3948
3949                Json(ApiResponse::success(response))
3950            }
3951            Err(e) => Json(ApiResponse::error(format!("Curl import failed: {}", e))),
3952        },
3953        _ => Json(ApiResponse::error("Unsupported import format".to_string())),
3954    }
3955}
3956
3957pub async fn get_import_history(
3958    State(state): State<AdminState>,
3959) -> Json<ApiResponse<serde_json::Value>> {
3960    let history = state.import_history.read().await;
3961    let total = history.len();
3962
3963    let imports: Vec<serde_json::Value> = history
3964        .iter()
3965        .rev()
3966        .take(50)
3967        .map(|entry| {
3968            serde_json::json!({
3969                "id": entry.id,
3970                "format": entry.format,
3971                "timestamp": entry.timestamp.to_rfc3339(),
3972                "routes_count": entry.routes_count,
3973                "variables_count": entry.variables_count,
3974                "warnings_count": entry.warnings_count,
3975                "success": entry.success,
3976                "filename": entry.filename,
3977                "environment": entry.environment,
3978                "base_url": entry.base_url,
3979                "error_message": entry.error_message
3980            })
3981        })
3982        .collect();
3983
3984    let response = serde_json::json!({
3985        "imports": imports,
3986        "total": total
3987    });
3988
3989    Json(ApiResponse::success(response))
3990}
3991
3992pub async fn get_admin_api_state(
3993    State(_state): State<AdminState>,
3994) -> Json<ApiResponse<serde_json::Value>> {
3995    Json(ApiResponse::success(serde_json::json!({
3996        "status": "active"
3997    })))
3998}
3999
4000pub async fn get_admin_api_replay(
4001    State(_state): State<AdminState>,
4002) -> Json<ApiResponse<serde_json::Value>> {
4003    Json(ApiResponse::success(serde_json::json!({
4004        "replay": []
4005    })))
4006}
4007
4008pub async fn get_sse_status(
4009    State(_state): State<AdminState>,
4010) -> Json<ApiResponse<serde_json::Value>> {
4011    Json(ApiResponse::success(serde_json::json!({
4012        "available": true,
4013        "endpoint": "/sse",
4014        "config": {
4015            "event_type": "status",
4016            "interval_ms": 1000,
4017            "data_template": "{}"
4018        }
4019    })))
4020}
4021
4022pub async fn get_sse_connections(
4023    State(_state): State<AdminState>,
4024) -> Json<ApiResponse<serde_json::Value>> {
4025    Json(ApiResponse::success(serde_json::json!({
4026        "active_connections": 0
4027    })))
4028}
4029
4030// Workspace management functions
4031pub async fn get_workspaces(
4032    State(_state): State<AdminState>,
4033) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4034    Json(ApiResponse::success(vec![]))
4035}
4036
4037pub async fn create_workspace(
4038    State(_state): State<AdminState>,
4039    Json(_request): Json<serde_json::Value>,
4040) -> Json<ApiResponse<String>> {
4041    Json(ApiResponse::success("Workspace created".to_string()))
4042}
4043
4044pub async fn open_workspace_from_directory(
4045    State(_state): State<AdminState>,
4046    Json(_request): Json<serde_json::Value>,
4047) -> Json<ApiResponse<String>> {
4048    Json(ApiResponse::success("Workspace opened from directory".to_string()))
4049}
4050
4051// Reality Slider API handlers
4052
4053/// Get current reality level
4054pub async fn get_reality_level(
4055    State(state): State<AdminState>,
4056) -> Json<ApiResponse<serde_json::Value>> {
4057    let engine = state.reality_engine.read().await;
4058    let level = engine.get_level().await;
4059    let config = engine.get_config().await;
4060
4061    Json(ApiResponse::success(serde_json::json!({
4062        "level": level.value(),
4063        "level_name": level.name(),
4064        "description": level.description(),
4065        "chaos": {
4066            "enabled": config.chaos.enabled,
4067            "error_rate": config.chaos.error_rate,
4068            "delay_rate": config.chaos.delay_rate,
4069        },
4070        "latency": {
4071            "base_ms": config.latency.base_ms,
4072            "jitter_ms": config.latency.jitter_ms,
4073        },
4074        "mockai": {
4075            "enabled": config.mockai.enabled,
4076        },
4077    })))
4078}
4079
/// Request body for `set_reality_level`: the desired reality level (1-5).
#[derive(Deserialize)]
pub struct SetRealityLevelRequest {
    // Raw level value; validated by the handler via `RealityLevel::from_value`.
    level: u8,
}
4085
/// Set the reality level and hot-reload dependent subsystems.
///
/// Validates the requested level, updates the reality engine, then pushes the
/// derived configuration into the chaos API state, the latency injector, and
/// MockAI — each only when present on `AdminState`. Subsystem update failures
/// are collected and returned as a `warnings` array instead of failing the
/// request, so the level change itself always sticks once validated.
pub async fn set_reality_level(
    State(state): State<AdminState>,
    Json(request): Json<SetRealityLevelRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    // Validate before touching any engine state; out-of-range values
    // produce an error response immediately.
    let level = match mockforge_core::RealityLevel::from_value(request.level) {
        Some(l) => l,
        None => {
            return Json(ApiResponse::error(format!(
                "Invalid reality level: {}. Must be between 1 and 5.",
                request.level
            )));
        }
    };

    // Update reality engine
    let engine = state.reality_engine.write().await;
    engine.set_level(level).await;
    let config = engine.get_config().await;
    drop(engine); // Release lock early

    // Apply hot-reload updates to subsystems
    let mut update_errors = Vec::new();

    // Update chaos config if available
    if let Some(ref chaos_api_state) = state.chaos_api_state {
        let mut chaos_config = chaos_api_state.config.write().await;

        // Convert reality config to chaos config using helper function
        // This ensures proper mapping of all fields
        use mockforge_chaos::config::{FaultInjectionConfig, LatencyConfig};

        // Latency injection is only configured when a non-zero base delay exists.
        let latency_config = if config.latency.base_ms > 0 {
            Some(LatencyConfig {
                enabled: true,
                fixed_delay_ms: Some(config.latency.base_ms),
                random_delay_range_ms: config
                    .latency
                    .max_ms
                    .map(|max| (config.latency.min_ms, max)),
                // Express jitter as a fraction of the base delay, capped at 1.0.
                jitter_percent: if config.latency.jitter_ms > 0 {
                    (config.latency.jitter_ms as f64 / config.latency.base_ms as f64).min(1.0)
                } else {
                    0.0
                },
                probability: 1.0,
            })
        } else {
            None
        };

        let fault_injection_config = if config.chaos.enabled {
            Some(FaultInjectionConfig {
                enabled: true,
                http_errors: config.chaos.status_codes.clone(),
                http_error_probability: config.chaos.error_rate,
                connection_errors: false,
                connection_error_probability: 0.0,
                timeout_errors: config.chaos.inject_timeouts,
                timeout_ms: config.chaos.timeout_ms,
                // Timeouts reuse the chaos error rate when they are enabled.
                timeout_probability: if config.chaos.inject_timeouts {
                    config.chaos.error_rate
                } else {
                    0.0
                },
                partial_responses: false,
                partial_response_probability: 0.0,
                payload_corruption: false,
                payload_corruption_probability: 0.0,
                corruption_type: mockforge_chaos::config::CorruptionType::None,
                error_pattern: Some(mockforge_chaos::config::ErrorPattern::Random {
                    probability: config.chaos.error_rate,
                }),
                mockai_enabled: false,
            })
        } else {
            None
        };

        // Update chaos config from converted config
        chaos_config.enabled = config.chaos.enabled;
        chaos_config.latency = latency_config;
        chaos_config.fault_injection = fault_injection_config;

        // Release the chaos write lock before logging / further awaits.
        drop(chaos_config);
        tracing::info!("✅ Updated chaos config for reality level {}", level.value());

        // Update middleware injectors if middleware is accessible
        // Note: The middleware reads from shared config, so injectors will be updated
        // on next request, but we can also trigger an update if middleware is stored
        // For now, the middleware reads config directly, so this is sufficient
    }

    // Update latency injector if available; failure becomes a warning.
    if let Some(ref latency_injector) = state.latency_injector {
        match mockforge_core::latency::LatencyInjector::update_profile_async(
            latency_injector,
            config.latency.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated latency injector for reality level {}", level.value());
            }
            Err(e) => {
                let error_msg = format!("Failed to update latency injector: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // Update MockAI if available; failure becomes a warning.
    if let Some(ref mockai) = state.mockai {
        match mockforge_core::intelligent_behavior::MockAI::update_config_async(
            mockai,
            config.mockai.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated MockAI config for reality level {}", level.value());
            }
            Err(e) => {
                let error_msg = format!("Failed to update MockAI: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // Build response mirroring the shape returned by `get_reality_level`.
    let mut response = serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": {
            "enabled": config.chaos.enabled,
            "error_rate": config.chaos.error_rate,
            "delay_rate": config.chaos.delay_rate,
        },
        "latency": {
            "base_ms": config.latency.base_ms,
            "jitter_ms": config.latency.jitter_ms,
        },
        "mockai": {
            "enabled": config.mockai.enabled,
        },
    });

    // Add warnings if any updates failed
    if !update_errors.is_empty() {
        response["warnings"] = serde_json::json!(update_errors);
        tracing::warn!(
            "Reality level updated to {} but some subsystems failed to update: {:?}",
            level.value(),
            update_errors
        );
    } else {
        tracing::info!(
            "✅ Reality level successfully updated to {} (hot-reload applied)",
            level.value()
        );
    }

    Json(ApiResponse::success(response))
}
4252
4253/// List all available reality presets
4254pub async fn list_reality_presets(
4255    State(state): State<AdminState>,
4256) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4257    let persistence = &state.workspace_persistence;
4258    match persistence.list_reality_presets().await {
4259        Ok(preset_paths) => {
4260            let presets: Vec<serde_json::Value> = preset_paths
4261                .iter()
4262                .map(|path| {
4263                    serde_json::json!({
4264                        "id": path.file_name().and_then(|n| n.to_str()).unwrap_or("unknown"),
4265                        "path": path.to_string_lossy(),
4266                        "name": path.file_stem().and_then(|n| n.to_str()).unwrap_or("unknown"),
4267                    })
4268                })
4269                .collect();
4270            Json(ApiResponse::success(presets))
4271        }
4272        Err(e) => Json(ApiResponse::error(format!("Failed to list presets: {}", e))),
4273    }
4274}
4275
/// Request body for `import_reality_preset`: location of the preset file.
#[derive(Deserialize)]
pub struct ImportPresetRequest {
    // Filesystem path, taken as-is from the client; the handler does not
    // sanitize it — assumes the admin UI caller is trusted.
    path: String,
}
4281
4282pub async fn import_reality_preset(
4283    State(state): State<AdminState>,
4284    Json(request): Json<ImportPresetRequest>,
4285) -> Json<ApiResponse<serde_json::Value>> {
4286    let persistence = &state.workspace_persistence;
4287    let path = std::path::Path::new(&request.path);
4288
4289    match persistence.import_reality_preset(path).await {
4290        Ok(preset) => {
4291            // Apply the preset to the reality engine
4292            let engine = state.reality_engine.write().await;
4293            engine.apply_preset(preset.clone()).await;
4294
4295            Json(ApiResponse::success(serde_json::json!({
4296                "name": preset.name,
4297                "description": preset.description,
4298                "level": preset.config.level.value(),
4299                "level_name": preset.config.level.name(),
4300            })))
4301        }
4302        Err(e) => Json(ApiResponse::error(format!("Failed to import preset: {}", e))),
4303    }
4304}
4305
/// Request body for `export_reality_preset`.
#[derive(Deserialize)]
pub struct ExportPresetRequest {
    // Preset display name; also used to derive the output filename.
    name: String,
    // Optional human-readable description stored with the preset.
    description: Option<String>,
}
4312
4313pub async fn export_reality_preset(
4314    State(state): State<AdminState>,
4315    Json(request): Json<ExportPresetRequest>,
4316) -> Json<ApiResponse<serde_json::Value>> {
4317    let engine = state.reality_engine.read().await;
4318    let preset = engine.create_preset(request.name.clone(), request.description.clone()).await;
4319
4320    let persistence = &state.workspace_persistence;
4321    let presets_dir = persistence.presets_dir();
4322    let filename = format!("{}.json", request.name.replace(' ', "_").to_lowercase());
4323    let output_path = presets_dir.join(&filename);
4324
4325    match persistence.export_reality_preset(&preset, &output_path).await {
4326        Ok(_) => Json(ApiResponse::success(serde_json::json!({
4327            "name": preset.name,
4328            "description": preset.description,
4329            "path": output_path.to_string_lossy(),
4330            "level": preset.config.level.value(),
4331        }))),
4332        Err(e) => Json(ApiResponse::error(format!("Failed to export preset: {}", e))),
4333    }
4334}
4335
4336// Reality Continuum API handlers
4337
4338/// Get current blend ratio for a path
4339pub async fn get_continuum_ratio(
4340    State(state): State<AdminState>,
4341    Query(params): Query<HashMap<String, String>>,
4342) -> Json<ApiResponse<serde_json::Value>> {
4343    let path = params.get("path").cloned().unwrap_or_else(|| "/".to_string());
4344    let engine = state.continuum_engine.read().await;
4345    let ratio = engine.get_blend_ratio(&path).await;
4346    let config = engine.get_config().await;
4347    let enabled = engine.is_enabled().await;
4348
4349    Json(ApiResponse::success(serde_json::json!({
4350        "path": path,
4351        "blend_ratio": ratio,
4352        "enabled": enabled,
4353        "transition_mode": format!("{:?}", config.transition_mode),
4354        "merge_strategy": format!("{:?}", config.merge_strategy),
4355        "default_ratio": config.default_ratio,
4356    })))
4357}
4358
/// Request body for `set_continuum_ratio`.
#[derive(Deserialize)]
pub struct SetContinuumRatioRequest {
    // Route path the ratio override applies to.
    path: String,
    // Desired mock/real blend ratio; clamped to [0.0, 1.0] by the handler.
    ratio: f64,
}
4365
4366pub async fn set_continuum_ratio(
4367    State(state): State<AdminState>,
4368    Json(request): Json<SetContinuumRatioRequest>,
4369) -> Json<ApiResponse<serde_json::Value>> {
4370    let ratio = request.ratio.clamp(0.0, 1.0);
4371    let engine = state.continuum_engine.read().await;
4372    engine.set_blend_ratio(&request.path, ratio).await;
4373
4374    Json(ApiResponse::success(serde_json::json!({
4375        "path": request.path,
4376        "blend_ratio": ratio,
4377    })))
4378}
4379
4380/// Get time schedule
4381pub async fn get_continuum_schedule(
4382    State(state): State<AdminState>,
4383) -> Json<ApiResponse<serde_json::Value>> {
4384    let engine = state.continuum_engine.read().await;
4385    let schedule = engine.get_time_schedule().await;
4386
4387    match schedule {
4388        Some(s) => Json(ApiResponse::success(serde_json::json!({
4389            "start_time": s.start_time.to_rfc3339(),
4390            "end_time": s.end_time.to_rfc3339(),
4391            "start_ratio": s.start_ratio,
4392            "end_ratio": s.end_ratio,
4393            "curve": format!("{:?}", s.curve),
4394            "duration_days": s.duration().num_days(),
4395        }))),
4396        None => Json(ApiResponse::success(serde_json::json!(null))),
4397    }
4398}
4399
/// Request body for `set_continuum_schedule`.
#[derive(Deserialize)]
pub struct SetContinuumScheduleRequest {
    // RFC 3339 timestamp for the start of the transition window.
    start_time: String,
    // RFC 3339 timestamp for the end of the transition window.
    end_time: String,
    // Blend ratio at the window start; clamped to [0.0, 1.0].
    start_ratio: f64,
    // Blend ratio at the window end; clamped to [0.0, 1.0].
    end_ratio: f64,
    // "linear", "exponential", or "sigmoid"; anything else falls back to linear.
    curve: Option<String>,
}
4409
4410pub async fn set_continuum_schedule(
4411    State(state): State<AdminState>,
4412    Json(request): Json<SetContinuumScheduleRequest>,
4413) -> Json<ApiResponse<serde_json::Value>> {
4414    let start_time = chrono::DateTime::parse_from_rfc3339(&request.start_time)
4415        .map_err(|e| format!("Invalid start_time: {}", e))
4416        .map(|dt| dt.with_timezone(&Utc));
4417
4418    let end_time = chrono::DateTime::parse_from_rfc3339(&request.end_time)
4419        .map_err(|e| format!("Invalid end_time: {}", e))
4420        .map(|dt| dt.with_timezone(&Utc));
4421
4422    match (start_time, end_time) {
4423        (Ok(start), Ok(end)) => {
4424            let curve = request
4425                .curve
4426                .as_deref()
4427                .map(|c| match c {
4428                    "linear" => mockforge_core::TransitionCurve::Linear,
4429                    "exponential" => mockforge_core::TransitionCurve::Exponential,
4430                    "sigmoid" => mockforge_core::TransitionCurve::Sigmoid,
4431                    _ => mockforge_core::TransitionCurve::Linear,
4432                })
4433                .unwrap_or(mockforge_core::TransitionCurve::Linear);
4434
4435            let schedule = mockforge_core::TimeSchedule::with_curve(
4436                start,
4437                end,
4438                request.start_ratio.clamp(0.0, 1.0),
4439                request.end_ratio.clamp(0.0, 1.0),
4440                curve,
4441            );
4442
4443            let engine = state.continuum_engine.read().await;
4444            engine.set_time_schedule(schedule.clone()).await;
4445
4446            Json(ApiResponse::success(serde_json::json!({
4447                "start_time": schedule.start_time.to_rfc3339(),
4448                "end_time": schedule.end_time.to_rfc3339(),
4449                "start_ratio": schedule.start_ratio,
4450                "end_ratio": schedule.end_ratio,
4451                "curve": format!("{:?}", schedule.curve),
4452            })))
4453        }
4454        (Err(e), _) | (_, Err(e)) => Json(ApiResponse::error(e)),
4455    }
4456}
4457
/// Request body for `advance_continuum_ratio`.
#[derive(Deserialize)]
pub struct AdvanceContinuumRatioRequest {
    // Amount added to the blend ratio; defaults to 0.1 when omitted.
    increment: Option<f64>,
}
4463
4464pub async fn advance_continuum_ratio(
4465    State(state): State<AdminState>,
4466    Json(request): Json<AdvanceContinuumRatioRequest>,
4467) -> Json<ApiResponse<serde_json::Value>> {
4468    let increment = request.increment.unwrap_or(0.1);
4469    let engine = state.continuum_engine.read().await;
4470    engine.advance_ratio(increment).await;
4471    let config = engine.get_config().await;
4472
4473    Json(ApiResponse::success(serde_json::json!({
4474        "default_ratio": config.default_ratio,
4475        "increment": increment,
4476    })))
4477}
4478
/// Request body for `set_continuum_enabled`.
#[derive(Deserialize)]
pub struct SetContinuumEnabledRequest {
    // Whether the reality continuum should be active.
    enabled: bool,
}
4484
4485pub async fn set_continuum_enabled(
4486    State(state): State<AdminState>,
4487    Json(request): Json<SetContinuumEnabledRequest>,
4488) -> Json<ApiResponse<serde_json::Value>> {
4489    let engine = state.continuum_engine.read().await;
4490    engine.set_enabled(request.enabled).await;
4491
4492    Json(ApiResponse::success(serde_json::json!({
4493        "enabled": request.enabled,
4494    })))
4495}
4496
4497/// Get all manual overrides
4498pub async fn get_continuum_overrides(
4499    State(state): State<AdminState>,
4500) -> Json<ApiResponse<serde_json::Value>> {
4501    let engine = state.continuum_engine.read().await;
4502    let overrides = engine.get_manual_overrides().await;
4503
4504    Json(ApiResponse::success(serde_json::json!(overrides)))
4505}
4506
4507/// Clear all manual overrides
4508pub async fn clear_continuum_overrides(
4509    State(state): State<AdminState>,
4510) -> Json<ApiResponse<serde_json::Value>> {
4511    let engine = state.continuum_engine.read().await;
4512    engine.clear_manual_overrides().await;
4513
4514    Json(ApiResponse::success(serde_json::json!({
4515        "message": "All manual overrides cleared",
4516    })))
4517}
4518
4519pub async fn get_workspace(
4520    State(_state): State<AdminState>,
4521    Path(workspace_id): Path<String>,
4522) -> Json<ApiResponse<serde_json::Value>> {
4523    Json(ApiResponse::success(serde_json::json!({
4524        "workspace": {
4525            "summary": {
4526                "id": workspace_id,
4527                "name": "Mock Workspace",
4528                "description": "A mock workspace"
4529            },
4530            "folders": [],
4531            "requests": []
4532        }
4533    })))
4534}
4535
4536pub async fn delete_workspace(
4537    State(_state): State<AdminState>,
4538    Path(_workspace_id): Path<String>,
4539) -> Json<ApiResponse<String>> {
4540    Json(ApiResponse::success("Workspace deleted".to_string()))
4541}
4542
4543pub async fn set_active_workspace(
4544    State(_state): State<AdminState>,
4545    Path(_workspace_id): Path<String>,
4546) -> Json<ApiResponse<String>> {
4547    Json(ApiResponse::success("Workspace activated".to_string()))
4548}
4549
4550pub async fn create_folder(
4551    State(_state): State<AdminState>,
4552    Path(_workspace_id): Path<String>,
4553    Json(_request): Json<serde_json::Value>,
4554) -> Json<ApiResponse<String>> {
4555    Json(ApiResponse::success("Folder created".to_string()))
4556}
4557
4558pub async fn create_request(
4559    State(_state): State<AdminState>,
4560    Path(_workspace_id): Path<String>,
4561    Json(_request): Json<serde_json::Value>,
4562) -> Json<ApiResponse<String>> {
4563    Json(ApiResponse::success("Request created".to_string()))
4564}
4565
4566pub async fn execute_workspace_request(
4567    State(_state): State<AdminState>,
4568    Path((_workspace_id, _request_id)): Path<(String, String)>,
4569    Json(_request): Json<serde_json::Value>,
4570) -> Json<ApiResponse<serde_json::Value>> {
4571    Json(ApiResponse::success(serde_json::json!({
4572        "status": "executed",
4573        "response": {}
4574    })))
4575}
4576
4577pub async fn get_request_history(
4578    State(_state): State<AdminState>,
4579    Path((_workspace_id, _request_id)): Path<(String, String)>,
4580) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4581    Json(ApiResponse::success(vec![]))
4582}
4583
4584pub async fn get_folder(
4585    State(_state): State<AdminState>,
4586    Path((_workspace_id, folder_id)): Path<(String, String)>,
4587) -> Json<ApiResponse<serde_json::Value>> {
4588    Json(ApiResponse::success(serde_json::json!({
4589        "folder": {
4590            "summary": {
4591                "id": folder_id,
4592                "name": "Mock Folder",
4593                "description": "A mock folder"
4594            },
4595            "requests": []
4596        }
4597    })))
4598}
4599
4600pub async fn import_to_workspace(
4601    State(_state): State<AdminState>,
4602    Path(_workspace_id): Path<String>,
4603    Json(_request): Json<serde_json::Value>,
4604) -> Json<ApiResponse<String>> {
4605    Json(ApiResponse::success("Import to workspace completed".to_string()))
4606}
4607
4608pub async fn export_workspaces(
4609    State(_state): State<AdminState>,
4610    Json(_request): Json<serde_json::Value>,
4611) -> Json<ApiResponse<String>> {
4612    Json(ApiResponse::success("Workspaces exported".to_string()))
4613}
4614
4615// Environment management functions
4616pub async fn get_environments(
4617    State(_state): State<AdminState>,
4618    Path(_workspace_id): Path<String>,
4619) -> Json<ApiResponse<serde_json::Value>> {
4620    // Return a default global environment
4621    let environments = vec![serde_json::json!({
4622        "id": "global",
4623        "name": "Global",
4624        "description": "Global environment variables",
4625        "variable_count": 0,
4626        "is_global": true,
4627        "active": true,
4628        "order": 0
4629    })];
4630
4631    Json(ApiResponse::success(serde_json::json!({
4632        "environments": environments,
4633        "total": 1
4634    })))
4635}
4636
4637pub async fn create_environment(
4638    State(_state): State<AdminState>,
4639    Path(_workspace_id): Path<String>,
4640    Json(_request): Json<serde_json::Value>,
4641) -> Json<ApiResponse<String>> {
4642    Json(ApiResponse::success("Environment created".to_string()))
4643}
4644
4645pub async fn update_environment(
4646    State(_state): State<AdminState>,
4647    Path((_workspace_id, _environment_id)): Path<(String, String)>,
4648    Json(_request): Json<serde_json::Value>,
4649) -> Json<ApiResponse<String>> {
4650    Json(ApiResponse::success("Environment updated".to_string()))
4651}
4652
4653pub async fn delete_environment(
4654    State(_state): State<AdminState>,
4655    Path((_workspace_id, _environment_id)): Path<(String, String)>,
4656) -> Json<ApiResponse<String>> {
4657    Json(ApiResponse::success("Environment deleted".to_string()))
4658}
4659
4660pub async fn set_active_environment(
4661    State(_state): State<AdminState>,
4662    Path((_workspace_id, _environment_id)): Path<(String, String)>,
4663) -> Json<ApiResponse<String>> {
4664    Json(ApiResponse::success("Environment activated".to_string()))
4665}
4666
4667pub async fn update_environments_order(
4668    State(_state): State<AdminState>,
4669    Path(_workspace_id): Path<String>,
4670    Json(_request): Json<serde_json::Value>,
4671) -> Json<ApiResponse<String>> {
4672    Json(ApiResponse::success("Environment order updated".to_string()))
4673}
4674
4675pub async fn get_environment_variables(
4676    State(_state): State<AdminState>,
4677    Path((_workspace_id, _environment_id)): Path<(String, String)>,
4678) -> Json<ApiResponse<serde_json::Value>> {
4679    Json(ApiResponse::success(serde_json::json!({
4680        "variables": []
4681    })))
4682}
4683
4684pub async fn set_environment_variable(
4685    State(_state): State<AdminState>,
4686    Path((_workspace_id, _environment_id)): Path<(String, String)>,
4687    Json(_request): Json<serde_json::Value>,
4688) -> Json<ApiResponse<String>> {
4689    Json(ApiResponse::success("Environment variable set".to_string()))
4690}
4691
4692pub async fn remove_environment_variable(
4693    State(_state): State<AdminState>,
4694    Path((_workspace_id, _environment_id, _variable_name)): Path<(String, String, String)>,
4695) -> Json<ApiResponse<String>> {
4696    Json(ApiResponse::success("Environment variable removed".to_string()))
4697}
4698
4699// Autocomplete functions
4700pub async fn get_autocomplete_suggestions(
4701    State(_state): State<AdminState>,
4702    Path(_workspace_id): Path<String>,
4703    Json(_request): Json<serde_json::Value>,
4704) -> Json<ApiResponse<serde_json::Value>> {
4705    Json(ApiResponse::success(serde_json::json!({
4706        "suggestions": [],
4707        "start_position": 0,
4708        "end_position": 0
4709    })))
4710}
4711
4712// Sync management functions
4713pub async fn get_sync_status(
4714    State(_state): State<AdminState>,
4715    Path(_workspace_id): Path<String>,
4716) -> Json<ApiResponse<serde_json::Value>> {
4717    Json(ApiResponse::success(serde_json::json!({
4718        "status": "disabled"
4719    })))
4720}
4721
4722pub async fn configure_sync(
4723    State(_state): State<AdminState>,
4724    Path(_workspace_id): Path<String>,
4725    Json(_request): Json<serde_json::Value>,
4726) -> Json<ApiResponse<String>> {
4727    Json(ApiResponse::success("Sync configured".to_string()))
4728}
4729
4730pub async fn disable_sync(
4731    State(_state): State<AdminState>,
4732    Path(_workspace_id): Path<String>,
4733) -> Json<ApiResponse<String>> {
4734    Json(ApiResponse::success("Sync disabled".to_string()))
4735}
4736
4737pub async fn trigger_sync(
4738    State(_state): State<AdminState>,
4739    Path(_workspace_id): Path<String>,
4740) -> Json<ApiResponse<String>> {
4741    Json(ApiResponse::success("Sync triggered".to_string()))
4742}
4743
4744pub async fn get_sync_changes(
4745    State(_state): State<AdminState>,
4746    Path(_workspace_id): Path<String>,
4747) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4748    Json(ApiResponse::success(vec![]))
4749}
4750
4751pub async fn confirm_sync_changes(
4752    State(_state): State<AdminState>,
4753    Path(_workspace_id): Path<String>,
4754    Json(_request): Json<serde_json::Value>,
4755) -> Json<ApiResponse<String>> {
4756    Json(ApiResponse::success("Sync changes confirmed".to_string()))
4757}
4758
4759// Missing functions that routes.rs expects
4760pub async fn clear_import_history(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
4761    let mut history = state.import_history.write().await;
4762    history.clear();
4763    Json(ApiResponse::success("Import history cleared".to_string()))
4764}
4765
#[cfg(test)]
mod tests {
    use super::*;

    // RequestMetrics is a plain data holder; verify field-by-field
    // construction round-trips the given values.
    #[test]
    fn test_request_metrics_creation() {
        use std::collections::HashMap;

        let metrics = RequestMetrics {
            total_requests: 100,
            active_connections: 5,
            requests_by_endpoint: HashMap::new(),
            response_times: vec![10, 20, 30],
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint: HashMap::new(),
            last_request_by_endpoint: HashMap::new(),
        };

        assert_eq!(metrics.total_requests, 100);
        assert_eq!(metrics.active_connections, 5);
        assert_eq!(metrics.response_times.len(), 3);
    }

    // Same smoke-test pattern for the SystemMetrics struct.
    #[test]
    fn test_system_metrics_creation() {
        let metrics = SystemMetrics {
            cpu_usage_percent: 45.5,
            memory_usage_mb: 100,
            active_threads: 10,
        };

        assert_eq!(metrics.active_threads, 10);
        assert!(metrics.cpu_usage_percent > 0.0);
        assert_eq!(metrics.memory_usage_mb, 100);
    }

    // A TimeSeriesPoint pairs a timestamp with a value; only the value is
    // asserted since the timestamp is taken at test time.
    #[test]
    fn test_time_series_point() {
        let point = TimeSeriesPoint {
            timestamp: Utc::now(),
            value: 42.5,
        };

        assert_eq!(point.value, 42.5);
    }

    // RestartStatus models an in-flight restart with optional metadata.
    #[test]
    fn test_restart_status() {
        let status = RestartStatus {
            in_progress: true,
            initiated_at: Some(Utc::now()),
            reason: Some("Manual restart".to_string()),
            success: None,
        };

        assert!(status.in_progress);
        assert!(status.reason.is_some());
    }

    // ConfigurationState aggregates the latency, fault, proxy, validation,
    // and traffic-shaping sub-configs; build one with everything disabled
    // and spot-check a few fields.
    #[test]
    fn test_configuration_state() {
        use std::collections::HashMap;

        let state = ConfigurationState {
            latency_profile: LatencyProfile {
                name: "default".to_string(),
                base_ms: 100,
                jitter_ms: 10,
                tag_overrides: HashMap::new(),
            },
            fault_config: FaultConfig {
                enabled: false,
                failure_rate: 0.0,
                status_codes: vec![],
                active_failures: 0,
            },
            proxy_config: ProxyConfig {
                enabled: false,
                upstream_url: None,
                timeout_seconds: 30,
                requests_proxied: 0,
            },
            validation_settings: ValidationSettings {
                mode: "off".to_string(),
                aggregate_errors: false,
                validate_responses: false,
                overrides: HashMap::new(),
            },
            traffic_shaping: TrafficShapingConfig {
                enabled: false,
                bandwidth: crate::models::BandwidthConfig {
                    enabled: false,
                    max_bytes_per_sec: 1_048_576,
                    burst_capacity_bytes: 10_485_760,
                    tag_overrides: HashMap::new(),
                },
                burst_loss: crate::models::BurstLossConfig {
                    enabled: false,
                    burst_probability: 0.1,
                    burst_duration_ms: 5000,
                    loss_rate_during_burst: 0.5,
                    recovery_time_ms: 30000,
                    tag_overrides: HashMap::new(),
                },
            },
        };

        assert_eq!(state.latency_profile.name, "default");
        assert!(!state.fault_config.enabled);
        assert!(!state.proxy_config.enabled);
    }

    // AdminState::new should record the HTTP address, API flag, and admin
    // port it was constructed with (remaining arguments are optional
    // subsystems, passed as None here).
    #[test]
    fn test_admin_state_new() {
        let http_addr: std::net::SocketAddr = "127.0.0.1:3000".parse().unwrap();
        let state = AdminState::new(
            Some(http_addr),
            None,
            None,
            None,
            true,
            8080,
            None,
            None,
            None,
            None,
            None,
        );

        assert_eq!(state.http_server_addr, Some(http_addr));
        assert!(state.api_enabled);
        assert_eq!(state.admin_port, 8080);
    }
}