use axum::{
    extract::{Path, Query, State},
    http::{self, StatusCode},
    response::{
        sse::{Event, Sse},
        Html, IntoResponse, Json,
    },
};
use chrono::Utc;
use futures_util::stream::{self, Stream};
use mockforge_core::{Error, Result};
use mockforge_plugin_loader::PluginRegistry;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::HashMap;
use std::convert::Infallible;
use std::process::{Command, Stdio};
use std::sync::Arc;
use std::time::Duration;
use sysinfo::System;
use tokio::sync::RwLock;

use crate::models::{
    ApiResponse, ConfigUpdate, DashboardData, DashboardSystemInfo, FaultConfig, HealthCheck,
    LatencyProfile, LogFilter, MetricsData, ProxyConfig, RequestLog, RouteInfo, ServerInfo,
    ServerStatus, SimpleMetricsData, SystemInfo, ValidationSettings, ValidationUpdate,
};

use mockforge_core::workspace_import::{ImportResponse, ImportRoute};

pub mod admin;
pub mod ai_studio;
pub mod analytics;
pub mod analytics_stream;
pub mod analytics_v2;
pub mod assets;
pub mod behavioral_cloning;
pub mod chains;
pub mod community;
pub mod contract_diff;
pub mod failure_analysis;
pub mod graph;
pub mod health;
pub mod migration;
pub mod playground;
pub mod plugin;
pub mod verification;
pub mod voice;
pub mod workspaces;

pub use assets::*;
pub use chains::*;
pub use graph::*;
pub use migration::*;
pub use plugin::*;

use mockforge_core::workspace_import::WorkspaceImportConfig;
use mockforge_core::workspace_persistence::WorkspacePersistence;

/// Rolling request metrics aggregated across all protocols.
#[derive(Debug, Clone, Default)]
pub struct RequestMetrics {
    /// Total number of requests observed.
    pub total_requests: u64,
    /// Currently active connections.
    pub active_connections: u64,
    /// Request counts keyed by "METHOD /path".
    pub requests_by_endpoint: HashMap<String, u64>,
    /// Recent response times in milliseconds (rolling window).
    pub response_times: Vec<u64>,
    /// Recent response times per endpoint (rolling window).
    pub response_times_by_endpoint: HashMap<String, Vec<u64>>,
    /// Error (status >= 400) counts per endpoint.
    pub errors_by_endpoint: HashMap<String, u64>,
    /// Timestamp of the most recent request per endpoint.
    pub last_request_by_endpoint: HashMap<String, chrono::DateTime<chrono::Utc>>,
}

/// Point-in-time system resource usage.
#[derive(Debug, Clone)]
pub struct SystemMetrics {
    pub memory_usage_mb: u64,
    pub cpu_usage_percent: f64,
    pub active_threads: u32,
}

/// A single timestamped sample in a time series.
#[derive(Debug, Clone)]
pub struct TimeSeriesPoint {
    pub timestamp: chrono::DateTime<chrono::Utc>,
    pub value: f64,
}

/// Bounded time-series buffers backing the dashboard charts.
#[derive(Debug, Clone, Default)]
pub struct TimeSeriesData {
    pub memory_usage: Vec<TimeSeriesPoint>,
    pub cpu_usage: Vec<TimeSeriesPoint>,
    pub request_count: Vec<TimeSeriesPoint>,
    pub response_time: Vec<TimeSeriesPoint>,
}

/// Progress and outcome of a server restart request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestartStatus {
    pub in_progress: bool,
    pub initiated_at: Option<chrono::DateTime<chrono::Utc>>,
    pub reason: Option<String>,
    pub success: Option<bool>,
}

/// Metadata describing a recorded fixture file on disk.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureInfo {
    pub id: String,
    pub protocol: String,
    pub method: String,
    pub path: String,
    pub saved_at: chrono::DateTime<chrono::Utc>,
    pub file_size: u64,
    pub file_path: String,
    pub fingerprint: String,
    pub metadata: serde_json::Value,
}

/// Outcome of a single smoke test run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SmokeTestResult {
    pub id: String,
    pub name: String,
    pub method: String,
    pub path: String,
    pub description: String,
    pub last_run: Option<chrono::DateTime<chrono::Utc>>,
    pub status: String,
    pub response_time_ms: Option<u64>,
    pub error_message: Option<String>,
    pub status_code: Option<u16>,
    pub duration_seconds: Option<f64>,
}

/// Settings shared by all smoke tests in a run.
#[derive(Debug, Clone)]
pub struct SmokeTestContext {
    pub base_url: String,
    pub timeout_seconds: u64,
    pub parallel: bool,
}

/// Current runtime configuration exposed through the admin API.
#[derive(Debug, Clone, Serialize)]
pub struct ConfigurationState {
    pub latency_profile: LatencyProfile,
    pub fault_config: FaultConfig,
    pub proxy_config: ProxyConfig,
    pub validation_settings: ValidationSettings,
}

/// A recorded workspace import, successful or not.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportHistoryEntry {
    pub id: String,
    pub format: String,
    pub timestamp: chrono::DateTime<chrono::Utc>,
    pub routes_count: usize,
    pub variables_count: usize,
    pub warnings_count: usize,
    pub success: bool,
    pub filename: Option<String>,
    pub environment: Option<String>,
    pub base_url: Option<String>,
    pub error_message: Option<String>,
}

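/// Shared state for the admin UI and its HTTP handlers.
///
/// All mutable pieces are wrapped in `Arc<RwLock<...>>` so the state can be
/// cloned cheaply into axum handlers and background tasks while remaining
/// safely shared across them.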
#[derive(Clone)]
pub struct AdminState {
    pub http_server_addr: Option<std::net::SocketAddr>,
    pub ws_server_addr: Option<std::net::SocketAddr>,
    pub grpc_server_addr: Option<std::net::SocketAddr>,
    pub graphql_server_addr: Option<std::net::SocketAddr>,
    pub api_enabled: bool,
    pub admin_port: u16,
    pub start_time: chrono::DateTime<chrono::Utc>,
    pub metrics: Arc<RwLock<RequestMetrics>>,
    pub system_metrics: Arc<RwLock<SystemMetrics>>,
    pub config: Arc<RwLock<ConfigurationState>>,
    pub logs: Arc<RwLock<Vec<RequestLog>>>,
    pub time_series: Arc<RwLock<TimeSeriesData>>,
    pub restart_status: Arc<RwLock<RestartStatus>>,
    pub smoke_test_results: Arc<RwLock<Vec<SmokeTestResult>>>,
    pub import_history: Arc<RwLock<Vec<ImportHistoryEntry>>>,
    pub workspace_persistence: Arc<WorkspacePersistence>,
    pub plugin_registry: Arc<RwLock<PluginRegistry>>,
    pub reality_engine: Arc<RwLock<mockforge_core::RealityEngine>>,
    pub continuum_engine: Arc<RwLock<mockforge_core::RealityContinuumEngine>>,
    pub chaos_api_state: Option<std::sync::Arc<mockforge_chaos::api::ChaosApiState>>,
    pub latency_injector:
        Option<std::sync::Arc<tokio::sync::RwLock<mockforge_core::latency::LatencyInjector>>>,
    pub mockai:
        Option<std::sync::Arc<tokio::sync::RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
}

impl AdminState {
    /// Spawns a background task that samples CPU, memory, and thread counts
    /// every 10 seconds and feeds them into the time-series buffers.
    pub async fn start_system_monitoring(&self) {
        let state_clone = self.clone();
        tokio::spawn(async move {
            let mut sys = System::new_all();
            let mut refresh_count = 0u64;

            tracing::info!("Starting system monitoring background task");

            loop {
                sys.refresh_all();

                let cpu_usage = sys.global_cpu_usage();

                let _total_memory = sys.total_memory() as f64;
                let used_memory = sys.used_memory() as f64;
                let memory_usage_mb = used_memory / 1024.0 / 1024.0;

                // NB: approximates "active threads" with the logical CPU count.
                let active_threads = sys.cpus().len() as u32;

                let memory_mb_u64 = memory_usage_mb as u64;

                // Log only every 10th refresh to keep the output quiet.
                if refresh_count.is_multiple_of(10) {
                    tracing::debug!(
                        "System metrics updated: CPU={:.1}%, Mem={}MB, Threads={}",
                        cpu_usage,
                        memory_mb_u64,
                        active_threads
                    );
                }

                state_clone
                    .update_system_metrics(memory_mb_u64, cpu_usage as f64, active_threads)
                    .await;

                refresh_count += 1;

                tokio::time::sleep(Duration::from_secs(10)).await;
            }
        });
    }

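    /// Builds a fresh `AdminState` with default metrics, configuration, and
    /// engine instances.
    ///
    /// A minimal construction sketch (hedged: the argument values here are
    /// illustrative, not taken from the original source):
    ///
    /// ```ignore
    /// let state = AdminState::new(
    ///     Some("127.0.0.1:3000".parse().unwrap()), // HTTP server address
    ///     None,  // no WebSocket server
    ///     None,  // no gRPC server
    ///     None,  // no GraphQL server
    ///     true,  // admin API enabled
    ///     9080,  // admin UI port (illustrative)
    ///     None,  // no chaos API state
    ///     None,  // no latency injector
    ///     None,  // no MockAI engine
    ///     None,  // default continuum config
    ///     None,  // real clock
    /// );
    /// state.start_system_monitoring().await;
    /// ```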
    pub fn new(
        http_server_addr: Option<std::net::SocketAddr>,
        ws_server_addr: Option<std::net::SocketAddr>,
        grpc_server_addr: Option<std::net::SocketAddr>,
        graphql_server_addr: Option<std::net::SocketAddr>,
        api_enabled: bool,
        admin_port: u16,
        chaos_api_state: Option<std::sync::Arc<mockforge_chaos::api::ChaosApiState>>,
        latency_injector: Option<
            std::sync::Arc<tokio::sync::RwLock<mockforge_core::latency::LatencyInjector>>,
        >,
        mockai: Option<
            std::sync::Arc<tokio::sync::RwLock<mockforge_core::intelligent_behavior::MockAI>>,
        >,
        continuum_config: Option<mockforge_core::ContinuumConfig>,
        virtual_clock: Option<std::sync::Arc<mockforge_core::VirtualClock>>,
    ) -> Self {
        let start_time = chrono::Utc::now();

        Self {
            http_server_addr,
            ws_server_addr,
            grpc_server_addr,
            graphql_server_addr,
            api_enabled,
            admin_port,
            start_time,
            metrics: Arc::new(RwLock::new(RequestMetrics::default())),
            system_metrics: Arc::new(RwLock::new(SystemMetrics {
                memory_usage_mb: 0,
                cpu_usage_percent: 0.0,
                active_threads: 0,
            })),
            config: Arc::new(RwLock::new(ConfigurationState {
                latency_profile: LatencyProfile {
                    name: "default".to_string(),
                    base_ms: 50,
                    jitter_ms: 20,
                    tag_overrides: HashMap::new(),
                },
                fault_config: FaultConfig {
                    enabled: false,
                    failure_rate: 0.0,
                    status_codes: vec![500, 502, 503],
                    active_failures: 0,
                },
                proxy_config: ProxyConfig {
                    enabled: false,
                    upstream_url: None,
                    timeout_seconds: 30,
                    requests_proxied: 0,
                },
                validation_settings: ValidationSettings {
                    mode: "enforce".to_string(),
                    aggregate_errors: true,
                    validate_responses: false,
                    overrides: HashMap::new(),
                },
            })),
            logs: Arc::new(RwLock::new(Vec::new())),
            time_series: Arc::new(RwLock::new(TimeSeriesData::default())),
            restart_status: Arc::new(RwLock::new(RestartStatus {
                in_progress: false,
                initiated_at: None,
                reason: None,
                success: None,
            })),
            smoke_test_results: Arc::new(RwLock::new(Vec::new())),
            import_history: Arc::new(RwLock::new(Vec::new())),
            workspace_persistence: Arc::new(WorkspacePersistence::new("./workspaces")),
            plugin_registry: Arc::new(RwLock::new(PluginRegistry::new())),
            reality_engine: Arc::new(RwLock::new(mockforge_core::RealityEngine::new())),
            continuum_engine: Arc::new(RwLock::new({
                let config = continuum_config.unwrap_or_default();
                if let Some(clock) = virtual_clock {
                    mockforge_core::RealityContinuumEngine::with_virtual_clock(config, clock)
                } else {
                    mockforge_core::RealityContinuumEngine::new(config)
                }
            })),
            chaos_api_state,
            latency_injector,
            mockai,
        }
    }

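    /// Records a completed request into the in-memory metrics and log buffers.
    ///
    /// A hedged usage sketch (method, path, and timing values are illustrative):
    ///
    /// ```ignore
    /// state.record_request("GET", "/api/users", 200, 12, None).await;
    /// // Failed requests carry a status >= 400 and an optional error message:
    /// state
    ///     .record_request("POST", "/api/orders", 500, 87, Some("upstream timeout".into()))
    ///     .await;
    /// ```
    ///
    /// Response-time history is capped (100 samples globally, 50 per endpoint)
    /// and the request log is capped at 1000 entries, oldest evicted first.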
    pub async fn record_request(
        &self,
        method: &str,
        path: &str,
        status_code: u16,
        response_time_ms: u64,
        error: Option<String>,
    ) {
        let mut metrics = self.metrics.write().await;

        metrics.total_requests += 1;
        let endpoint = format!("{} {}", method, path);
        *metrics.requests_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;

        if status_code >= 400 {
            *metrics.errors_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
        }

        // Keep a rolling window of the last 100 global response times.
        metrics.response_times.push(response_time_ms);
        if metrics.response_times.len() > 100 {
            metrics.response_times.remove(0);
        }

        // Keep a rolling window of the last 50 response times per endpoint.
        let endpoint_times = metrics
            .response_times_by_endpoint
            .entry(endpoint.clone())
            .or_insert_with(Vec::new);
        endpoint_times.push(response_time_ms);
        if endpoint_times.len() > 50 {
            endpoint_times.remove(0);
        }

        metrics.last_request_by_endpoint.insert(endpoint, chrono::Utc::now());

        let total_requests = metrics.total_requests;

        // Release the metrics lock before taking the time-series lock.
        drop(metrics);

        self.update_time_series_on_request(response_time_ms, total_requests).await;

        let mut logs = self.logs.write().await;
        let log_entry = RequestLog {
            id: format!("req_{}", total_requests),
            timestamp: Utc::now(),
            method: method.to_string(),
            path: path.to_string(),
            status_code,
            response_time_ms,
            client_ip: None,
            user_agent: None,
            headers: HashMap::new(),
            response_size_bytes: 0,
            error_message: error,
        };

        logs.push(log_entry);

        // Cap the in-memory log buffer at 1000 entries.
        if logs.len() > 1000 {
            logs.remove(0);
        }
    }

    pub async fn get_metrics(&self) -> RequestMetrics {
        self.metrics.read().await.clone()
    }

    pub async fn update_system_metrics(&self, memory_mb: u64, cpu_percent: f64, threads: u32) {
        let mut system_metrics = self.system_metrics.write().await;
        system_metrics.memory_usage_mb = memory_mb;
        system_metrics.cpu_usage_percent = cpu_percent;
        system_metrics.active_threads = threads;

        self.update_time_series_data(memory_mb as f64, cpu_percent).await;
    }

    async fn update_time_series_data(&self, memory_mb: f64, cpu_percent: f64) {
        let now = chrono::Utc::now();
        let mut time_series = self.time_series.write().await;

        time_series.memory_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: memory_mb,
        });

        time_series.cpu_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: cpu_percent,
        });

        let metrics = self.metrics.read().await;
        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: metrics.total_requests as f64,
        });

        let avg_response_time = if !metrics.response_times.is_empty() {
            metrics.response_times.iter().sum::<u64>() as f64 / metrics.response_times.len() as f64
        } else {
            0.0
        };
        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: avg_response_time,
        });

        // Trim each series to the most recent 100 points.
        const MAX_POINTS: usize = 100;
        if time_series.memory_usage.len() > MAX_POINTS {
            time_series.memory_usage.remove(0);
        }
        if time_series.cpu_usage.len() > MAX_POINTS {
            time_series.cpu_usage.remove(0);
        }
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    pub async fn get_system_metrics(&self) -> SystemMetrics {
        self.system_metrics.read().await.clone()
    }

    pub async fn get_time_series_data(&self) -> TimeSeriesData {
        self.time_series.read().await.clone()
    }

    pub async fn get_restart_status(&self) -> RestartStatus {
        self.restart_status.read().await.clone()
    }

    pub async fn initiate_restart(&self, reason: String) -> Result<()> {
        let mut status = self.restart_status.write().await;

        if status.in_progress {
            return Err(Error::generic("Restart already in progress".to_string()));
        }

        status.in_progress = true;
        status.initiated_at = Some(chrono::Utc::now());
        status.reason = Some(reason);
        status.success = None;

        Ok(())
    }

    pub async fn complete_restart(&self, success: bool) {
        let mut status = self.restart_status.write().await;
        status.in_progress = false;
        status.success = Some(success);
    }

    pub async fn get_smoke_test_results(&self) -> Vec<SmokeTestResult> {
        self.smoke_test_results.read().await.clone()
    }

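    /// Inserts or updates a smoke-test result, matching on `id`; at most 100
    /// results are retained, with the oldest evicted first.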
    pub async fn update_smoke_test_result(&self, result: SmokeTestResult) {
        let mut results = self.smoke_test_results.write().await;

        if let Some(existing) = results.iter_mut().find(|r| r.id == result.id) {
            *existing = result;
        } else {
            results.push(result);
        }

        if results.len() > 100 {
            results.remove(0);
        }
    }

    pub async fn clear_smoke_test_results(&self) {
        let mut results = self.smoke_test_results.write().await;
        results.clear();
    }

    async fn update_time_series_on_request(&self, response_time_ms: u64, total_requests: u64) {
        let now = chrono::Utc::now();
        let mut time_series = self.time_series.write().await;

        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: total_requests as f64,
        });

        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: response_time_ms as f64,
        });

        const MAX_POINTS: usize = 100;
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    pub async fn get_config(&self) -> ConfigurationState {
        self.config.read().await.clone()
    }

    pub async fn update_latency_config(
        &self,
        base_ms: u64,
        jitter_ms: u64,
        tag_overrides: HashMap<String, u64>,
    ) {
        let mut config = self.config.write().await;
        config.latency_profile.base_ms = base_ms;
        config.latency_profile.jitter_ms = jitter_ms;
        config.latency_profile.tag_overrides = tag_overrides;
    }

    pub async fn update_fault_config(
        &self,
        enabled: bool,
        failure_rate: f64,
        status_codes: Vec<u16>,
    ) {
        let mut config = self.config.write().await;
        config.fault_config.enabled = enabled;
        config.fault_config.failure_rate = failure_rate;
        config.fault_config.status_codes = status_codes;
    }

    pub async fn update_proxy_config(
        &self,
        enabled: bool,
        upstream_url: Option<String>,
        timeout_seconds: u64,
    ) {
        let mut config = self.config.write().await;
        config.proxy_config.enabled = enabled;
        config.proxy_config.upstream_url = upstream_url;
        config.proxy_config.timeout_seconds = timeout_seconds;
    }

    pub async fn update_validation_config(
        &self,
        mode: String,
        aggregate_errors: bool,
        validate_responses: bool,
        overrides: HashMap<String, String>,
    ) {
        let mut config = self.config.write().await;
        config.validation_settings.mode = mode;
        config.validation_settings.aggregate_errors = aggregate_errors;
        config.validation_settings.validate_responses = validate_responses;
        config.validation_settings.overrides = overrides;
    }

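    /// Returns logs matching `filter`, newest first, capped at `filter.limit`
    /// (default 100).
    ///
    /// A hedged sketch of building a filter; only the four fields read below
    /// are shown, any other `LogFilter` fields are covered by `Default`:
    ///
    /// ```ignore
    /// let filter = LogFilter {
    ///     method: Some("GET".to_string()),
    ///     path_pattern: Some("/api".to_string()),
    ///     status_code: None,
    ///     limit: Some(50),
    ///     ..Default::default()
    /// };
    /// let logs = state.get_logs_filtered(&filter).await;
    /// ```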
    pub async fn get_logs_filtered(&self, filter: &LogFilter) -> Vec<RequestLog> {
        let logs = self.logs.read().await;

        logs.iter()
            .rev() // newest first
            .filter(|log| {
                if let Some(ref method) = filter.method {
                    if log.method != *method {
                        return false;
                    }
                }
                if let Some(ref path_pattern) = filter.path_pattern {
                    if !log.path.contains(path_pattern) {
                        return false;
                    }
                }
                if let Some(status) = filter.status_code {
                    if log.status_code != status {
                        return false;
                    }
                }
                true
            })
            .take(filter.limit.unwrap_or(100))
            .cloned()
            .collect()
    }

    pub async fn clear_logs(&self) {
        let mut logs = self.logs.write().await;
        logs.clear();
    }
}

pub async fn serve_admin_html() -> Html<&'static str> {
    Html(crate::get_admin_html())
}

pub async fn serve_admin_css() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "text/css")], crate::get_admin_css())
}

pub async fn serve_admin_js() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "application/javascript")], crate::get_admin_js())
}

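/// GET handler that assembles the dashboard payload: server statuses,
/// aggregate metrics, per-route stats, and recent logs. Metrics are derived
/// from the centralized request logger when one is registered, otherwise
/// from the in-memory `AdminState` buffers.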
pub async fn get_dashboard(State(state): State<AdminState>) -> Json<ApiResponse<DashboardData>> {
    let uptime = Utc::now().signed_duration_since(state.start_time).num_seconds() as u64;

    let system_metrics = state.get_system_metrics().await;
    let _config = state.get_config().await;

    // Prefer the centralized request logger when available; fall back to the
    // in-memory AdminState buffers otherwise.
    let (recent_logs, calculated_metrics): (Vec<RequestLog>, RequestMetrics) =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            let all_logs = global_logger.get_recent_logs(None).await;
            let recent_logs_subset = global_logger.get_recent_logs(Some(20)).await;

            let total_requests = all_logs.len() as u64;
            let mut requests_by_endpoint = HashMap::new();
            let mut errors_by_endpoint = HashMap::new();
            let mut response_times = Vec::new();
            let mut last_request_by_endpoint = HashMap::new();

            for log in &all_logs {
                let endpoint_key = format!("{} {}", log.method, log.path);
                *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;

                if log.status_code >= 400 {
                    *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
                }

                response_times.push(log.response_time_ms);
                last_request_by_endpoint.insert(endpoint_key, log.timestamp);
            }

            let calculated_metrics = RequestMetrics {
                total_requests,
                active_connections: 0, // not tracked by the centralized logger
                requests_by_endpoint,
                response_times,
                response_times_by_endpoint: HashMap::new(), // not needed for the dashboard
                errors_by_endpoint,
                last_request_by_endpoint,
            };

            let recent_logs = recent_logs_subset
                .into_iter()
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            (recent_logs, calculated_metrics)
        } else {
            let logs = state.logs.read().await;
            let recent_logs = logs.iter().rev().take(10).cloned().collect();
            let metrics = state.get_metrics().await;
            (recent_logs, metrics)
        };

    let metrics = calculated_metrics;

    let system_info = SystemInfo {
        version: env!("CARGO_PKG_VERSION").to_string(),
        uptime_seconds: uptime,
        memory_usage_mb: system_metrics.memory_usage_mb,
        cpu_usage_percent: system_metrics.cpu_usage_percent,
        active_threads: system_metrics.active_threads as usize,
        total_routes: metrics.requests_by_endpoint.len(),
        total_fixtures: count_fixtures().unwrap_or(0),
    };

    let servers = vec![
        ServerStatus {
            server_type: "HTTP".to_string(),
            address: state.http_server_addr.map(|addr| addr.to_string()),
            running: state.http_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections,
            total_requests: count_requests_by_server_type(&metrics, "HTTP"),
        },
        ServerStatus {
            server_type: "WebSocket".to_string(),
            address: state.ws_server_addr.map(|addr| addr.to_string()),
            running: state.ws_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 2, // rough estimate
            total_requests: count_requests_by_server_type(&metrics, "WebSocket"),
        },
        ServerStatus {
            server_type: "gRPC".to_string(),
            address: state.grpc_server_addr.map(|addr| addr.to_string()),
            running: state.grpc_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 3, // rough estimate
            total_requests: count_requests_by_server_type(&metrics, "gRPC"),
        },
    ];

    let mut routes = Vec::new();
    for (endpoint, count) in &metrics.requests_by_endpoint {
        let parts: Vec<&str> = endpoint.splitn(2, ' ').collect();
        if parts.len() == 2 {
            let method = parts[0].to_string();
            let path = parts[1].to_string();
            let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);

            routes.push(RouteInfo {
                method: Some(method.clone()),
                path: path.clone(),
                priority: 0,
                has_fixtures: route_has_fixtures(&method, &path),
                latency_ms: calculate_endpoint_latency(&metrics, endpoint),
                request_count: *count,
                last_request: get_endpoint_last_request(&metrics, endpoint),
                error_count,
            });
        }
    }

    let dashboard = DashboardData {
        server_info: ServerInfo {
            version: env!("CARGO_PKG_VERSION").to_string(),
            build_time: option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("unknown").to_string(),
            git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("unknown").to_string(),
            http_server: state.http_server_addr.map(|addr| addr.to_string()),
            ws_server: state.ws_server_addr.map(|addr| addr.to_string()),
            grpc_server: state.grpc_server_addr.map(|addr| addr.to_string()),
            graphql_server: state.graphql_server_addr.map(|addr| addr.to_string()),
            api_enabled: state.api_enabled,
            admin_port: state.admin_port,
        },
        system_info: DashboardSystemInfo {
            os: std::env::consts::OS.to_string(),
            arch: std::env::consts::ARCH.to_string(),
            uptime,
            memory_usage: system_metrics.memory_usage_mb * 1024 * 1024, // bytes
        },
        metrics: SimpleMetricsData {
            total_requests: metrics.requests_by_endpoint.values().sum(),
            active_requests: metrics.active_connections,
            average_response_time: if metrics.response_times.is_empty() {
                0.0
            } else {
                metrics.response_times.iter().sum::<u64>() as f64
                    / metrics.response_times.len() as f64
            },
            error_rate: {
                let total_requests = metrics.requests_by_endpoint.values().sum::<u64>();
                let total_errors = metrics.errors_by_endpoint.values().sum::<u64>();
                if total_requests == 0 {
                    0.0
                } else {
                    total_errors as f64 / total_requests as f64
                }
            },
        },
        servers,
        recent_logs,
        system: system_info,
    };

    Json(ApiResponse::success(dashboard))
}

967
968pub async fn get_routes(State(state): State<AdminState>) -> impl IntoResponse {
970 if let Some(http_addr) = state.http_server_addr {
971 let url = format!("http://{}/__mockforge/routes", http_addr);
973 if let Ok(response) = reqwest::get(&url).await {
974 if response.status().is_success() {
975 if let Ok(body) = response.text().await {
976 return (StatusCode::OK, [("content-type", "application/json")], body);
977 }
978 }
979 }
980 }
981
982 (
984 StatusCode::OK,
985 [("content-type", "application/json")],
986 r#"{"routes":[]}"#.to_string(),
987 )
988}
989
990pub async fn get_server_info(State(state): State<AdminState>) -> Json<serde_json::Value> {
992 Json(json!({
993 "http_server": state.http_server_addr.map(|addr| addr.to_string()),
994 "ws_server": state.ws_server_addr.map(|addr| addr.to_string()),
995 "grpc_server": state.grpc_server_addr.map(|addr| addr.to_string()),
996 "admin_port": state.admin_port
997 }))
998}
999
1000pub async fn get_health() -> Json<HealthCheck> {
1002 Json(
1003 HealthCheck::healthy()
1004 .with_service("http".to_string(), "healthy".to_string())
1005 .with_service("websocket".to_string(), "healthy".to_string())
1006 .with_service("grpc".to_string(), "healthy".to_string()),
1007 )
1008}
1009
1010pub async fn get_logs(
1012 State(state): State<AdminState>,
1013 Query(params): Query<HashMap<String, String>>,
1014) -> Json<ApiResponse<Vec<RequestLog>>> {
1015 let mut filter = LogFilter::default();
1016
1017 if let Some(method) = params.get("method") {
1018 filter.method = Some(method.clone());
1019 }
1020 if let Some(path) = params.get("path") {
1021 filter.path_pattern = Some(path.clone());
1022 }
1023 if let Some(status) = params.get("status").and_then(|s| s.parse().ok()) {
1024 filter.status_code = Some(status);
1025 }
1026 if let Some(limit) = params.get("limit").and_then(|s| s.parse().ok()) {
1027 filter.limit = Some(limit);
1028 }
1029
1030 let logs = if let Some(global_logger) = mockforge_core::get_global_logger() {
1032 let centralized_logs = global_logger.get_recent_logs(filter.limit).await;
1034
1035 centralized_logs
1037 .into_iter()
1038 .filter(|log| {
1039 if let Some(ref method) = filter.method {
1040 if log.method != *method {
1041 return false;
1042 }
1043 }
1044 if let Some(ref path_pattern) = filter.path_pattern {
1045 if !log.path.contains(path_pattern) {
1046 return false;
1047 }
1048 }
1049 if let Some(status) = filter.status_code {
1050 if log.status_code != status {
1051 return false;
1052 }
1053 }
1054 true
1055 })
1056 .map(|log| RequestLog {
1057 id: log.id,
1058 timestamp: log.timestamp,
1059 method: log.method,
1060 path: log.path,
1061 status_code: log.status_code,
1062 response_time_ms: log.response_time_ms,
1063 client_ip: log.client_ip,
1064 user_agent: log.user_agent,
1065 headers: log.headers,
1066 response_size_bytes: log.response_size_bytes,
1067 error_message: log.error_message,
1068 })
1069 .collect()
1070 } else {
1071 state.get_logs_filtered(&filter).await
1073 };
1074
1075 Json(ApiResponse::success(logs))
1076}
1077
1078pub async fn get_reality_trace(
1082 Path(request_id): Path<String>,
1083) -> Json<ApiResponse<Option<mockforge_core::request_logger::RealityTraceMetadata>>> {
1084 if let Some(global_logger) = mockforge_core::get_global_logger() {
1085 let logs = global_logger.get_recent_logs(None).await;
1086 if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
1087 Json(ApiResponse::success(log_entry.reality_metadata))
1088 } else {
1089 Json(ApiResponse::error(format!("Request {} not found", request_id)))
1090 }
1091 } else {
1092 Json(ApiResponse::error("Request logger not initialized".to_string()))
1093 }
1094}
1095
1096pub async fn get_response_trace(
1100 Path(request_id): Path<String>,
1101) -> Json<ApiResponse<Option<serde_json::Value>>> {
1102 if let Some(global_logger) = mockforge_core::get_global_logger() {
1103 let logs = global_logger.get_recent_logs(None).await;
1104 if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
1105 let trace = log_entry
1108 .metadata
1109 .get("response_generation_trace")
1110 .and_then(|s| serde_json::from_str::<serde_json::Value>(s).ok());
1111 Json(ApiResponse::success(trace))
1112 } else {
1113 Json(ApiResponse::error(format!("Request {} not found", request_id)))
1114 }
1115 } else {
1116 Json(ApiResponse::error("Request logger not initialized".to_string()))
1117 }
1118}
1119
1120const RECENT_LOGS_LIMIT: usize = 20;
1122const RECENT_LOGS_TTL_MINUTES: i64 = 5;
1123
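/// SSE endpoint that streams newly observed request logs.
///
/// The stream polls the global logger every 500ms and emits two event types,
/// sketched below (the shape follows the `Event` construction in the body;
/// the data payload shown is illustrative):
///
/// ```text
/// event: new_logs
/// data: [{"id":"req_1","method":"GET","path":"/api/users", ...}]
///
/// event: keep_alive
/// data:
/// ```
///
/// Only logs newer than a 5-minute TTL that have not yet been sent to this
/// client are included.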
pub async fn logs_sse(
    State(_state): State<AdminState>,
) -> Sse<impl Stream<Item = std::result::Result<Event, Infallible>>> {
    tracing::info!(
        "SSE endpoint /logs/sse accessed - starting real-time log streaming for recent requests only"
    );

    let stream = stream::unfold(std::collections::HashSet::new(), |mut seen_ids| async move {
        tokio::time::sleep(Duration::from_millis(500)).await;

        if let Some(global_logger) = mockforge_core::get_global_logger() {
            let centralized_logs = global_logger.get_recent_logs(Some(RECENT_LOGS_LIMIT)).await;

            tracing::debug!(
                "SSE: Checking logs - total logs: {}, seen logs: {}",
                centralized_logs.len(),
                seen_ids.len()
            );

            let now = chrono::Utc::now();
            let ttl_cutoff = now - chrono::Duration::minutes(RECENT_LOGS_TTL_MINUTES);

            let new_logs: Vec<RequestLog> = centralized_logs
                .into_iter()
                .filter(|log| log.timestamp > ttl_cutoff && !seen_ids.contains(&log.id))
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            for log in &new_logs {
                seen_ids.insert(log.id.clone());
            }

            if !new_logs.is_empty() {
                tracing::info!("SSE: Sending {} new logs to client", new_logs.len());

                let event_data = serde_json::to_string(&new_logs).unwrap_or_default();
                let event = Ok(Event::default().event("new_logs").data(event_data));

                return Some((event, seen_ids));
            }
        }

        let event = Ok(Event::default().event("keep_alive").data(""));
        Some((event, seen_ids))
    });

    Sse::new(stream).keep_alive(
        axum::response::sse::KeepAlive::new()
            .interval(Duration::from_secs(15))
            .text("keep-alive-text"),
    )
}

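/// GET handler returning aggregated metrics: request counts, global and
/// per-endpoint response-time percentiles (p50..p999), error rates, and
/// memory/CPU/latency time series.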
pub async fn get_metrics(State(state): State<AdminState>) -> Json<ApiResponse<MetricsData>> {
    let metrics = if let Some(global_logger) = mockforge_core::get_global_logger() {
        let all_logs = global_logger.get_recent_logs(None).await;

        let total_requests = all_logs.len() as u64;
        let mut requests_by_endpoint = HashMap::new();
        let mut errors_by_endpoint = HashMap::new();
        let mut response_times = Vec::new();
        let mut last_request_by_endpoint = HashMap::new();

        for log in &all_logs {
            let endpoint_key = format!("{} {}", log.method, log.path);
            *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;

            if log.status_code >= 400 {
                *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
            }

            response_times.push(log.response_time_ms);
            last_request_by_endpoint.insert(endpoint_key, log.timestamp);
        }

        RequestMetrics {
            total_requests,
            active_connections: 0,
            requests_by_endpoint,
            response_times,
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint,
            last_request_by_endpoint,
        }
    } else {
        state.get_metrics().await
    };

    let system_metrics = state.get_system_metrics().await;
    let time_series = state.get_time_series_data().await;

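    // Nearest-rank percentile over pre-sorted data. Worked example: for
    // sorted [10, 20, 30, 40] and percentile = 0.50, ceil(4 * 0.5) = 2,
    // clamped to the last index if needed, and used directly as a zero-based
    // index, so the result is sorted[2] = 30.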
    fn calculate_percentile(sorted_data: &[u64], percentile: f64) -> u64 {
        if sorted_data.is_empty() {
            return 0;
        }
        let idx = ((sorted_data.len() as f64) * percentile).ceil() as usize;
        let idx = idx.min(sorted_data.len().saturating_sub(1));
        sorted_data[idx]
    }

    let mut response_times = metrics.response_times.clone();
    response_times.sort();

    let p50 = calculate_percentile(&response_times, 0.50);
    let p75 = calculate_percentile(&response_times, 0.75);
    let p90 = calculate_percentile(&response_times, 0.90);
    let p95 = calculate_percentile(&response_times, 0.95);
    let p99 = calculate_percentile(&response_times, 0.99);
    let p999 = calculate_percentile(&response_times, 0.999);

    let mut response_times_by_endpoint: HashMap<String, Vec<u64>> = HashMap::new();
    if let Some(global_logger) = mockforge_core::get_global_logger() {
        let all_logs = global_logger.get_recent_logs(None).await;
        for log in &all_logs {
            let endpoint_key = format!("{} {}", log.method, log.path);
            response_times_by_endpoint
                .entry(endpoint_key)
                .or_insert_with(Vec::new)
                .push(log.response_time_ms);
        }
    }

    let mut endpoint_percentiles: HashMap<String, HashMap<String, u64>> = HashMap::new();
    for (endpoint, times) in &mut response_times_by_endpoint {
        times.sort();
        if !times.is_empty() {
            endpoint_percentiles.insert(
                endpoint.clone(),
                HashMap::from([
                    ("p50".to_string(), calculate_percentile(times, 0.50)),
                    ("p75".to_string(), calculate_percentile(times, 0.75)),
                    ("p90".to_string(), calculate_percentile(times, 0.90)),
                    ("p95".to_string(), calculate_percentile(times, 0.95)),
                    ("p99".to_string(), calculate_percentile(times, 0.99)),
                    ("p999".to_string(), calculate_percentile(times, 0.999)),
                ]),
            );
        }
    }

    let mut error_rate_by_endpoint = HashMap::new();
    for (endpoint, total_count) in &metrics.requests_by_endpoint {
        let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
        let error_rate = if *total_count > 0 {
            error_count as f64 / *total_count as f64
        } else {
            0.0
        };
        error_rate_by_endpoint.insert(endpoint.clone(), error_rate);
    }

    let memory_usage_over_time = if time_series.memory_usage.is_empty() {
        vec![(Utc::now(), system_metrics.memory_usage_mb)]
    } else {
        time_series
            .memory_usage
            .iter()
            .map(|point| (point.timestamp, point.value as u64))
            .collect()
    };

    let cpu_usage_over_time = if time_series.cpu_usage.is_empty() {
        vec![(Utc::now(), system_metrics.cpu_usage_percent)]
    } else {
        time_series
            .cpu_usage
            .iter()
            .map(|point| (point.timestamp, point.value))
            .collect()
    };

    let latency_over_time: Vec<(chrono::DateTime<chrono::Utc>, u64)> =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            let all_logs = global_logger.get_recent_logs(Some(100)).await;
            all_logs.iter().map(|log| (log.timestamp, log.response_time_ms)).collect()
        } else {
            Vec::new()
        };

    let metrics_data = MetricsData {
        requests_by_endpoint: metrics.requests_by_endpoint,
        response_time_percentiles: HashMap::from([
            ("p50".to_string(), p50),
            ("p75".to_string(), p75),
            ("p90".to_string(), p90),
            ("p95".to_string(), p95),
            ("p99".to_string(), p99),
            ("p999".to_string(), p999),
        ]),
        endpoint_percentiles: Some(endpoint_percentiles),
        latency_over_time: Some(latency_over_time),
        error_rate_by_endpoint,
        memory_usage_over_time,
        cpu_usage_over_time,
    };

    Json(ApiResponse::success(metrics_data))
}

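/// POST handler that updates the latency profile and records an audit entry.
///
/// A hedged sketch of the expected `ConfigUpdate` payload; the `config_type`
/// and `data` field names come from the accessors below, and any other
/// fields on `ConfigUpdate` are out of scope here:
///
/// ```ignore
/// // JSON body sent by the admin UI (values illustrative):
/// // { "config_type": "latency",
/// //   "data": { "base_ms": 100, "jitter_ms": 25,
/// //             "tag_overrides": { "slow-endpoint": 500 } } }
/// ```
///
/// Missing fields fall back to defaults (`base_ms=50`, `jitter_ms=20`,
/// empty overrides).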
pub async fn update_latency(
    State(state): State<AdminState>,
    headers: axum::http::HeaderMap,
    Json(update): Json<ConfigUpdate>,
) -> Json<ApiResponse<String>> {
    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
    use crate::rbac::{extract_user_context, get_default_user_context};

    if update.config_type != "latency" {
        return Json(ApiResponse::error("Invalid config type".to_string()));
    }

    let base_ms = update.data.get("base_ms").and_then(|v| v.as_u64()).unwrap_or(50);
    let jitter_ms = update.data.get("jitter_ms").and_then(|v| v.as_u64()).unwrap_or(20);

    let tag_overrides: std::collections::HashMap<String, u64> = update
        .data
        .get("tag_overrides")
        .and_then(|v| v.as_object())
        .map(|obj| obj.iter().filter_map(|(k, v)| v.as_u64().map(|val| (k.clone(), val))).collect())
        .unwrap_or_default();

    state.update_latency_config(base_ms, jitter_ms, tag_overrides.clone()).await;

    if let Some(audit_store) = get_global_audit_store() {
        let metadata = serde_json::json!({
            "base_ms": base_ms,
            "jitter_ms": jitter_ms,
            "tag_overrides": tag_overrides,
        });
        let mut audit_log = create_audit_log(
            AdminActionType::ConfigLatencyUpdated,
            format!("Latency profile updated: base_ms={}, jitter_ms={}", base_ms, jitter_ms),
            None,
            true,
            None,
            Some(metadata),
        );

        if let Some(user_ctx) = extract_user_context(&headers).or_else(get_default_user_context) {
            audit_log.user_id = Some(user_ctx.user_id);
            audit_log.username = Some(user_ctx.username);
        }

        if let Some(ip) = headers
            .get("x-forwarded-for")
            .or_else(|| headers.get("x-real-ip"))
            .and_then(|h| h.to_str().ok())
        {
            audit_log.ip_address = Some(ip.to_string());
        }

        if let Some(ua) = headers.get("user-agent").and_then(|h| h.to_str().ok()) {
            audit_log.user_agent = Some(ua.to_string());
        }

        audit_store.record(audit_log).await;
    }

    tracing::info!("Updated latency profile: base_ms={}, jitter_ms={}", base_ms, jitter_ms);

    Json(ApiResponse::success("Latency profile updated".to_string()))
}

pub async fn update_faults(
    State(state): State<AdminState>,
    Json(update): Json<ConfigUpdate>,
) -> Json<ApiResponse<String>> {
    if update.config_type != "faults" {
        return Json(ApiResponse::error("Invalid config type".to_string()));
    }

    let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);

    let failure_rate = update.data.get("failure_rate").and_then(|v| v.as_f64()).unwrap_or(0.0);

    let status_codes = update
        .data
        .get("status_codes")
        .and_then(|v| v.as_array())
        .map(|arr| arr.iter().filter_map(|v| v.as_u64().map(|n| n as u16)).collect())
        .unwrap_or_else(|| vec![500, 502, 503]);

    state.update_fault_config(enabled, failure_rate, status_codes).await;

    tracing::info!(
        "Updated fault configuration: enabled={}, failure_rate={}",
        enabled,
        failure_rate
    );

    Json(ApiResponse::success("Fault configuration updated".to_string()))
}

pub async fn update_proxy(
    State(state): State<AdminState>,
    Json(update): Json<ConfigUpdate>,
) -> Json<ApiResponse<String>> {
    if update.config_type != "proxy" {
        return Json(ApiResponse::error("Invalid config type".to_string()));
    }

    let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);

    let upstream_url =
        update.data.get("upstream_url").and_then(|v| v.as_str()).map(|s| s.to_string());

    let timeout_seconds = update.data.get("timeout_seconds").and_then(|v| v.as_u64()).unwrap_or(30);

    state.update_proxy_config(enabled, upstream_url.clone(), timeout_seconds).await;

    tracing::info!(
        "Updated proxy configuration: enabled={}, upstream_url={:?}",
        enabled,
        upstream_url
    );

    Json(ApiResponse::success("Proxy configuration updated".to_string()))
}

pub async fn clear_logs(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    state.clear_logs().await;
    tracing::info!("Cleared all request logs");

    Json(ApiResponse::success("Logs cleared".to_string()))
}

pub async fn restart_servers(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};

    let current_status = state.get_restart_status().await;
    if current_status.in_progress {
        return Json(ApiResponse::error("Server restart already in progress".to_string()));
    }

    let restart_result = state
        .initiate_restart("Manual restart requested via admin UI".to_string())
        .await;

    let success = restart_result.is_ok();
    let error_msg = restart_result.as_ref().err().map(|e| format!("{}", e));

    if let Some(audit_store) = get_global_audit_store() {
        let audit_log = create_audit_log(
            AdminActionType::ServerRestarted,
            "Server restart initiated via admin UI".to_string(),
            None,
            success,
            error_msg.clone(),
            None,
        );
        audit_store.record(audit_log).await;
    }

    if let Err(e) = restart_result {
        return Json(ApiResponse::error(format!("Failed to initiate restart: {}", e)));
    }

    let state_clone = state.clone();
    tokio::spawn(async move {
        if let Err(e) = perform_server_restart(&state_clone).await {
            tracing::error!("Server restart failed: {}", e);
            state_clone.complete_restart(false).await;
        } else {
            tracing::info!("Server restart completed successfully");
            state_clone.complete_restart(true).await;
        }
    });

    tracing::info!("Server restart initiated via admin UI");
    Json(ApiResponse::success(
        "Server restart initiated. Please wait for completion.".to_string(),
    ))
}

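/// Attempts to restart the server process, trying three strategies in order:
/// 1. signal the parent process (SIGTERM, Unix only),
/// 2. spawn a replacement process with the same arguments and exit,
/// 3. run a `restart.sh` script if one is present.
///
/// Errors only if every strategy fails.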
async fn perform_server_restart(_state: &AdminState) -> Result<()> {
    let current_pid = std::process::id();
    tracing::info!("Initiating restart for process PID: {}", current_pid);

    let parent_pid = get_parent_process_id(current_pid).await?;
    tracing::info!("Found parent process PID: {}", parent_pid);

    if let Ok(()) = restart_via_parent_signal(parent_pid).await {
        tracing::info!("Restart initiated via parent process signal");
        return Ok(());
    }

    if let Ok(()) = restart_via_process_replacement().await {
        tracing::info!("Restart initiated via process replacement");
        return Ok(());
    }

    restart_via_script().await
}

async fn get_parent_process_id(pid: u32) -> Result<u32> {
    #[cfg(target_os = "linux")]
    {
        let stat_path = format!("/proc/{}/stat", pid);
        if let Ok(ppid) = tokio::task::spawn_blocking(move || -> Result<u32> {
            let content = std::fs::read_to_string(&stat_path)
                .map_err(|e| Error::generic(format!("Failed to read {}: {}", stat_path, e)))?;

            // The fourth field of /proc/<pid>/stat is the parent PID.
            let fields: Vec<&str> = content.split_whitespace().collect();
            if fields.len() > 3 {
                fields[3]
                    .parse::<u32>()
                    .map_err(|e| Error::generic(format!("Failed to parse PPID: {}", e)))
            } else {
                Err(Error::generic("Insufficient fields in /proc/pid/stat".to_string()))
            }
        })
        .await
        {
            return ppid;
        }
    }

    // Fall back to PID 1 (init) when the parent cannot be determined.
    Ok(1)
}

async fn restart_via_parent_signal(parent_pid: u32) -> Result<()> {
    #[cfg(unix)]
    {
        use std::process::Command;

        let output = Command::new("kill")
            .args(["-TERM", &parent_pid.to_string()])
            .output()
            .map_err(|e| Error::generic(format!("Failed to send signal: {}", e)))?;

        if !output.status.success() {
            return Err(Error::generic(
                "Failed to send restart signal to parent process".to_string(),
            ));
        }

        tokio::time::sleep(Duration::from_millis(100)).await;
        Ok(())
    }

    #[cfg(not(unix))]
    {
        Err(Error::generic(
            "Signal-based restart not supported on this platform".to_string(),
        ))
    }
}

async fn restart_via_process_replacement() -> Result<()> {
    let current_exe = std::env::current_exe()
        .map_err(|e| Error::generic(format!("Failed to get current executable: {}", e)))?;

    let args: Vec<String> = std::env::args().collect();

    tracing::info!("Restarting with command: {:?}", args);

    let mut child = Command::new(&current_exe)
        .args(&args[1..]) // skip the program name itself
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .spawn()
        .map_err(|e| Error::generic(format!("Failed to start new process: {}", e)))?;

    tokio::time::sleep(Duration::from_millis(500)).await;

    match child.try_wait() {
        Ok(Some(status)) => {
            if status.success() {
                tracing::info!("New process started successfully");
                Ok(())
            } else {
                Err(Error::generic("New process exited with error".to_string()))
            }
        }
        Ok(None) => {
            tracing::info!("New process is running, exiting current process");
            std::process::exit(0);
        }
        Err(e) => Err(Error::generic(format!("Failed to check new process status: {}", e))),
    }
}

async fn restart_via_script() -> Result<()> {
    let script_paths = ["./scripts/restart.sh", "./restart.sh", "restart.sh"];

    for script_path in &script_paths {
        if std::path::Path::new(script_path).exists() {
            tracing::info!("Using restart script: {}", script_path);

            let output = Command::new("bash")
                .arg(script_path)
                .output()
                .map_err(|e| Error::generic(format!("Failed to execute restart script: {}", e)))?;

            if output.status.success() {
                return Ok(());
            } else {
                tracing::warn!(
                    "Restart script failed: {}",
                    String::from_utf8_lossy(&output.stderr)
                );
            }
        }
    }

    let clear_script = "./scripts/clear-ports.sh";
    if std::path::Path::new(clear_script).exists() {
        tracing::info!("Using clear-ports script as fallback");

        let _ = Command::new("bash").arg(clear_script).output();
    }

    Err(Error::generic(
        "No restart mechanism available. Please restart manually.".to_string(),
    ))
}

pub async fn get_restart_status(
    State(state): State<AdminState>,
) -> Json<ApiResponse<RestartStatus>> {
    let status = state.get_restart_status().await;
    Json(ApiResponse::success(status))
}

pub async fn get_audit_logs(
    Query(params): Query<std::collections::HashMap<String, String>>,
) -> Json<ApiResponse<Vec<crate::audit::AdminAuditLog>>> {
    use crate::audit::{get_global_audit_store, AdminActionType};

    let action_type_str = params.get("action_type");
    let user_id = params.get("user_id").map(|s| s.as_str());
    let limit = params.get("limit").and_then(|s| s.parse::<usize>().ok());
    let offset = params.get("offset").and_then(|s| s.parse::<usize>().ok());

    let action_type = action_type_str.and_then(|s| match s.as_str() {
        "config_latency_updated" => Some(AdminActionType::ConfigLatencyUpdated),
        "config_faults_updated" => Some(AdminActionType::ConfigFaultsUpdated),
        "server_restarted" => Some(AdminActionType::ServerRestarted),
        "logs_cleared" => Some(AdminActionType::LogsCleared),
        _ => None,
    });

    if let Some(audit_store) = get_global_audit_store() {
        let logs = audit_store.get_logs(action_type, user_id, limit, offset).await;
        Json(ApiResponse::success(logs))
    } else {
        Json(ApiResponse::error("Audit logging not initialized".to_string()))
    }
}

pub async fn get_audit_stats() -> Json<ApiResponse<crate::audit::AuditLogStats>> {
    use crate::audit::get_global_audit_store;

    if let Some(audit_store) = get_global_audit_store() {
        let stats = audit_store.get_stats().await;
        Json(ApiResponse::success(stats))
    } else {
        Json(ApiResponse::error("Audit logging not initialized".to_string()))
    }
}

pub async fn get_config(State(state): State<AdminState>) -> Json<ApiResponse<serde_json::Value>> {
    let config_state = state.get_config().await;

    let config = json!({
        "latency": {
            "enabled": true,
            "base_ms": config_state.latency_profile.base_ms,
            "jitter_ms": config_state.latency_profile.jitter_ms,
            "tag_overrides": config_state.latency_profile.tag_overrides
        },
        "faults": {
            "enabled": config_state.fault_config.enabled,
            "failure_rate": config_state.fault_config.failure_rate,
            "status_codes": config_state.fault_config.status_codes
        },
        "proxy": {
            "enabled": config_state.proxy_config.enabled,
            "upstream_url": config_state.proxy_config.upstream_url,
            "timeout_seconds": config_state.proxy_config.timeout_seconds
        },
        "validation": {
            "mode": config_state.validation_settings.mode,
            "aggregate_errors": config_state.validation_settings.aggregate_errors,
            "validate_responses": config_state.validation_settings.validate_responses,
            "overrides": config_state.validation_settings.overrides
        }
    });

    Json(ApiResponse::success(config))
}

pub fn count_fixtures() -> Result<usize> {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Ok(0);
    }

    let mut total_count = 0;

    let http_fixtures_path = fixtures_path.join("http");
    if http_fixtures_path.exists() {
        total_count += count_fixtures_in_directory(&http_fixtures_path)?;
    }

    let ws_fixtures_path = fixtures_path.join("websocket");
    if ws_fixtures_path.exists() {
        total_count += count_fixtures_in_directory(&ws_fixtures_path)?;
    }

    let grpc_fixtures_path = fixtures_path.join("grpc");
    if grpc_fixtures_path.exists() {
        total_count += count_fixtures_in_directory(&grpc_fixtures_path)?;
    }

    Ok(total_count)
}

fn count_fixtures_in_directory(dir_path: &std::path::Path) -> Result<usize> {
    let mut count = 0;

    if let Ok(entries) = std::fs::read_dir(dir_path) {
        for entry in entries {
            let entry = entry
                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
            let path = entry.path();

            if path.is_dir() {
                count += count_fixtures_in_directory(&path)?;
            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
                count += 1;
            }
        }
    }

    Ok(count)
}

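/// Checks whether any JSON fixture exists for the given route.
///
/// The on-disk layout assumed by this lookup (derived from the path
/// construction below; the concrete file names are illustrative):
///
/// ```text
/// fixtures/
///   http/
///     get/
///       _api_users/
///         <fixture>.json
///   websocket/
///     _ws_chat/
///       <fixture>.json
/// ```
///
/// Path separators and `:` characters in the route are replaced with `_`.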
pub fn route_has_fixtures(method: &str, path: &str) -> bool {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return false;
    }

    let method_lower = method.to_lowercase();
    let path_hash = path.replace(['/', ':'], "_");
    let http_fixtures_path = fixtures_path.join("http").join(&method_lower).join(&path_hash);

    if http_fixtures_path.exists() {
        if let Ok(entries) = std::fs::read_dir(&http_fixtures_path) {
            for entry in entries.flatten() {
                if entry.path().extension().and_then(|s| s.to_str()) == Some("json") {
                    return true;
                }
            }
        }
    }

    if method.to_uppercase() == "WS" {
        let ws_fixtures_path = fixtures_path.join("websocket").join(&path_hash);

        if ws_fixtures_path.exists() {
            if let Ok(entries) = std::fs::read_dir(&ws_fixtures_path) {
                for entry in entries.flatten() {
                    if entry.path().extension().and_then(|s| s.to_str()) == Some("json") {
                        return true;
                    }
                }
            }
        }
    }

    false
}

fn calculate_endpoint_latency(metrics: &RequestMetrics, endpoint: &str) -> Option<u64> {
    metrics.response_times_by_endpoint.get(endpoint).and_then(|times| {
        if times.is_empty() {
            None
        } else {
            let sum: u64 = times.iter().sum();
            Some(sum / times.len() as u64)
        }
    })
}

fn get_endpoint_last_request(
    metrics: &RequestMetrics,
    endpoint: &str,
) -> Option<chrono::DateTime<chrono::Utc>> {
    metrics.last_request_by_endpoint.get(endpoint).copied()
}

fn count_requests_by_server_type(metrics: &RequestMetrics, server_type: &str) -> u64 {
    match server_type {
        "HTTP" => metrics
            .requests_by_endpoint
            .iter()
            .filter(|(endpoint, _)| {
                let method = endpoint.split(' ').next().unwrap_or("");
                matches!(
                    method,
                    "GET" | "POST" | "PUT" | "DELETE" | "PATCH" | "HEAD" | "OPTIONS"
                )
            })
            .map(|(_, count)| count)
            .sum(),
        "WebSocket" => metrics
            .requests_by_endpoint
            .iter()
            .filter(|(endpoint, _)| {
                let method = endpoint.split(' ').next().unwrap_or("");
                method == "WS"
            })
            .map(|(_, count)| count)
            .sum(),
        "gRPC" => metrics
            .requests_by_endpoint
            .iter()
            .filter(|(endpoint, _)| {
                let method = endpoint.split(' ').next().unwrap_or("");
                method == "gRPC"
            })
            .map(|(_, count)| count)
            .sum(),
        _ => 0,
    }
}

1964pub async fn get_fixtures() -> Json<ApiResponse<Vec<FixtureInfo>>> {
1966 match scan_fixtures_directory() {
1967 Ok(fixtures) => Json(ApiResponse::success(fixtures)),
1968 Err(e) => {
1969 tracing::error!("Failed to scan fixtures directory: {}", e);
1970 Json(ApiResponse::error(format!("Failed to load fixtures: {}", e)))
1971 }
1972 }
1973}
1974
1975fn scan_fixtures_directory() -> Result<Vec<FixtureInfo>> {
1977 let fixtures_dir =
1978 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1979 let fixtures_path = std::path::Path::new(&fixtures_dir);
1980
1981 if !fixtures_path.exists() {
1982 tracing::warn!("Fixtures directory does not exist: {}", fixtures_dir);
1983 return Ok(Vec::new());
1984 }
1985
1986 let mut all_fixtures = Vec::new();
1987
1988 let http_fixtures = scan_protocol_fixtures(fixtures_path, "http")?;
1990 all_fixtures.extend(http_fixtures);
1991
1992 let ws_fixtures = scan_protocol_fixtures(fixtures_path, "websocket")?;
1994 all_fixtures.extend(ws_fixtures);
1995
1996 let grpc_fixtures = scan_protocol_fixtures(fixtures_path, "grpc")?;
1998 all_fixtures.extend(grpc_fixtures);
1999
2000 all_fixtures.sort_by(|a, b| b.saved_at.cmp(&a.saved_at));
2002
2003 tracing::info!("Found {} fixtures in directory: {}", all_fixtures.len(), fixtures_dir);
2004 Ok(all_fixtures)
2005}
2006
2007fn scan_protocol_fixtures(
2009 fixtures_path: &std::path::Path,
2010 protocol: &str,
2011) -> Result<Vec<FixtureInfo>> {
2012 let protocol_path = fixtures_path.join(protocol);
2013 let mut fixtures = Vec::new();
2014
2015 if !protocol_path.exists() {
2016 return Ok(fixtures);
2017 }
2018
2019 if let Ok(entries) = std::fs::read_dir(&protocol_path) {
2021 for entry in entries {
2022 let entry = entry
2023 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2024 let path = entry.path();
2025
2026 if path.is_dir() {
2027 let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2029 fixtures.extend(sub_fixtures);
2030 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2031 if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2033 fixtures.push(fixture);
2034 }
2035 }
2036 }
2037 }
2038
2039 Ok(fixtures)
2040}
2041
2042fn scan_directory_recursive(
2044 dir_path: &std::path::Path,
2045 protocol: &str,
2046) -> Result<Vec<FixtureInfo>> {
2047 let mut fixtures = Vec::new();
2048
2049 if let Ok(entries) = std::fs::read_dir(dir_path) {
2050 for entry in entries {
2051 let entry = entry
2052 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2053 let path = entry.path();
2054
2055 if path.is_dir() {
2056 let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2058 fixtures.extend(sub_fixtures);
2059 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2060 if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2062 fixtures.push(fixture);
2063 }
2064 }
2065 }
2066 }
2067
2068 Ok(fixtures)
2069}
2070
fn parse_fixture_file_sync(file_path: &std::path::Path, protocol: &str) -> Result<FixtureInfo> {
    let metadata = std::fs::metadata(file_path)
        .map_err(|e| Error::generic(format!("Failed to read file metadata: {}", e)))?;

    let file_size = metadata.len();
    let modified_time = metadata
        .modified()
        .map_err(|e| Error::generic(format!("Failed to get file modification time: {}", e)))?;

    let saved_at = chrono::DateTime::from(modified_time);

    let content = std::fs::read_to_string(file_path)
        .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;

    let fixture_data: serde_json::Value = serde_json::from_str(&content)
        .map_err(|e| Error::generic(format!("Failed to parse fixture JSON: {}", e)))?;

    let (method, path) = extract_method_and_path(&fixture_data, protocol)?;

    let id = generate_fixture_id(file_path, &content);

    let fingerprint = extract_fingerprint(file_path, &fixture_data)?;

    // Store the path relative to the fixtures root when possible.
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);
    let file_path_str = file_path
        .strip_prefix(fixtures_path)
        .unwrap_or(file_path)
        .to_string_lossy()
        .to_string();

    Ok(FixtureInfo {
        id,
        protocol: protocol.to_string(),
        method,
        path,
        saved_at,
        file_size,
        file_path: file_path_str,
        fingerprint,
        metadata: fixture_data,
    })
}

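/// Derive a display method and path for a fixture based on its protocol.
///
/// HTTP fixtures read `request.method`/`request.path`; WebSocket fixtures are
/// reported as `WS`; gRPC fixtures combine `service` and `method` into a path.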
fn extract_method_and_path(
    fixture_data: &serde_json::Value,
    protocol: &str,
) -> Result<(String, String)> {
    match protocol {
        "http" => {
            let method = fixture_data
                .get("request")
                .and_then(|req| req.get("method"))
                .and_then(|m| m.as_str())
                .unwrap_or("UNKNOWN")
                .to_uppercase();

            let path = fixture_data
                .get("request")
                .and_then(|req| req.get("path"))
                .and_then(|p| p.as_str())
                .unwrap_or("/unknown")
                .to_string();

            Ok((method, path))
        }
        "websocket" => {
            // Accept either a top-level `path` or a nested `request.path`.
            let path = fixture_data
                .get("path")
                .and_then(|p| p.as_str())
                .or_else(|| {
                    fixture_data
                        .get("request")
                        .and_then(|req| req.get("path"))
                        .and_then(|p| p.as_str())
                })
                .unwrap_or("/ws")
                .to_string();

            Ok(("WS".to_string(), path))
        }
        "grpc" => {
            let service =
                fixture_data.get("service").and_then(|s| s.as_str()).unwrap_or("UnknownService");

            let method =
                fixture_data.get("method").and_then(|m| m.as_str()).unwrap_or("UnknownMethod");

            let path = format!("/{}/{}", service, method);
            Ok(("gRPC".to_string(), path))
        }
        _ => {
            let path = fixture_data
                .get("path")
                .and_then(|p| p.as_str())
                .unwrap_or("/unknown")
                .to_string();
            Ok((protocol.to_uppercase(), path))
        }
    }
}

/// Produce a stable fixture ID by hashing the file path together with its contents.
fn generate_fixture_id(file_path: &std::path::Path, content: &str) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut hasher = DefaultHasher::new();
    file_path.hash(&mut hasher);
    content.hash(&mut hasher);
    format!("fixture_{:x}", hasher.finish())
}

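/// Resolve a fixture's fingerprint, trying (in order) an explicit `fingerprint`
/// field in the JSON, a hash-like suffix in the file name, and finally a hash
/// of the file path itself.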
fn extract_fingerprint(
    file_path: &std::path::Path,
    fixture_data: &serde_json::Value,
) -> Result<String> {
    if let Some(fingerprint) = fixture_data.get("fingerprint").and_then(|f| f.as_str()) {
        return Ok(fingerprint.to_string());
    }

    // Fall back to a trailing `_<hash>` segment in the file stem, if present.
    if let Some(file_name) = file_path.file_stem().and_then(|s| s.to_str()) {
        if let Some(hash) = file_name.split('_').next_back() {
            if hash.len() >= 8 && hash.chars().all(|c| c.is_alphanumeric()) {
                return Ok(hash.to_string());
            }
        }
    }

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut hasher = DefaultHasher::new();
    file_path.hash(&mut hasher);
    Ok(format!("{:x}", hasher.finish()))
}

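/// Delete a single fixture by ID, as supplied in the request body.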
pub async fn delete_fixture(
    Json(payload): Json<FixtureDeleteRequest>,
) -> Json<ApiResponse<String>> {
    match delete_fixture_by_id(&payload.fixture_id).await {
        Ok(_) => {
            tracing::info!("Successfully deleted fixture: {}", payload.fixture_id);
            Json(ApiResponse::success("Fixture deleted successfully".to_string()))
        }
        Err(e) => {
            tracing::error!("Failed to delete fixture {}: {}", payload.fixture_id, e);
            Json(ApiResponse::error(format!("Failed to delete fixture: {}", e)))
        }
    }
}

/// Delete several fixtures in one request, reporting per-fixture failures.
pub async fn delete_fixtures_bulk(
    Json(payload): Json<FixtureBulkDeleteRequest>,
) -> Json<ApiResponse<FixtureBulkDeleteResult>> {
    let mut deleted_count = 0;
    let mut errors = Vec::new();

    for fixture_id in &payload.fixture_ids {
        match delete_fixture_by_id(fixture_id).await {
            Ok(_) => {
                deleted_count += 1;
                tracing::info!("Successfully deleted fixture: {}", fixture_id);
            }
            Err(e) => {
                errors.push(format!("Failed to delete {}: {}", fixture_id, e));
                tracing::error!("Failed to delete fixture {}: {}", fixture_id, e);
            }
        }
    }

    let result = FixtureBulkDeleteResult {
        deleted_count,
        total_requested: payload.fixture_ids.len(),
        errors: errors.clone(),
    };

    if errors.is_empty() {
        Json(ApiResponse::success(result))
    } else {
        Json(ApiResponse::error(format!(
            "Partial success: {} deleted, {} errors",
            deleted_count,
            errors.len()
        )))
    }
}

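/// Locate a fixture file by its ID and remove it from disk, pruning any
/// directories left empty by the deletion.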
async fn delete_fixture_by_id(fixture_id: &str) -> Result<()> {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    // Perform the blocking filesystem work off the async runtime.
    let file_path_clone = file_path.clone();
    tokio::task::spawn_blocking(move || {
        if file_path_clone.exists() {
            std::fs::remove_file(&file_path_clone).map_err(|e| {
                Error::generic(format!(
                    "Failed to delete fixture file {}: {}",
                    file_path_clone.display(),
                    e
                ))
            })
        } else {
            Err(Error::generic(format!("Fixture file not found: {}", file_path_clone.display())))
        }
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Deleted fixture file: {}", file_path.display());

    cleanup_empty_directories(&file_path).await;

    Ok(())
}

/// Search every protocol subdirectory for the fixture file matching an ID.
fn find_fixture_file_by_id(
    fixtures_path: &std::path::Path,
    fixture_id: &str,
) -> Result<std::path::PathBuf> {
    let protocols = ["http", "websocket", "grpc"];

    for protocol in &protocols {
        let protocol_path = fixtures_path.join(protocol);
        if let Ok(found_path) = search_fixture_in_directory(&protocol_path, fixture_id) {
            return Ok(found_path);
        }
    }

    Err(Error::generic(format!(
        "Fixture with ID '{}' not found in any protocol directory",
        fixture_id
    )))
}

/// Recursively search a directory for a fixture whose derived ID matches.
fn search_fixture_in_directory(
    dir_path: &std::path::Path,
    fixture_id: &str,
) -> Result<std::path::PathBuf> {
    if !dir_path.exists() {
        return Err(Error::generic(format!("Directory does not exist: {}", dir_path.display())));
    }

    if let Ok(entries) = std::fs::read_dir(dir_path) {
        for entry in entries {
            let entry = entry
                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
            let path = entry.path();

            if path.is_dir() {
                if let Ok(found_path) = search_fixture_in_directory(&path, fixture_id) {
                    return Ok(found_path);
                }
            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
                // IDs hash the path and content, so the protocol label does not matter here.
                if let Ok(fixture_info) = parse_fixture_file_sync(&path, "unknown") {
                    if fixture_info.id == fixture_id {
                        return Ok(path);
                    }
                }
            }
        }
    }

    Err(Error::generic(format!(
        "Fixture not found in directory: {}",
        dir_path.display()
    )))
}

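/// Walk up from a deleted fixture's parent directory, removing empty
/// directories until the fixtures root (or a non-empty directory) is reached.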
async fn cleanup_empty_directories(file_path: &std::path::Path) {
    let file_path = file_path.to_path_buf();

    // Best-effort cleanup; errors are logged at debug level and ignored.
    let _ = tokio::task::spawn_blocking(move || {
        if let Some(parent) = file_path.parent() {
            let mut current = parent;
            let fixtures_dir =
                std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
            let fixtures_path = std::path::Path::new(&fixtures_dir);

            while current != fixtures_path && current.parent().is_some() {
                if let Ok(entries) = std::fs::read_dir(current) {
                    if entries.count() == 0 {
                        if let Err(e) = std::fs::remove_dir(current) {
                            tracing::debug!(
                                "Failed to remove empty directory {}: {}",
                                current.display(),
                                e
                            );
                            break;
                        } else {
                            tracing::debug!("Removed empty directory: {}", current.display());
                        }
                    } else {
                        break;
                    }
                } else {
                    break;
                }

                if let Some(next_parent) = current.parent() {
                    current = next_parent;
                } else {
                    break;
                }
            }
        }
    })
    .await;
}

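/// Download a fixture's raw JSON as a file attachment. Expects the fixture ID
/// in the `id` query parameter.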
pub async fn download_fixture(Query(params): Query<HashMap<String, String>>) -> impl IntoResponse {
    let fixture_id = match params.get("id") {
        Some(id) => id,
        None => {
            return (
                http::StatusCode::BAD_REQUEST,
                [(http::header::CONTENT_TYPE, "application/json")],
                r#"{"error": "Missing fixture ID parameter"}"#,
            )
                .into_response();
        }
    };

    match download_fixture_by_id(fixture_id).await {
        Ok((content, file_name)) => (
            http::StatusCode::OK,
            [
                (http::header::CONTENT_TYPE, "application/json".to_string()),
                (
                    http::header::CONTENT_DISPOSITION,
                    format!("attachment; filename=\"{}\"", file_name),
                ),
            ],
            content,
        )
            .into_response(),
        Err(e) => {
            tracing::error!("Failed to download fixture {}: {}", fixture_id, e);
            let error_response = format!(r#"{{"error": "Failed to download fixture: {}"}}"#, e);
            (
                http::StatusCode::NOT_FOUND,
                [(http::header::CONTENT_TYPE, "application/json".to_string())],
                error_response,
            )
                .into_response()
        }
    }
}

/// Read a fixture's content and file name from disk by fixture ID.
async fn download_fixture_by_id(fixture_id: &str) -> Result<(String, String)> {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    let file_path_clone = file_path.clone();
    let (content, file_name) = tokio::task::spawn_blocking(move || {
        let content = std::fs::read_to_string(&file_path_clone)
            .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;

        let file_name = file_path_clone
            .file_name()
            .and_then(|name| name.to_str())
            .unwrap_or("fixture.json")
            .to_string();

        Ok::<_, Error>((content, file_name))
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Downloaded fixture file: {} ({} bytes)", file_path.display(), content.len());
    Ok((content, file_name))
}

/// Rename a fixture file in place, keeping it in the same directory.
pub async fn rename_fixture(
    axum::extract::Path(fixture_id): axum::extract::Path<String>,
    Json(payload): Json<FixtureRenameRequest>,
) -> Json<ApiResponse<String>> {
    match rename_fixture_by_id(&fixture_id, &payload.new_name).await {
        Ok(new_path) => {
            tracing::info!("Successfully renamed fixture: {} -> {}", fixture_id, payload.new_name);
            Json(ApiResponse::success(format!("Fixture renamed successfully to: {}", new_path)))
        }
        Err(e) => {
            tracing::error!("Failed to rename fixture {}: {}", fixture_id, e);
            Json(ApiResponse::error(format!("Failed to rename fixture: {}", e)))
        }
    }
}

async fn rename_fixture_by_id(fixture_id: &str, new_name: &str) -> Result<String> {
    if new_name.is_empty() {
        return Err(Error::generic("New name cannot be empty".to_string()));
    }

    // Ensure the new name carries the `.json` extension.
    let new_name = if new_name.ends_with(".json") {
        new_name.to_string()
    } else {
        format!("{}.json", new_name)
    };

    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    let parent = old_path
        .parent()
        .ok_or_else(|| Error::generic("Could not determine parent directory".to_string()))?;

    let new_path = parent.join(&new_name);

    if new_path.exists() {
        return Err(Error::generic(format!(
            "A fixture with name '{}' already exists in the same directory",
            new_name
        )));
    }

    let old_path_clone = old_path.clone();
    let new_path_clone = new_path.clone();
    tokio::task::spawn_blocking(move || {
        std::fs::rename(&old_path_clone, &new_path_clone)
            .map_err(|e| Error::generic(format!("Failed to rename fixture file: {}", e)))
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Renamed fixture file: {} -> {}", old_path.display(), new_path.display());

    Ok(new_path
        .strip_prefix(fixtures_path)
        .unwrap_or(&new_path)
        .to_string_lossy()
        .to_string())
}

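/// Move a fixture to a new location beneath the fixtures root. The target may
/// be a directory (the original file name is kept) or a full file path.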
pub async fn move_fixture(
    axum::extract::Path(fixture_id): axum::extract::Path<String>,
    Json(payload): Json<FixtureMoveRequest>,
) -> Json<ApiResponse<String>> {
    match move_fixture_by_id(&fixture_id, &payload.new_path).await {
        Ok(new_location) => {
            tracing::info!("Successfully moved fixture: {} -> {}", fixture_id, payload.new_path);
            Json(ApiResponse::success(format!("Fixture moved successfully to: {}", new_location)))
        }
        Err(e) => {
            tracing::error!("Failed to move fixture {}: {}", fixture_id, e);
            Json(ApiResponse::error(format!("Failed to move fixture: {}", e)))
        }
    }
}

async fn move_fixture_by_id(fixture_id: &str, new_path: &str) -> Result<String> {
    if new_path.is_empty() {
        return Err(Error::generic("New path cannot be empty".to_string()));
    }

    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    // Resolve the target relative to the fixtures root, tolerating a leading slash.
    let new_full_path = if new_path.starts_with('/') {
        fixtures_path.join(new_path.trim_start_matches('/'))
    } else {
        fixtures_path.join(new_path)
    };

    let new_full_path = if new_full_path.extension().and_then(|s| s.to_str()) == Some("json") {
        new_full_path
    } else if new_full_path.is_dir() || !new_path.contains('.') {
        // Treat the target as a directory and keep the original file name.
        let file_name = old_path.file_name().ok_or_else(|| {
            Error::generic("Could not determine original file name".to_string())
        })?;
        new_full_path.join(file_name)
    } else {
        new_full_path.with_extension("json")
    };

    if new_full_path.exists() {
        return Err(Error::generic(format!(
            "A fixture already exists at path: {}",
            new_full_path.display()
        )));
    }

    let old_path_clone = old_path.clone();
    let new_full_path_clone = new_full_path.clone();
    tokio::task::spawn_blocking(move || {
        if let Some(parent) = new_full_path_clone.parent() {
            std::fs::create_dir_all(parent)
                .map_err(|e| Error::generic(format!("Failed to create target directory: {}", e)))?;
        }

        std::fs::rename(&old_path_clone, &new_full_path_clone)
            .map_err(|e| Error::generic(format!("Failed to move fixture file: {}", e)))
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Moved fixture file: {} -> {}", old_path.display(), new_full_path.display());

    cleanup_empty_directories(&old_path).await;

    Ok(new_full_path
        .strip_prefix(fixtures_path)
        .unwrap_or(&new_full_path)
        .to_string_lossy()
        .to_string())
}

/// Return the current request/response validation settings.
pub async fn get_validation(
    State(state): State<AdminState>,
) -> Json<ApiResponse<ValidationSettings>> {
    let config_state = state.get_config().await;

    Json(ApiResponse::success(config_state.validation_settings))
}

/// Update validation settings. The mode must be one of `enforce`, `warn`, or `off`.
pub async fn update_validation(
    State(state): State<AdminState>,
    Json(update): Json<ValidationUpdate>,
) -> Json<ApiResponse<String>> {
    match update.mode.as_str() {
        "enforce" | "warn" | "off" => {}
        _ => {
            return Json(ApiResponse::error(
                "Invalid validation mode. Must be 'enforce', 'warn', or 'off'".to_string(),
            ))
        }
    }

    let mode = update.mode.clone();
    state
        .update_validation_config(
            update.mode,
            update.aggregate_errors,
            update.validate_responses,
            update.overrides.unwrap_or_default(),
        )
        .await;

    tracing::info!(
        "Updated validation settings: mode={}, aggregate_errors={}",
        mode,
        update.aggregate_errors
    );

    Json(ApiResponse::success("Validation settings updated".to_string()))
}

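/// Return the values of MockForge-related environment variables that are
/// currently set in the server process.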
pub async fn get_env_vars() -> Json<ApiResponse<HashMap<String, String>>> {
    let mut env_vars = HashMap::new();

    let relevant_vars = [
        // Core toggles
        "MOCKFORGE_LATENCY_ENABLED",
        "MOCKFORGE_FAILURES_ENABLED",
        "MOCKFORGE_PROXY_ENABLED",
        "MOCKFORGE_RECORD_ENABLED",
        "MOCKFORGE_REPLAY_ENABLED",
        "MOCKFORGE_LOG_LEVEL",
        "MOCKFORGE_CONFIG_FILE",
        "RUST_LOG",
        // HTTP server
        "MOCKFORGE_HTTP_PORT",
        "MOCKFORGE_HTTP_HOST",
        "MOCKFORGE_HTTP_OPENAPI_SPEC",
        "MOCKFORGE_CORS_ENABLED",
        "MOCKFORGE_REQUEST_TIMEOUT_SECS",
        // WebSocket server
        "MOCKFORGE_WS_PORT",
        "MOCKFORGE_WS_HOST",
        "MOCKFORGE_WS_REPLAY_FILE",
        "MOCKFORGE_WS_CONNECTION_TIMEOUT_SECS",
        // gRPC server
        "MOCKFORGE_GRPC_PORT",
        "MOCKFORGE_GRPC_HOST",
        // Admin UI
        "MOCKFORGE_ADMIN_ENABLED",
        "MOCKFORGE_ADMIN_PORT",
        "MOCKFORGE_ADMIN_HOST",
        "MOCKFORGE_ADMIN_MOUNT_PATH",
        "MOCKFORGE_ADMIN_API_ENABLED",
        // Templating and validation
        "MOCKFORGE_RESPONSE_TEMPLATE_EXPAND",
        "MOCKFORGE_REQUEST_VALIDATION",
        "MOCKFORGE_AGGREGATE_ERRORS",
        "MOCKFORGE_RESPONSE_VALIDATION",
        "MOCKFORGE_VALIDATION_STATUS",
        // Data generation and fixtures
        "MOCKFORGE_RAG_ENABLED",
        "MOCKFORGE_FAKE_TOKENS",
        "MOCKFORGE_FIXTURES_DIR",
    ];

    for var_name in &relevant_vars {
        if let Ok(value) = std::env::var(var_name) {
            env_vars.insert(var_name.to_string(), value);
        }
    }

    Json(ApiResponse::success(env_vars))
}

/// Set an environment variable in the running process. The change is not
/// persisted and will be lost when the server restarts.
pub async fn update_env_var(Json(update): Json<EnvVarUpdate>) -> Json<ApiResponse<String>> {
    std::env::set_var(&update.key, &update.value);

    tracing::info!("Updated environment variable: {}={}", update.key, update.value);

    Json(ApiResponse::success(format!(
        "Environment variable {} updated to '{}'. Note: This change is not persisted and will be lost on restart.",
        update.key, update.value
    )))
}

/// Read a file's content after validating the requested path and the content itself.
pub async fn get_file_content(
    Json(request): Json<FileContentRequest>,
) -> Json<ApiResponse<String>> {
    if let Err(e) = validate_file_path(&request.file_path) {
        return Json(ApiResponse::error(format!("Invalid file path: {}", e)));
    }

    match tokio::fs::read_to_string(&request.file_path).await {
        Ok(content) => {
            if let Err(e) = validate_file_content(&content) {
                return Json(ApiResponse::error(format!("Invalid file content: {}", e)));
            }
            Json(ApiResponse::success(content))
        }
        Err(e) => Json(ApiResponse::error(format!("Failed to read file: {}", e))),
    }
}

/// Write content to a file on disk, creating parent directories as needed.
pub async fn save_file_content(Json(request): Json<FileSaveRequest>) -> Json<ApiResponse<String>> {
    match save_file_to_filesystem(&request.file_path, &request.content).await {
        Ok(_) => {
            tracing::info!("Successfully saved file: {}", request.file_path);
            Json(ApiResponse::success("File saved successfully".to_string()))
        }
        Err(e) => {
            tracing::error!("Failed to save file {}: {}", request.file_path, e);
            Json(ApiResponse::error(format!("Failed to save file: {}", e)))
        }
    }
}

async fn save_file_to_filesystem(file_path: &str, content: &str) -> Result<()> {
    validate_file_path(file_path)?;

    validate_file_content(content)?;

    let path = std::path::PathBuf::from(file_path);
    let content = content.to_string();

    let path_clone = path.clone();
    let content_clone = content.clone();
    tokio::task::spawn_blocking(move || {
        if let Some(parent) = path_clone.parent() {
            std::fs::create_dir_all(parent).map_err(|e| {
                Error::generic(format!("Failed to create directory {}: {}", parent.display(), e))
            })?;
        }

        std::fs::write(&path_clone, &content_clone).map_err(|e| {
            Error::generic(format!("Failed to write file {}: {}", path_clone.display(), e))
        })?;

        // Read the file back to verify the write completed intact.
        let written_content = std::fs::read_to_string(&path_clone).map_err(|e| {
            Error::generic(format!("Failed to verify written file {}: {}", path_clone.display(), e))
        })?;

        if written_content != content_clone {
            return Err(Error::generic(format!(
                "File content verification failed for {}",
                path_clone.display()
            )));
        }

        Ok::<_, Error>(())
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("File saved successfully: {} ({} bytes)", path.display(), content.len());
    Ok(())
}

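/// Validate a user-supplied file path: reject path traversal, absolute paths
/// outside the allowed directories, and dangerous executable extensions.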
fn validate_file_path(file_path: &str) -> Result<()> {
    if file_path.contains("..") {
        return Err(Error::generic("Path traversal detected in file path".to_string()));
    }

    let path = std::path::Path::new(file_path);
    if path.is_absolute() {
        let allowed_dirs = [
            std::env::current_dir().unwrap_or_default(),
            std::path::PathBuf::from("."),
            std::path::PathBuf::from("fixtures"),
            std::path::PathBuf::from("config"),
        ];

        let mut is_allowed = false;
        for allowed_dir in &allowed_dirs {
            if path.starts_with(allowed_dir) {
                is_allowed = true;
                break;
            }
        }

        if !is_allowed {
            return Err(Error::generic("File path is outside allowed directories".to_string()));
        }
    }

    let dangerous_extensions = ["exe", "bat", "cmd", "sh", "ps1", "scr", "com"];
    if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
        if dangerous_extensions.contains(&extension.to_lowercase().as_str()) {
            return Err(Error::generic(format!(
                "Dangerous file extension not allowed: {}",
                extension
            )));
        }
    }

    Ok(())
}

/// Reject file content that is larger than 10 MB or contains null bytes.
fn validate_file_content(content: &str) -> Result<()> {
    if content.len() > 10 * 1024 * 1024 {
        return Err(Error::generic("File content too large (max 10MB)".to_string()));
    }

    if content.contains('\0') {
        return Err(Error::generic("File content contains null bytes".to_string()));
    }

    Ok(())
}

/// Request body for deleting a single fixture.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureDeleteRequest {
    pub fixture_id: String,
}

/// Request body for setting an environment variable.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvVarUpdate {
    pub key: String,
    pub value: String,
}

/// Request body for deleting multiple fixtures at once.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteRequest {
    pub fixture_ids: Vec<String>,
}

/// Outcome of a bulk fixture deletion.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteResult {
    pub deleted_count: usize,
    pub total_requested: usize,
    pub errors: Vec<String>,
}

/// Request body for renaming a fixture.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureRenameRequest {
    pub new_name: String,
}

/// Request body for moving a fixture.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureMoveRequest {
    pub new_path: String,
}

/// Request body for reading a file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContentRequest {
    pub file_path: String,
    pub file_type: String,
}

/// Request body for saving a file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileSaveRequest {
    pub file_path: String,
    pub content: String,
}

/// Return the most recent smoke test results.
pub async fn get_smoke_tests(
    State(state): State<AdminState>,
) -> Json<ApiResponse<Vec<SmokeTestResult>>> {
    let results = state.get_smoke_test_results().await;
    Json(ApiResponse::success(results))
}

/// Kick off a smoke test run in the background and return immediately.
pub async fn run_smoke_tests_endpoint(
    State(state): State<AdminState>,
) -> Json<ApiResponse<String>> {
    tracing::info!("Starting smoke test execution");

    let state_clone = state.clone();
    tokio::spawn(async move {
        if let Err(e) = execute_smoke_tests(&state_clone).await {
            tracing::error!("Smoke test execution failed: {}", e);
        } else {
            tracing::info!("Smoke test execution completed successfully");
        }
    });

    Json(ApiResponse::success(
        "Smoke tests started. Check results in the smoke tests section.".to_string(),
    ))
}

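/// Build one smoke test per recorded HTTP fixture and execute them against the
/// server at `MOCKFORGE_BASE_URL` (default `http://localhost:3000`), publishing
/// each result to the admin state as it completes.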
async fn execute_smoke_tests(state: &AdminState) -> Result<()> {
    let base_url =
        std::env::var("MOCKFORGE_BASE_URL").unwrap_or_else(|_| "http://localhost:3000".to_string());

    let context = SmokeTestContext {
        base_url,
        timeout_seconds: 30,
        parallel: true,
    };

    let fixtures = scan_fixtures_directory()?;

    let http_fixtures: Vec<&FixtureInfo> =
        fixtures.iter().filter(|f| f.protocol == "http").collect();

    if http_fixtures.is_empty() {
        tracing::warn!("No HTTP fixtures found for smoke testing");
        return Ok(());
    }

    tracing::info!("Running smoke tests for {} HTTP fixtures", http_fixtures.len());

    let mut test_results = Vec::new();

    for fixture in http_fixtures {
        let test_result = create_smoke_test_from_fixture(fixture);
        test_results.push(test_result);
    }

    let mut executed_results = Vec::new();
    for mut test_result in test_results {
        // Mark the test as running so the UI can reflect progress.
        test_result.status = "running".to_string();
        state.update_smoke_test_result(test_result.clone()).await;

        let start_time = std::time::Instant::now();
        match execute_single_smoke_test(&test_result, &context).await {
            Ok((status_code, response_time_ms)) => {
                test_result.status = "passed".to_string();
                test_result.status_code = Some(status_code);
                test_result.response_time_ms = Some(response_time_ms);
                test_result.error_message = None;
            }
            Err(e) => {
                test_result.status = "failed".to_string();
                test_result.error_message = Some(e.to_string());
                test_result.status_code = None;
                test_result.response_time_ms = None;
            }
        }

        let duration = start_time.elapsed();
        test_result.duration_seconds = Some(duration.as_secs_f64());
        test_result.last_run = Some(chrono::Utc::now());

        executed_results.push(test_result.clone());
        state.update_smoke_test_result(test_result).await;
    }

    tracing::info!("Smoke test execution completed: {} tests run", executed_results.len());
    Ok(())
}

/// Create a pending smoke test entry from a recorded HTTP fixture.
fn create_smoke_test_from_fixture(fixture: &FixtureInfo) -> SmokeTestResult {
    let test_name = format!("{} {}", fixture.method, fixture.path);
    let description = format!("Smoke test for {} endpoint", fixture.path);

    SmokeTestResult {
        id: format!("smoke_{}", fixture.id),
        name: test_name,
        method: fixture.method.clone(),
        path: fixture.path.clone(),
        description,
        last_run: None,
        status: "pending".to_string(),
        response_time_ms: None,
        error_message: None,
        status_code: None,
        duration_seconds: None,
    }
}

/// Send a single smoke test request; any status below 400 counts as a pass.
/// Returns the status code and the observed response time in milliseconds.
async fn execute_single_smoke_test(
    test: &SmokeTestResult,
    context: &SmokeTestContext,
) -> Result<(u16, u64)> {
    let url = format!("{}{}", context.base_url, test.path);
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(context.timeout_seconds))
        .build()
        .map_err(|e| Error::generic(format!("Failed to create HTTP client: {}", e)))?;

    let start_time = std::time::Instant::now();

    let response = match test.method.as_str() {
        "GET" => client.get(&url).send().await,
        "POST" => client.post(&url).send().await,
        "PUT" => client.put(&url).send().await,
        "DELETE" => client.delete(&url).send().await,
        "PATCH" => client.patch(&url).send().await,
        "HEAD" => client.head(&url).send().await,
        "OPTIONS" => client.request(reqwest::Method::OPTIONS, &url).send().await,
        _ => {
            return Err(Error::generic(format!("Unsupported HTTP method: {}", test.method)));
        }
    };

    let response_time = start_time.elapsed();
    let response_time_ms = response_time.as_millis() as u64;

    match response {
        Ok(resp) => {
            let status_code = resp.status().as_u16();
            if (200..400).contains(&status_code) {
                Ok((status_code, response_time_ms))
            } else {
                Err(Error::generic(format!(
                    "HTTP error: {} {}",
                    status_code,
                    resp.status().canonical_reason().unwrap_or("Unknown")
                )))
            }
        }
        Err(e) => Err(Error::generic(format!("Request failed: {}", e))),
    }
}

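/// Validate a plugin install request. The source may be a local file path or
/// an HTTP(S) URL, which is first downloaded to a temporary file. Note that
/// this handler currently only validates the source; the response message
/// indicates the install itself is not yet performed.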
pub async fn install_plugin(Json(request): Json<serde_json::Value>) -> impl IntoResponse {
    let source = request.get("source").and_then(|s| s.as_str()).unwrap_or("");

    if source.is_empty() {
        return Json(json!({
            "success": false,
            "error": "Plugin source is required"
        }));
    }

    let plugin_path = if source.starts_with("http://") || source.starts_with("https://") {
        match download_plugin_from_url(source).await {
            Ok(temp_path) => temp_path,
            Err(e) => {
                return Json(json!({
                    "success": false,
                    "error": format!("Failed to download plugin: {}", e)
                }))
            }
        }
    } else {
        std::path::PathBuf::from(source)
    };

    if !plugin_path.exists() {
        return Json(json!({
            "success": false,
            "error": format!("Plugin file not found: {}", source)
        }));
    }

    Json(json!({
        "success": true,
        "message": format!("Plugin would be installed from: {}", source)
    }))
}

/// Download a plugin archive from a URL into a temporary file and return its path.
async fn download_plugin_from_url(url: &str) -> Result<std::path::PathBuf> {
    let temp_file =
        std::env::temp_dir().join(format!("plugin_{}.tmp", chrono::Utc::now().timestamp()));

    let response = reqwest::get(url)
        .await
        .map_err(|e| Error::generic(format!("Failed to download from URL: {}", e)))?;

    if !response.status().is_success() {
        return Err(Error::generic(format!(
            "HTTP error {}: {}",
            response.status().as_u16(),
            response.status().canonical_reason().unwrap_or("Unknown")
        )));
    }

    let bytes = response
        .bytes()
        .await
        .map_err(|e| Error::generic(format!("Failed to read response: {}", e)))?;

    tokio::fs::write(&temp_file, &bytes)
        .await
        .map_err(|e| Error::generic(format!("Failed to write temporary file: {}", e)))?;

    Ok(temp_file)
}

// Static asset handlers. These currently return empty PNG bodies as placeholders.
pub async fn serve_icon() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

pub async fn serve_icon_32() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

pub async fn serve_icon_48() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

pub async fn serve_logo() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

pub async fn serve_logo_40() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

pub async fn serve_logo_80() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

/// Placeholder: accepts a traffic shaping config but does not apply it yet.
pub async fn update_traffic_shaping(
    State(_state): State<AdminState>,
    Json(_config): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Traffic shaping updated".to_string()))
}

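/// Import a Postman collection: parse the collection JSON, convert its routes,
/// create a workspace from them, persist it, and record the outcome in the
/// import history.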
pub async fn import_postman(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use mockforge_core::workspace_import::{import_postman_to_workspace, WorkspaceImportConfig};
    use uuid::Uuid;

    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    let import_result = match mockforge_core::import::import_postman_collection(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            // Record the failed import before returning.
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Postman import failed: {}", e)));
        }
    };

    // Name the workspace after the uploaded file, minus its extension.
    let workspace_name = filename
        .and_then(|f| f.split('.').next())
        .unwrap_or("Imported Postman Collection");

    let config = WorkspaceImportConfig {
        create_folders: true,
        base_folder_name: None,
        preserve_hierarchy: true,
        max_depth: 5,
    };

    let routes: Vec<ImportRoute> = import_result
        .routes
        .into_iter()
        .map(|route| ImportRoute {
            method: route.method,
            path: route.path,
            headers: route.headers,
            body: route.body,
            response: ImportResponse {
                status: route.response.status,
                headers: route.response.headers,
                body: route.response.body,
            },
        })
        .collect();

    match import_postman_to_workspace(routes, workspace_name.to_string(), config) {
        Ok(workspace_result) => {
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: import_result.variables.len(),
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}

/// Import an Insomnia export, optionally applying a named environment, and
/// persist the resulting workspace.
pub async fn import_insomnia(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use uuid::Uuid;

    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    let import_result = match mockforge_core::import::import_insomnia_export(content, environment) {
        Ok(result) => result,
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "insomnia".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Insomnia import failed: {}", e)));
        }
    };

    let workspace_name = filename
        .and_then(|f| f.split('.').next())
        .unwrap_or("Imported Insomnia Collection");

    let _config = WorkspaceImportConfig {
        create_folders: true,
        base_folder_name: None,
        preserve_hierarchy: true,
        max_depth: 5,
    };

    // Capture the count before `import_result` is moved below.
    let variables_count = import_result.variables.len();

    match mockforge_core::workspace_import::create_workspace_from_insomnia(
        import_result,
        Some(workspace_name.to_string()),
    ) {
        Ok(workspace_result) => {
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "insomnia".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count,
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "insomnia".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}

/// Placeholder: OpenAPI import is not implemented yet.
pub async fn import_openapi(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("OpenAPI import completed".to_string()))
}

/// Import one or more curl commands and persist the resulting workspace.
pub async fn import_curl(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use uuid::Uuid;

    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    let import_result = match mockforge_core::import::import_curl_commands(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Curl import failed: {}", e)));
        }
    };

    let workspace_name =
        filename.and_then(|f| f.split('.').next()).unwrap_or("Imported Curl Commands");

    match mockforge_core::workspace_import::create_workspace_from_curl(
        import_result,
        Some(workspace_name.to_string()),
    ) {
        Ok(workspace_result) => {
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: 0,
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}

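/// Preview an import without creating a workspace. The format is inferred from
/// the file name, and the parsed routes, variables, and warnings are returned.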
pub async fn preview_import(
    State(_state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<serde_json::Value>> {
    use mockforge_core::import::{
        import_curl_commands, import_insomnia_export, import_postman_collection,
    };

    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Infer the import format from the file name.
    let format = if let Some(fname) = filename {
        let fname = fname.to_lowercase();
        if fname.contains("postman") || fname.ends_with(".postman_collection") {
            "postman"
        } else if fname.contains("insomnia") || fname.ends_with(".insomnia") {
            "insomnia"
        } else if fname.contains("curl") || fname.ends_with(".sh") || fname.ends_with(".curl") {
            "curl"
        } else {
            "unknown"
        }
    } else {
        "unknown"
    };

    match format {
        "postman" => match import_postman_collection(content, base_url) {
            Ok(import_result) => {
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                let response = serde_json::json!({
                    "routes": routes,
                    "variables": import_result.variables,
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Postman import failed: {}", e))),
        },
        "insomnia" => match import_insomnia_export(content, environment) {
            Ok(import_result) => {
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                let response = serde_json::json!({
                    "routes": routes,
                    "variables": import_result.variables,
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Insomnia import failed: {}", e))),
        },
        "curl" => match import_curl_commands(content, base_url) {
            Ok(import_result) => {
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                // Curl imports carry no variables, so return an empty object.
                let response = serde_json::json!({
                    "routes": routes,
                    "variables": serde_json::json!({}),
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Curl import failed: {}", e))),
        },
        _ => Json(ApiResponse::error("Unsupported import format".to_string())),
    }
}

/// Return the 50 most recent import history entries, newest first.
pub async fn get_import_history(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let history = state.import_history.read().await;
    let total = history.len();

    let imports: Vec<serde_json::Value> = history
        .iter()
        .rev()
        .take(50)
        .map(|entry| {
            serde_json::json!({
                "id": entry.id,
                "format": entry.format,
                "timestamp": entry.timestamp.to_rfc3339(),
                "routes_count": entry.routes_count,
                "variables_count": entry.variables_count,
                "warnings_count": entry.warnings_count,
                "success": entry.success,
                "filename": entry.filename,
                "environment": entry.environment,
                "base_url": entry.base_url,
                "error_message": entry.error_message
            })
        })
        .collect();

    let response = serde_json::json!({
        "imports": imports,
        "total": total
    });

    Json(ApiResponse::success(response))
}

pub async fn get_admin_api_state(
    State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "status": "active"
    })))
}

pub async fn get_admin_api_replay(
    State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "replay": []
    })))
}

pub async fn get_sse_status(
    State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "available": true,
        "endpoint": "/sse",
        "config": {
            "event_type": "status",
            "interval_ms": 1000,
            "data_template": "{}"
        }
    })))
}

pub async fn get_sse_connections(
    State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "active_connections": 0
    })))
}

// The workspace handlers below currently return static placeholder responses.
pub async fn get_workspaces(
    State(_state): State<AdminState>,
) -> Json<ApiResponse<Vec<serde_json::Value>>> {
    Json(ApiResponse::success(vec![]))
}

pub async fn create_workspace(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Workspace created".to_string()))
}

pub async fn open_workspace_from_directory(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Workspace opened from directory".to_string()))
}

/// Report the current reality level along with the chaos, latency, and MockAI
/// settings it implies.
pub async fn get_reality_level(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.reality_engine.read().await;
    let level = engine.get_level().await;
    let config = engine.get_config().await;

    Json(ApiResponse::success(serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": {
            "enabled": config.chaos.enabled,
            "error_rate": config.chaos.error_rate,
            "delay_rate": config.chaos.delay_rate,
        },
        "latency": {
            "base_ms": config.latency.base_ms,
            "jitter_ms": config.latency.jitter_ms,
        },
        "mockai": {
            "enabled": config.mockai.enabled,
        },
    })))
}

/// Request body for setting the reality level (1-5).
#[derive(Deserialize)]
pub struct SetRealityLevelRequest {
    level: u8,
}

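/// Set the reality level and hot-reload the derived configuration into the
/// chaos, latency, and MockAI subsystems. Subsystem update failures are
/// reported as warnings in the response rather than failing the request.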
pub async fn set_reality_level(
    State(state): State<AdminState>,
    Json(request): Json<SetRealityLevelRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let level = match mockforge_core::RealityLevel::from_value(request.level) {
        Some(l) => l,
        None => {
            return Json(ApiResponse::error(format!(
                "Invalid reality level: {}. Must be between 1 and 5.",
                request.level
            )));
        }
    };

    let mut engine = state.reality_engine.write().await;
    engine.set_level(level).await;
    let config = engine.get_config().await;
    // Release the engine lock before updating the other subsystems.
    drop(engine);

    let mut update_errors = Vec::new();

    if let Some(ref chaos_api_state) = state.chaos_api_state {
        use mockforge_chaos::config::{FaultInjectionConfig, LatencyConfig};
        let mut chaos_config = chaos_api_state.config.write().await;

        // Translate the reality latency profile into a chaos latency config.
        let latency_config = if config.latency.base_ms > 0 {
            Some(LatencyConfig {
                enabled: true,
                fixed_delay_ms: Some(config.latency.base_ms),
                random_delay_range_ms: config
                    .latency
                    .max_ms
                    .map(|max| (config.latency.min_ms, max)),
                jitter_percent: if config.latency.jitter_ms > 0 {
                    (config.latency.jitter_ms as f64 / config.latency.base_ms as f64).min(1.0)
                } else {
                    0.0
                },
                probability: 1.0,
            })
        } else {
            None
        };

        let fault_injection_config = if config.chaos.enabled {
            Some(FaultInjectionConfig {
                enabled: true,
                http_errors: config.chaos.status_codes.clone(),
                http_error_probability: config.chaos.error_rate,
                connection_errors: false,
                connection_error_probability: 0.0,
                timeout_errors: config.chaos.inject_timeouts,
                timeout_ms: config.chaos.timeout_ms,
                timeout_probability: if config.chaos.inject_timeouts {
                    config.chaos.error_rate
                } else {
                    0.0
                },
                partial_responses: false,
                partial_response_probability: 0.0,
                payload_corruption: false,
                payload_corruption_probability: 0.0,
                corruption_type: mockforge_chaos::config::CorruptionType::None,
                error_pattern: Some(mockforge_chaos::config::ErrorPattern::Random {
                    probability: config.chaos.error_rate,
                }),
                mockai_enabled: false,
            })
        } else {
            None
        };

        chaos_config.enabled = config.chaos.enabled;
        chaos_config.latency = latency_config;
        chaos_config.fault_injection = fault_injection_config;

        drop(chaos_config);
        tracing::info!("✅ Updated chaos config for reality level {}", level.value());
    }

    if let Some(ref latency_injector) = state.latency_injector {
        match mockforge_core::latency::LatencyInjector::update_profile_async(
            latency_injector,
            config.latency.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated latency injector for reality level {}", level.value());
            }
            Err(e) => {
                let error_msg = format!("Failed to update latency injector: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    if let Some(ref mockai) = state.mockai {
        match mockforge_core::intelligent_behavior::MockAI::update_config_async(
            mockai,
            config.mockai.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated MockAI config for reality level {}", level.value());
            }
            Err(e) => {
                let error_msg = format!("Failed to update MockAI: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    let mut response = serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": {
            "enabled": config.chaos.enabled,
            "error_rate": config.chaos.error_rate,
            "delay_rate": config.chaos.delay_rate,
        },
        "latency": {
            "base_ms": config.latency.base_ms,
            "jitter_ms": config.latency.jitter_ms,
        },
        "mockai": {
            "enabled": config.mockai.enabled,
        },
    });

    if !update_errors.is_empty() {
        response["warnings"] = serde_json::json!(update_errors);
        tracing::warn!(
            "Reality level updated to {} but some subsystems failed to update: {:?}",
            level.value(),
            update_errors
        );
    } else {
        tracing::info!(
            "✅ Reality level successfully updated to {} (hot-reload applied)",
            level.value()
        );
    }

    Json(ApiResponse::success(response))
}

4036pub async fn list_reality_presets(
4038 State(state): State<AdminState>,
4039) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4040 let persistence = &state.workspace_persistence;
4041 match persistence.list_reality_presets().await {
4042 Ok(preset_paths) => {
4043 let presets: Vec<serde_json::Value> = preset_paths
4044 .iter()
4045 .map(|path| {
4046 serde_json::json!({
4047 "id": path.file_name().and_then(|n| n.to_str()).unwrap_or("unknown"),
4048 "path": path.to_string_lossy(),
4049 "name": path.file_stem().and_then(|n| n.to_str()).unwrap_or("unknown"),
4050 })
4051 })
4052 .collect();
4053 Json(ApiResponse::success(presets))
4054 }
4055 Err(e) => Json(ApiResponse::error(format!("Failed to list presets: {}", e))),
4056 }
4057}
4058
4059#[derive(Deserialize)]
4061pub struct ImportPresetRequest {
4062 path: String,
4063}
4064
4065pub async fn import_reality_preset(
4066 State(state): State<AdminState>,
4067 Json(request): Json<ImportPresetRequest>,
4068) -> Json<ApiResponse<serde_json::Value>> {
4069 let persistence = &state.workspace_persistence;
4070 let path = std::path::Path::new(&request.path);
4071
4072 match persistence.import_reality_preset(path).await {
4073 Ok(preset) => {
4074 let mut engine = state.reality_engine.write().await;
4076 engine.apply_preset(preset.clone()).await;
4077
4078 Json(ApiResponse::success(serde_json::json!({
4079 "name": preset.name,
4080 "description": preset.description,
4081 "level": preset.config.level.value(),
4082 "level_name": preset.config.level.name(),
4083 })))
4084 }
4085 Err(e) => Json(ApiResponse::error(format!("Failed to import preset: {}", e))),
4086 }
4087}

/// Request body for exporting the current reality configuration as a preset.
#[derive(Deserialize)]
pub struct ExportPresetRequest {
    name: String,
    description: Option<String>,
}

/// Export the current reality configuration as a named preset file.
pub async fn export_reality_preset(
    State(state): State<AdminState>,
    Json(request): Json<ExportPresetRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.reality_engine.read().await;
    let preset = engine.create_preset(request.name.clone(), request.description.clone()).await;

    // Sanitize the preset name into a filesystem-friendly filename.
    let persistence = &state.workspace_persistence;
    let presets_dir = persistence.presets_dir();
    let filename = format!("{}.json", request.name.replace(' ', "_").to_lowercase());
    let output_path = presets_dir.join(&filename);

    match persistence.export_reality_preset(&preset, &output_path).await {
        Ok(_) => Json(ApiResponse::success(serde_json::json!({
            "name": preset.name,
            "description": preset.description,
            "path": output_path.to_string_lossy(),
            "level": preset.config.level.value(),
        }))),
        Err(e) => Json(ApiResponse::error(format!("Failed to export preset: {}", e))),
    }
}

/// Get the current real/mock blend ratio for a path (defaults to "/").
pub async fn get_continuum_ratio(
    State(state): State<AdminState>,
    Query(params): Query<std::collections::HashMap<String, String>>,
) -> Json<ApiResponse<serde_json::Value>> {
    let path = params.get("path").cloned().unwrap_or_else(|| "/".to_string());
    let engine = state.continuum_engine.read().await;
    let ratio = engine.get_blend_ratio(&path).await;
    let config = engine.get_config().await;
    let enabled = engine.is_enabled().await;

    Json(ApiResponse::success(serde_json::json!({
        "path": path,
        "blend_ratio": ratio,
        "enabled": enabled,
        "transition_mode": format!("{:?}", config.transition_mode),
        "merge_strategy": format!("{:?}", config.merge_strategy),
        "default_ratio": config.default_ratio,
    })))
}

/// Request body for setting a per-path blend ratio.
#[derive(Deserialize)]
pub struct SetContinuumRatioRequest {
    path: String,
    ratio: f64,
}

/// Set the real/mock blend ratio for a path, clamped to [0.0, 1.0].
pub async fn set_continuum_ratio(
    State(state): State<AdminState>,
    Json(request): Json<SetContinuumRatioRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let ratio = request.ratio.clamp(0.0, 1.0);
    let engine = state.continuum_engine.read().await;
    engine.set_blend_ratio(&request.path, ratio).await;

    Json(ApiResponse::success(serde_json::json!({
        "path": request.path,
        "blend_ratio": ratio,
    })))
}

/// Get the active time-based transition schedule, if one is configured.
pub async fn get_continuum_schedule(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.continuum_engine.read().await;
    let schedule = engine.get_time_schedule().await;

    match schedule {
        Some(s) => Json(ApiResponse::success(serde_json::json!({
            "start_time": s.start_time.to_rfc3339(),
            "end_time": s.end_time.to_rfc3339(),
            "start_ratio": s.start_ratio,
            "end_ratio": s.end_ratio,
            "curve": format!("{:?}", s.curve),
            "duration_days": s.duration().num_days(),
        }))),
        None => Json(ApiResponse::success(serde_json::json!(null))),
    }
}

/// Request body for configuring a time-based transition schedule.
#[derive(Deserialize)]
pub struct SetContinuumScheduleRequest {
    start_time: String,
    end_time: String,
    start_ratio: f64,
    end_ratio: f64,
    curve: Option<String>,
}

/// Set a time-based transition schedule; timestamps must be RFC 3339.
pub async fn set_continuum_schedule(
    State(state): State<AdminState>,
    Json(request): Json<SetContinuumScheduleRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let start_time = chrono::DateTime::parse_from_rfc3339(&request.start_time)
        .map(|dt| dt.with_timezone(&chrono::Utc))
        .map_err(|e| format!("Invalid start_time: {}", e));

    let end_time = chrono::DateTime::parse_from_rfc3339(&request.end_time)
        .map(|dt| dt.with_timezone(&chrono::Utc))
        .map_err(|e| format!("Invalid end_time: {}", e));

    match (start_time, end_time) {
        (Ok(start), Ok(end)) => {
            // Unrecognized curve names fall back to a linear transition.
            let curve = request
                .curve
                .as_deref()
                .map(|c| match c {
                    "linear" => mockforge_core::TransitionCurve::Linear,
                    "exponential" => mockforge_core::TransitionCurve::Exponential,
                    "sigmoid" => mockforge_core::TransitionCurve::Sigmoid,
                    _ => mockforge_core::TransitionCurve::Linear,
                })
                .unwrap_or(mockforge_core::TransitionCurve::Linear);

            let schedule = mockforge_core::TimeSchedule::with_curve(
                start,
                end,
                request.start_ratio.clamp(0.0, 1.0),
                request.end_ratio.clamp(0.0, 1.0),
                curve,
            );

            let engine = state.continuum_engine.read().await;
            engine.set_time_schedule(schedule.clone()).await;

            Json(ApiResponse::success(serde_json::json!({
                "start_time": schedule.start_time.to_rfc3339(),
                "end_time": schedule.end_time.to_rfc3339(),
                "start_ratio": schedule.start_ratio,
                "end_ratio": schedule.end_ratio,
                "curve": format!("{:?}", schedule.curve),
            })))
        }
        (Err(e), _) | (_, Err(e)) => Json(ApiResponse::error(e)),
    }
}

/// Request body for manually advancing the default blend ratio.
#[derive(Deserialize)]
pub struct AdvanceContinuumRatioRequest {
    increment: Option<f64>,
}

/// Advance the default blend ratio by the given increment (default 0.1).
pub async fn advance_continuum_ratio(
    State(state): State<AdminState>,
    Json(request): Json<AdvanceContinuumRatioRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let increment = request.increment.unwrap_or(0.1);
    let engine = state.continuum_engine.read().await;
    engine.advance_ratio(increment).await;
    let config = engine.get_config().await;

    Json(ApiResponse::success(serde_json::json!({
        "default_ratio": config.default_ratio,
        "increment": increment,
    })))
}

/// Request body for toggling the continuum engine.
#[derive(Deserialize)]
pub struct SetContinuumEnabledRequest {
    enabled: bool,
}

/// Enable or disable the continuum engine.
pub async fn set_continuum_enabled(
    State(state): State<AdminState>,
    Json(request): Json<SetContinuumEnabledRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.continuum_engine.read().await;
    engine.set_enabled(request.enabled).await;

    Json(ApiResponse::success(serde_json::json!({
        "enabled": request.enabled,
    })))
}

/// Get all manual per-path ratio overrides.
pub async fn get_continuum_overrides(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.continuum_engine.read().await;
    let overrides = engine.get_manual_overrides().await;

    Json(ApiResponse::success(serde_json::json!(overrides)))
}

/// Clear all manual per-path ratio overrides.
pub async fn clear_continuum_overrides(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.continuum_engine.read().await;
    engine.clear_manual_overrides().await;

    Json(ApiResponse::success(serde_json::json!({
        "message": "All manual overrides cleared",
    })))
}

/// Get a workspace by ID (placeholder implementation returning mock data).
pub async fn get_workspace(
    State(_state): State<AdminState>,
    axum::extract::Path(workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "workspace": {
            "summary": {
                "id": workspace_id,
                "name": "Mock Workspace",
                "description": "A mock workspace"
            },
            "folders": [],
            "requests": []
        }
    })))
}

/// Delete a workspace (placeholder implementation).
pub async fn delete_workspace(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Workspace deleted".to_string()))
}

/// Mark a workspace as active (placeholder implementation).
pub async fn set_active_workspace(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Workspace activated".to_string()))
}

/// Create a folder in a workspace (placeholder implementation).
pub async fn create_folder(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Folder created".to_string()))
}

/// Create a request in a workspace (placeholder implementation).
pub async fn create_request(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Request created".to_string()))
}

/// Execute a stored workspace request (placeholder implementation).
pub async fn execute_workspace_request(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "status": "executed",
        "response": {}
    })))
}

/// Get the execution history of a request (placeholder implementation).
pub async fn get_request_history(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
) -> Json<ApiResponse<Vec<serde_json::Value>>> {
    Json(ApiResponse::success(vec![]))
}

/// Get a folder by ID (placeholder implementation returning mock data).
pub async fn get_folder(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, folder_id)): axum::extract::Path<(String, String)>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "folder": {
            "summary": {
                "id": folder_id,
                "name": "Mock Folder",
                "description": "A mock folder"
            },
            "requests": []
        }
    })))
}

/// Import routes into a workspace (placeholder implementation).
pub async fn import_to_workspace(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Import to workspace completed".to_string()))
}

/// Export workspaces (placeholder implementation).
pub async fn export_workspaces(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Workspaces exported".to_string()))
}

/// List the environments for a workspace; currently returns only the
/// built-in global environment.
pub async fn get_environments(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<serde_json::Value>> {
    let environments = vec![serde_json::json!({
        "id": "global",
        "name": "Global",
        "description": "Global environment variables",
        "variable_count": 0,
        "is_global": true,
        "active": true,
        "order": 0
    })];

    Json(ApiResponse::success(serde_json::json!({
        "environments": environments,
        "total": 1
    })))
}

/// Create an environment (placeholder implementation).
pub async fn create_environment(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment created".to_string()))
}

/// Update an environment (placeholder implementation).
pub async fn update_environment(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment updated".to_string()))
}

/// Delete an environment (placeholder implementation).
pub async fn delete_environment(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment deleted".to_string()))
}

/// Mark an environment as active (placeholder implementation).
pub async fn set_active_environment(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment activated".to_string()))
}

/// Reorder environments (placeholder implementation).
pub async fn update_environments_order(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment order updated".to_string()))
}

/// List the variables of an environment (placeholder implementation).
pub async fn get_environment_variables(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "variables": []
    })))
}

/// Set an environment variable (placeholder implementation).
pub async fn set_environment_variable(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment variable set".to_string()))
}

/// Remove an environment variable (placeholder implementation).
pub async fn remove_environment_variable(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id, _variable_name)): axum::extract::Path<(
        String,
        String,
        String,
    )>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment variable removed".to_string()))
}

/// Get autocomplete suggestions for template variables (placeholder implementation).
pub async fn get_autocomplete_suggestions(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "suggestions": [],
        "start_position": 0,
        "end_position": 0
    })))
}

/// Get the sync status of a workspace (placeholder implementation).
pub async fn get_sync_status(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "status": "disabled"
    })))
}

/// Configure workspace sync (placeholder implementation).
pub async fn configure_sync(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Sync configured".to_string()))
}

/// Disable workspace sync (placeholder implementation).
pub async fn disable_sync(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Sync disabled".to_string()))
}

/// Trigger a sync run (placeholder implementation).
pub async fn trigger_sync(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Sync triggered".to_string()))
}

/// List pending sync changes (placeholder implementation).
pub async fn get_sync_changes(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<Vec<serde_json::Value>>> {
    Json(ApiResponse::success(vec![]))
}

/// Confirm pending sync changes (placeholder implementation).
pub async fn confirm_sync_changes(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Sync changes confirmed".to_string()))
}

/// Validate a plugin definition (placeholder implementation).
pub async fn validate_plugin(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Plugin validated".to_string()))
}

/// Clear the in-memory import history.
pub async fn clear_import_history(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    let mut history = state.import_history.write().await;
    history.clear();
    Json(ApiResponse::success("Import history cleared".to_string()))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_request_metrics_creation() {
        use std::collections::HashMap;

        let metrics = RequestMetrics {
            total_requests: 100,
            active_connections: 5,
            requests_by_endpoint: HashMap::new(),
            response_times: vec![10, 20, 30],
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint: HashMap::new(),
            last_request_by_endpoint: HashMap::new(),
        };

        assert_eq!(metrics.total_requests, 100);
        assert_eq!(metrics.active_connections, 5);
        assert_eq!(metrics.response_times.len(), 3);
    }
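
    // Illustrative test (not from the original source): a minimal sketch of the
    // serde_json index-assignment used by the reality-level handler, where
    // `response["warnings"]` is attached only when subsystem updates fail.
    #[test]
    fn test_response_warning_attachment() {
        let mut response = serde_json::json!({ "level": 3 });
        let update_errors = vec!["Failed to update MockAI: timeout".to_string()];

        // Index-assignment on a JSON object inserts the key if it is absent.
        response["warnings"] = serde_json::json!(update_errors);

        assert_eq!(response["level"], 3);
        assert_eq!(response["warnings"][0], "Failed to update MockAI: timeout");
    }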

    #[test]
    fn test_system_metrics_creation() {
        let metrics = SystemMetrics {
            cpu_usage_percent: 45.5,
            memory_usage_mb: 100,
            active_threads: 10,
        };

        assert_eq!(metrics.active_threads, 10);
        assert!(metrics.cpu_usage_percent > 0.0);
        assert_eq!(metrics.memory_usage_mb, 100);
    }

    #[test]
    fn test_time_series_point() {
        let point = TimeSeriesPoint {
            timestamp: chrono::Utc::now(),
            value: 42.5,
        };

        assert_eq!(point.value, 42.5);
    }
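
    // Illustrative test (not from the original source): mirrors the preset
    // filename sanitization in `export_reality_preset`, which lowercases the
    // preset name and replaces spaces with underscores before appending ".json".
    #[test]
    fn test_preset_filename_sanitization() {
        let name = "My Staging Preset";
        let filename = format!("{}.json", name.replace(' ', "_").to_lowercase());
        assert_eq!(filename, "my_staging_preset.json");
    }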

    #[test]
    fn test_restart_status() {
        let status = RestartStatus {
            in_progress: true,
            initiated_at: Some(chrono::Utc::now()),
            reason: Some("Manual restart".to_string()),
            success: None,
        };

        assert!(status.in_progress);
        assert!(status.reason.is_some());
    }
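
    // Illustrative test (not from the original source): demonstrates the
    // `f64::clamp` bound used by `set_continuum_ratio` and
    // `set_continuum_schedule` to keep blend ratios within [0.0, 1.0].
    #[test]
    fn test_continuum_ratio_clamping() {
        assert_eq!(1.5_f64.clamp(0.0, 1.0), 1.0);
        assert_eq!((-0.25_f64).clamp(0.0, 1.0), 0.0);
        assert_eq!(0.42_f64.clamp(0.0, 1.0), 0.42);
    }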

    #[test]
    fn test_configuration_state() {
        use std::collections::HashMap;

        let state = ConfigurationState {
            latency_profile: crate::models::LatencyProfile {
                name: "default".to_string(),
                base_ms: 100,
                jitter_ms: 10,
                tag_overrides: HashMap::new(),
            },
            fault_config: crate::models::FaultConfig {
                enabled: false,
                failure_rate: 0.0,
                status_codes: vec![],
                active_failures: 0,
            },
            proxy_config: crate::models::ProxyConfig {
                enabled: false,
                upstream_url: None,
                timeout_seconds: 30,
                requests_proxied: 0,
            },
            validation_settings: crate::models::ValidationSettings {
                mode: "off".to_string(),
                aggregate_errors: false,
                validate_responses: false,
                overrides: HashMap::new(),
            },
        };

        assert_eq!(state.latency_profile.name, "default");
        assert!(!state.fault_config.enabled);
        assert!(!state.proxy_config.enabled);
    }
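
    // Illustrative test (not from the original source): exercises the RFC 3339
    // parsing performed by `set_continuum_schedule`, converting the
    // fixed-offset result to UTC exactly as the handler does.
    #[test]
    fn test_rfc3339_schedule_parsing() {
        let parsed = chrono::DateTime::parse_from_rfc3339("2024-01-01T00:00:00Z")
            .map(|dt| dt.with_timezone(&chrono::Utc));
        assert!(parsed.is_ok());
        assert_eq!(parsed.unwrap().timestamp(), 1_704_067_200);

        // Malformed input surfaces as a parse error, which the handler maps
        // into an ApiResponse::error payload.
        assert!(chrono::DateTime::parse_from_rfc3339("not-a-timestamp").is_err());
    }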

    #[test]
    fn test_admin_state_new() {
        let http_addr: std::net::SocketAddr = "127.0.0.1:3000".parse().unwrap();
        let state = AdminState::new(
            Some(http_addr),
            None,
            None,
            None,
            true,
            8080,
            None,
            None,
            None,
            None,
            None,
        );

        assert_eq!(state.http_server_addr, Some(http_addr));
        assert!(state.api_enabled);
        assert_eq!(state.admin_port, 8080);
    }
}