use axum::{
    extract::{Path, Query, State},
    http::{self, StatusCode},
    response::{
        sse::{Event, Sse},
        Html, IntoResponse, Json,
    },
};
use chrono::Utc;
use futures_util::stream::{self, Stream};
use mockforge_core::{Error, Result};
use mockforge_plugin_loader::PluginRegistry;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::HashMap;
use std::convert::Infallible;
use std::process::Command;
use std::process::Stdio;
use std::sync::Arc;
use std::time::Duration;
use sysinfo::System;
use tokio::sync::RwLock;

use crate::models::{
    ApiResponse, ConfigUpdate, DashboardData, DashboardSystemInfo, FaultConfig, HealthCheck,
    LatencyProfile, LogFilter, MetricsData, ProxyConfig, RequestLog, RouteInfo, ServerInfo,
    ServerStatus, SimpleMetricsData, SystemInfo, ValidationSettings, ValidationUpdate,
};

use mockforge_core::workspace_import::{ImportResponse, ImportRoute};

pub mod admin;
pub mod ai_studio;
pub mod analytics;
pub mod analytics_stream;
pub mod analytics_v2;
pub mod assets;
pub mod behavioral_cloning;
pub mod chains;
pub mod community;
pub mod contract_diff;
pub mod coverage_metrics;
pub mod failure_analysis;
pub mod graph;
pub mod health;
pub mod migration;
pub mod pillar_analytics;
pub mod playground;
pub mod plugin;
pub mod promotions;
pub mod protocol_contracts;
pub mod verification;
pub mod voice;
pub mod workspaces;

pub use assets::*;
pub use chains::*;
pub use graph::*;
pub use migration::*;
pub use plugin::*;

use mockforge_core::workspace_import::WorkspaceImportConfig;
use mockforge_core::workspace_persistence::WorkspacePersistence;

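/// Aggregated request counters and response-time samples, keyed by
/// `"METHOD path"` endpoint strings.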
#[derive(Debug, Clone, Default)]
pub struct RequestMetrics {
    pub total_requests: u64,
    pub active_connections: u64,
    pub requests_by_endpoint: HashMap<String, u64>,
    pub response_times: Vec<u64>,
    pub response_times_by_endpoint: HashMap<String, Vec<u64>>,
    pub errors_by_endpoint: HashMap<String, u64>,
    pub last_request_by_endpoint: HashMap<String, chrono::DateTime<chrono::Utc>>,
}

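/// Point-in-time system resource usage sampled by the monitoring task.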
#[derive(Debug, Clone)]
pub struct SystemMetrics {
    pub memory_usage_mb: u64,
    pub cpu_usage_percent: f64,
    pub active_threads: u32,
}

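/// A single timestamped sample in a metrics time series.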
#[derive(Debug, Clone)]
pub struct TimeSeriesPoint {
    pub timestamp: chrono::DateTime<chrono::Utc>,
    pub value: f64,
}

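/// Rolling time-series buffers (capped at 100 points each) backing the
/// dashboard charts.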
#[derive(Debug, Clone, Default)]
pub struct TimeSeriesData {
    pub memory_usage: Vec<TimeSeriesPoint>,
    pub cpu_usage: Vec<TimeSeriesPoint>,
    pub request_count: Vec<TimeSeriesPoint>,
    pub response_time: Vec<TimeSeriesPoint>,
}

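/// Tracks the state of an in-flight server restart.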
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestartStatus {
    pub in_progress: bool,
    pub initiated_at: Option<chrono::DateTime<chrono::Utc>>,
    pub reason: Option<String>,
    pub success: Option<bool>,
}

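/// Metadata describing a recorded fixture file on disk.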
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureInfo {
    pub id: String,
    pub protocol: String,
    pub method: String,
    pub path: String,
    pub saved_at: chrono::DateTime<chrono::Utc>,
    pub file_size: u64,
    pub file_path: String,
    pub fingerprint: String,
    pub metadata: serde_json::Value,
}

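/// Outcome of a single smoke test run.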
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SmokeTestResult {
    pub id: String,
    pub name: String,
    pub method: String,
    pub path: String,
    pub description: String,
    pub last_run: Option<chrono::DateTime<chrono::Utc>>,
    pub status: String,
    pub response_time_ms: Option<u64>,
    pub error_message: Option<String>,
    pub status_code: Option<u16>,
    pub duration_seconds: Option<f64>,
}

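/// Execution settings shared by a batch of smoke tests.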
#[derive(Debug, Clone)]
pub struct SmokeTestContext {
    pub base_url: String,
    pub timeout_seconds: u64,
    pub parallel: bool,
}

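/// Mutable runtime configuration exposed through the admin API.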
#[derive(Debug, Clone, Serialize)]
pub struct ConfigurationState {
    pub latency_profile: LatencyProfile,
    pub fault_config: FaultConfig,
    pub proxy_config: ProxyConfig,
    pub validation_settings: ValidationSettings,
}

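/// One entry in the workspace import history.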
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportHistoryEntry {
    pub id: String,
    pub format: String,
    pub timestamp: chrono::DateTime<chrono::Utc>,
    pub routes_count: usize,
    pub variables_count: usize,
    pub warnings_count: usize,
    pub success: bool,
    pub filename: Option<String>,
    pub environment: Option<String>,
    pub base_url: Option<String>,
    pub error_message: Option<String>,
}

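/// Shared state for the admin API. Server addresses are plain fields; all
/// mutable data (metrics, config, logs, engines) lives behind
/// `Arc<RwLock<_>>` so handlers can clone the state cheaply.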
#[derive(Clone)]
pub struct AdminState {
    pub http_server_addr: Option<std::net::SocketAddr>,
    pub ws_server_addr: Option<std::net::SocketAddr>,
    pub grpc_server_addr: Option<std::net::SocketAddr>,
    pub graphql_server_addr: Option<std::net::SocketAddr>,
    pub api_enabled: bool,
    pub admin_port: u16,
    pub start_time: chrono::DateTime<chrono::Utc>,
    pub metrics: Arc<RwLock<RequestMetrics>>,
    pub system_metrics: Arc<RwLock<SystemMetrics>>,
    pub config: Arc<RwLock<ConfigurationState>>,
    pub logs: Arc<RwLock<Vec<RequestLog>>>,
    pub time_series: Arc<RwLock<TimeSeriesData>>,
    pub restart_status: Arc<RwLock<RestartStatus>>,
    pub smoke_test_results: Arc<RwLock<Vec<SmokeTestResult>>>,
    pub import_history: Arc<RwLock<Vec<ImportHistoryEntry>>>,
    pub workspace_persistence: Arc<WorkspacePersistence>,
    pub plugin_registry: Arc<RwLock<PluginRegistry>>,
    pub reality_engine: Arc<RwLock<mockforge_core::RealityEngine>>,
    pub continuum_engine: Arc<RwLock<mockforge_core::RealityContinuumEngine>>,
    pub chaos_api_state: Option<std::sync::Arc<mockforge_chaos::api::ChaosApiState>>,
    pub latency_injector:
        Option<std::sync::Arc<tokio::sync::RwLock<mockforge_core::latency::LatencyInjector>>>,
    pub mockai:
        Option<std::sync::Arc<tokio::sync::RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
}

impl AdminState {
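    /// Spawn a background task that samples CPU, memory, and thread counts
    /// every 10 seconds via `sysinfo` and feeds them into the time series.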
    pub async fn start_system_monitoring(&self) {
        let state_clone = self.clone();
        tokio::spawn(async move {
            let mut sys = System::new_all();
            let mut refresh_count = 0u64;

            tracing::info!("Starting system monitoring background task");

            loop {
                sys.refresh_all();

                let cpu_usage = sys.global_cpu_usage();

                let _total_memory = sys.total_memory() as f64;
                let used_memory = sys.used_memory() as f64;
                let memory_usage_mb = used_memory / 1024.0 / 1024.0;

                // Approximation: report the logical CPU count as the thread count.
                let active_threads = sys.cpus().len() as u32;

                let memory_mb_u64 = memory_usage_mb as u64;

                // Log only every 10th sample to keep debug output manageable.
                if refresh_count.is_multiple_of(10) {
                    tracing::debug!(
                        "System metrics updated: CPU={:.1}%, Mem={}MB, Threads={}",
                        cpu_usage,
                        memory_mb_u64,
                        active_threads
                    );
                }

                state_clone
                    .update_system_metrics(memory_mb_u64, cpu_usage as f64, active_threads)
                    .await;

                refresh_count += 1;

                tokio::time::sleep(Duration::from_secs(10)).await;
            }
        });
    }

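    /// Construct an `AdminState` with default configuration and empty buffers.
    ///
    /// A minimal wiring sketch (illustrative only; the `None` arguments assume
    /// the optional chaos, latency, MockAI, continuum, and virtual-clock
    /// subsystems are disabled):
    ///
    /// ```ignore
    /// let state = AdminState::new(
    ///     Some("127.0.0.1:3000".parse().unwrap()), // HTTP server
    ///     None, None, None,                        // WS, gRPC, GraphQL
    ///     true,                                    // admin API enabled
    ///     9080,                                    // admin port
    ///     None, None, None, None, None,            // optional subsystems
    /// );
    /// state.start_system_monitoring().await;
    /// ```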
    pub fn new(
        http_server_addr: Option<std::net::SocketAddr>,
        ws_server_addr: Option<std::net::SocketAddr>,
        grpc_server_addr: Option<std::net::SocketAddr>,
        graphql_server_addr: Option<std::net::SocketAddr>,
        api_enabled: bool,
        admin_port: u16,
        chaos_api_state: Option<std::sync::Arc<mockforge_chaos::api::ChaosApiState>>,
        latency_injector: Option<
            std::sync::Arc<tokio::sync::RwLock<mockforge_core::latency::LatencyInjector>>,
        >,
        mockai: Option<
            std::sync::Arc<tokio::sync::RwLock<mockforge_core::intelligent_behavior::MockAI>>,
        >,
        continuum_config: Option<mockforge_core::ContinuumConfig>,
        virtual_clock: Option<std::sync::Arc<mockforge_core::VirtualClock>>,
    ) -> Self {
        let start_time = chrono::Utc::now();

        Self {
            http_server_addr,
            ws_server_addr,
            grpc_server_addr,
            graphql_server_addr,
            api_enabled,
            admin_port,
            start_time,
            metrics: Arc::new(RwLock::new(RequestMetrics::default())),
            system_metrics: Arc::new(RwLock::new(SystemMetrics {
                memory_usage_mb: 0,
                cpu_usage_percent: 0.0,
                active_threads: 0,
            })),
            config: Arc::new(RwLock::new(ConfigurationState {
                latency_profile: LatencyProfile {
                    name: "default".to_string(),
                    base_ms: 50,
                    jitter_ms: 20,
                    tag_overrides: HashMap::new(),
                },
                fault_config: FaultConfig {
                    enabled: false,
                    failure_rate: 0.0,
                    status_codes: vec![500, 502, 503],
                    active_failures: 0,
                },
                proxy_config: ProxyConfig {
                    enabled: false,
                    upstream_url: None,
                    timeout_seconds: 30,
                    requests_proxied: 0,
                },
                validation_settings: ValidationSettings {
                    mode: "enforce".to_string(),
                    aggregate_errors: true,
                    validate_responses: false,
                    overrides: HashMap::new(),
                },
            })),
            logs: Arc::new(RwLock::new(Vec::new())),
            time_series: Arc::new(RwLock::new(TimeSeriesData::default())),
            restart_status: Arc::new(RwLock::new(RestartStatus {
                in_progress: false,
                initiated_at: None,
                reason: None,
                success: None,
            })),
            smoke_test_results: Arc::new(RwLock::new(Vec::new())),
            import_history: Arc::new(RwLock::new(Vec::new())),
            workspace_persistence: Arc::new(WorkspacePersistence::new("./workspaces")),
            plugin_registry: Arc::new(RwLock::new(PluginRegistry::new())),
            reality_engine: Arc::new(RwLock::new(mockforge_core::RealityEngine::new())),
            continuum_engine: Arc::new(RwLock::new({
                let config = continuum_config.unwrap_or_default();
                if let Some(clock) = virtual_clock {
                    mockforge_core::RealityContinuumEngine::with_virtual_clock(config, clock)
                } else {
                    mockforge_core::RealityContinuumEngine::new(config)
                }
            })),
            chaos_api_state,
            latency_injector,
            mockai,
        }
    }

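    /// Record a completed request in the in-memory metrics, time series, and
    /// log buffer.
    ///
    /// A minimal sketch (illustrative only; assumes `state: &AdminState`):
    ///
    /// ```ignore
    /// state.record_request("GET", "/api/users", 200, 12, None).await;
    /// state.record_request("POST", "/api/users", 500, 40, Some("boom".into())).await;
    /// ```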
    pub async fn record_request(
        &self,
        method: &str,
        path: &str,
        status_code: u16,
        response_time_ms: u64,
        error: Option<String>,
    ) {
        let mut metrics = self.metrics.write().await;

        metrics.total_requests += 1;
        let endpoint = format!("{} {}", method, path);
        *metrics.requests_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;

        if status_code >= 400 {
            *metrics.errors_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
        }

        // Keep only the most recent 100 global response-time samples.
        metrics.response_times.push(response_time_ms);
        if metrics.response_times.len() > 100 {
            metrics.response_times.remove(0);
        }

        // Keep only the most recent 50 samples per endpoint.
        let endpoint_times = metrics
            .response_times_by_endpoint
            .entry(endpoint.clone())
            .or_insert_with(Vec::new);
        endpoint_times.push(response_time_ms);
        if endpoint_times.len() > 50 {
            endpoint_times.remove(0);
        }

        metrics.last_request_by_endpoint.insert(endpoint, chrono::Utc::now());

        let total_requests = metrics.total_requests;

        // Release the metrics lock before taking the time-series and log locks.
        drop(metrics);

        self.update_time_series_on_request(response_time_ms, total_requests).await;

        let mut logs = self.logs.write().await;
        let log_entry = RequestLog {
            id: format!("req_{}", total_requests),
            timestamp: Utc::now(),
            method: method.to_string(),
            path: path.to_string(),
            status_code,
            response_time_ms,
            client_ip: None,
            user_agent: None,
            headers: HashMap::new(),
            response_size_bytes: 0,
            error_message: error,
        };

        logs.push(log_entry);

        // Cap the in-memory log buffer at 1000 entries.
        if logs.len() > 1000 {
            logs.remove(0);
        }
    }

    pub async fn get_metrics(&self) -> RequestMetrics {
        self.metrics.read().await.clone()
    }

    pub async fn update_system_metrics(&self, memory_mb: u64, cpu_percent: f64, threads: u32) {
        let mut system_metrics = self.system_metrics.write().await;
        system_metrics.memory_usage_mb = memory_mb;
        system_metrics.cpu_usage_percent = cpu_percent;
        system_metrics.active_threads = threads;

        self.update_time_series_data(memory_mb as f64, cpu_percent).await;
    }

    async fn update_time_series_data(&self, memory_mb: f64, cpu_percent: f64) {
        let now = chrono::Utc::now();
        let mut time_series = self.time_series.write().await;

        time_series.memory_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: memory_mb,
        });

        time_series.cpu_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: cpu_percent,
        });

        let metrics = self.metrics.read().await;
        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: metrics.total_requests as f64,
        });

        let avg_response_time = if !metrics.response_times.is_empty() {
            metrics.response_times.iter().sum::<u64>() as f64 / metrics.response_times.len() as f64
        } else {
            0.0
        };
        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: avg_response_time,
        });

        const MAX_POINTS: usize = 100;
        if time_series.memory_usage.len() > MAX_POINTS {
            time_series.memory_usage.remove(0);
        }
        if time_series.cpu_usage.len() > MAX_POINTS {
            time_series.cpu_usage.remove(0);
        }
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    pub async fn get_system_metrics(&self) -> SystemMetrics {
        self.system_metrics.read().await.clone()
    }

    pub async fn get_time_series_data(&self) -> TimeSeriesData {
        self.time_series.read().await.clone()
    }

    pub async fn get_restart_status(&self) -> RestartStatus {
        self.restart_status.read().await.clone()
    }

    pub async fn initiate_restart(&self, reason: String) -> Result<()> {
        let mut status = self.restart_status.write().await;

        if status.in_progress {
            return Err(Error::generic("Restart already in progress".to_string()));
        }

        status.in_progress = true;
        status.initiated_at = Some(chrono::Utc::now());
        status.reason = Some(reason);
        status.success = None;

        Ok(())
    }

    pub async fn complete_restart(&self, success: bool) {
        let mut status = self.restart_status.write().await;
        status.in_progress = false;
        status.success = Some(success);
    }

    pub async fn get_smoke_test_results(&self) -> Vec<SmokeTestResult> {
        self.smoke_test_results.read().await.clone()
    }

    pub async fn update_smoke_test_result(&self, result: SmokeTestResult) {
        let mut results = self.smoke_test_results.write().await;

        if let Some(existing) = results.iter_mut().find(|r| r.id == result.id) {
            *existing = result;
        } else {
            results.push(result);
        }

        if results.len() > 100 {
            results.remove(0);
        }
    }

    pub async fn clear_smoke_test_results(&self) {
        let mut results = self.smoke_test_results.write().await;
        results.clear();
    }

    async fn update_time_series_on_request(&self, response_time_ms: u64, total_requests: u64) {
        let now = chrono::Utc::now();
        let mut time_series = self.time_series.write().await;

        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: total_requests as f64,
        });

        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: response_time_ms as f64,
        });

        const MAX_POINTS: usize = 100;
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    pub async fn get_config(&self) -> ConfigurationState {
        self.config.read().await.clone()
    }

    pub async fn update_latency_config(
        &self,
        base_ms: u64,
        jitter_ms: u64,
        tag_overrides: HashMap<String, u64>,
    ) {
        let mut config = self.config.write().await;
        config.latency_profile.base_ms = base_ms;
        config.latency_profile.jitter_ms = jitter_ms;
        config.latency_profile.tag_overrides = tag_overrides;
    }

    pub async fn update_fault_config(
        &self,
        enabled: bool,
        failure_rate: f64,
        status_codes: Vec<u16>,
    ) {
        let mut config = self.config.write().await;
        config.fault_config.enabled = enabled;
        config.fault_config.failure_rate = failure_rate;
        config.fault_config.status_codes = status_codes;
    }

    pub async fn update_proxy_config(
        &self,
        enabled: bool,
        upstream_url: Option<String>,
        timeout_seconds: u64,
    ) {
        let mut config = self.config.write().await;
        config.proxy_config.enabled = enabled;
        config.proxy_config.upstream_url = upstream_url;
        config.proxy_config.timeout_seconds = timeout_seconds;
    }

    pub async fn update_validation_config(
        &self,
        mode: String,
        aggregate_errors: bool,
        validate_responses: bool,
        overrides: HashMap<String, String>,
    ) {
        let mut config = self.config.write().await;
        config.validation_settings.mode = mode;
        config.validation_settings.aggregate_errors = aggregate_errors;
        config.validation_settings.validate_responses = validate_responses;
        config.validation_settings.overrides = overrides;
    }

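    /// Return the newest logs first, applying the method, path-substring, and
    /// status-code filters, capped at `filter.limit` (default 100).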
    pub async fn get_logs_filtered(&self, filter: &LogFilter) -> Vec<RequestLog> {
        let logs = self.logs.read().await;

        logs.iter()
            .rev() // newest entries first
            .filter(|log| {
                if let Some(ref method) = filter.method {
                    if log.method != *method {
                        return false;
                    }
                }
                if let Some(ref path_pattern) = filter.path_pattern {
                    if !log.path.contains(path_pattern) {
                        return false;
                    }
                }
                if let Some(status) = filter.status_code {
                    if log.status_code != status {
                        return false;
                    }
                }
                true
            })
            .take(filter.limit.unwrap_or(100))
            .cloned()
            .collect()
    }

    pub async fn clear_logs(&self) {
        let mut logs = self.logs.write().await;
        logs.clear();
    }
}

pub async fn serve_admin_html() -> Html<&'static str> {
    Html(crate::get_admin_html())
}

pub async fn serve_admin_css() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "text/css")], crate::get_admin_css())
}

pub async fn serve_admin_js() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "application/javascript")], crate::get_admin_js())
}

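/// Build the dashboard payload. Metrics are derived from the centralized
/// request logger when one is registered; otherwise the handler falls back to
/// the in-memory `AdminState` buffers.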
pub async fn get_dashboard(State(state): State<AdminState>) -> Json<ApiResponse<DashboardData>> {
    let uptime = Utc::now().signed_duration_since(state.start_time).num_seconds() as u64;

    let system_metrics = state.get_system_metrics().await;
    let _config = state.get_config().await;

    let (recent_logs, calculated_metrics): (Vec<RequestLog>, RequestMetrics) =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            let all_logs = global_logger.get_recent_logs(None).await;
            let recent_logs_subset = global_logger.get_recent_logs(Some(20)).await;

            let total_requests = all_logs.len() as u64;
            let mut requests_by_endpoint = HashMap::new();
            let mut errors_by_endpoint = HashMap::new();
            let mut response_times = Vec::new();
            let mut last_request_by_endpoint = HashMap::new();

            for log in &all_logs {
                let endpoint_key = format!("{} {}", log.method, log.path);
                *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;

                if log.status_code >= 400 {
                    *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
                }

                response_times.push(log.response_time_ms);
                last_request_by_endpoint.insert(endpoint_key, log.timestamp);
            }

            let calculated_metrics = RequestMetrics {
                total_requests,
                active_connections: 0, // not tracked by the centralized logger
                requests_by_endpoint,
                response_times,
                response_times_by_endpoint: HashMap::new(),
                errors_by_endpoint,
                last_request_by_endpoint,
            };

            let recent_logs = recent_logs_subset
                .into_iter()
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            (recent_logs, calculated_metrics)
        } else {
            let logs = state.logs.read().await;
            let recent_logs = logs.iter().rev().take(10).cloned().collect();
            let metrics = state.get_metrics().await;
            (recent_logs, metrics)
        };

    let metrics = calculated_metrics;

    let system_info = SystemInfo {
        version: env!("CARGO_PKG_VERSION").to_string(),
        uptime_seconds: uptime,
        memory_usage_mb: system_metrics.memory_usage_mb,
        cpu_usage_percent: system_metrics.cpu_usage_percent,
        active_threads: system_metrics.active_threads as usize,
        total_routes: metrics.requests_by_endpoint.len(),
        total_fixtures: count_fixtures().unwrap_or(0),
    };

    let servers = vec![
        ServerStatus {
            server_type: "HTTP".to_string(),
            address: state.http_server_addr.map(|addr| addr.to_string()),
            running: state.http_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections,
            total_requests: count_requests_by_server_type(&metrics, "HTTP"),
        },
        ServerStatus {
            server_type: "WebSocket".to_string(),
            address: state.ws_server_addr.map(|addr| addr.to_string()),
            running: state.ws_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 2, // rough split estimate
            total_requests: count_requests_by_server_type(&metrics, "WebSocket"),
        },
        ServerStatus {
            server_type: "gRPC".to_string(),
            address: state.grpc_server_addr.map(|addr| addr.to_string()),
            running: state.grpc_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 3, // rough split estimate
            total_requests: count_requests_by_server_type(&metrics, "gRPC"),
        },
    ];

    let mut routes = Vec::new();
    for (endpoint, count) in &metrics.requests_by_endpoint {
        let parts: Vec<&str> = endpoint.splitn(2, ' ').collect();
        if parts.len() == 2 {
            let method = parts[0].to_string();
            let path = parts[1].to_string();
            let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);

            routes.push(RouteInfo {
                method: Some(method.clone()),
                path: path.clone(),
                priority: 0,
                has_fixtures: route_has_fixtures(&method, &path),
                latency_ms: calculate_endpoint_latency(&metrics, endpoint),
                request_count: *count,
                last_request: get_endpoint_last_request(&metrics, endpoint),
                error_count,
            });
        }
    }

    let dashboard = DashboardData {
        server_info: ServerInfo {
            version: env!("CARGO_PKG_VERSION").to_string(),
            build_time: option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("unknown").to_string(),
            git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("unknown").to_string(),
            http_server: state.http_server_addr.map(|addr| addr.to_string()),
            ws_server: state.ws_server_addr.map(|addr| addr.to_string()),
            grpc_server: state.grpc_server_addr.map(|addr| addr.to_string()),
            graphql_server: state.graphql_server_addr.map(|addr| addr.to_string()),
            api_enabled: state.api_enabled,
            admin_port: state.admin_port,
        },
        system_info: DashboardSystemInfo {
            os: std::env::consts::OS.to_string(),
            arch: std::env::consts::ARCH.to_string(),
            uptime,
            memory_usage: system_metrics.memory_usage_mb * 1024 * 1024, // MB -> bytes
        },
        metrics: SimpleMetricsData {
            total_requests: metrics.requests_by_endpoint.values().sum(),
            active_requests: metrics.active_connections,
            average_response_time: if metrics.response_times.is_empty() {
                0.0
            } else {
                metrics.response_times.iter().sum::<u64>() as f64
                    / metrics.response_times.len() as f64
            },
            error_rate: {
                let total_requests = metrics.requests_by_endpoint.values().sum::<u64>();
                let total_errors = metrics.errors_by_endpoint.values().sum::<u64>();
                if total_requests == 0 {
                    0.0
                } else {
                    total_errors as f64 / total_requests as f64
                }
            },
        },
        servers,
        recent_logs,
        system: system_info,
    };

    Json(ApiResponse::success(dashboard))
}

pub async fn get_routes(State(state): State<AdminState>) -> impl IntoResponse {
    if let Some(http_addr) = state.http_server_addr {
        let url = format!("http://{}/__mockforge/routes", http_addr);
        if let Ok(response) = reqwest::get(&url).await {
            if response.status().is_success() {
                if let Ok(body) = response.text().await {
                    return (StatusCode::OK, [("content-type", "application/json")], body);
                }
            }
        }
    }

    // Fall back to an empty route list if the HTTP server is unreachable.
    (
        StatusCode::OK,
        [("content-type", "application/json")],
        r#"{"routes":[]}"#.to_string(),
    )
}

pub async fn get_server_info(State(state): State<AdminState>) -> Json<serde_json::Value> {
    Json(json!({
        "http_server": state.http_server_addr.map(|addr| addr.to_string()),
        "ws_server": state.ws_server_addr.map(|addr| addr.to_string()),
        "grpc_server": state.grpc_server_addr.map(|addr| addr.to_string()),
        "admin_port": state.admin_port
    }))
}

pub async fn get_health() -> Json<HealthCheck> {
    Json(
        HealthCheck::healthy()
            .with_service("http".to_string(), "healthy".to_string())
            .with_service("websocket".to_string(), "healthy".to_string())
            .with_service("grpc".to_string(), "healthy".to_string()),
    )
}

pub async fn get_logs(
    State(state): State<AdminState>,
    Query(params): Query<HashMap<String, String>>,
) -> Json<ApiResponse<Vec<RequestLog>>> {
    let mut filter = LogFilter::default();

    if let Some(method) = params.get("method") {
        filter.method = Some(method.clone());
    }
    if let Some(path) = params.get("path") {
        filter.path_pattern = Some(path.clone());
    }
    if let Some(status) = params.get("status").and_then(|s| s.parse().ok()) {
        filter.status_code = Some(status);
    }
    if let Some(limit) = params.get("limit").and_then(|s| s.parse().ok()) {
        filter.limit = Some(limit);
    }

    let logs = if let Some(global_logger) = mockforge_core::get_global_logger() {
        let centralized_logs = global_logger.get_recent_logs(filter.limit).await;

        centralized_logs
            .into_iter()
            .filter(|log| {
                if let Some(ref method) = filter.method {
                    if log.method != *method {
                        return false;
                    }
                }
                if let Some(ref path_pattern) = filter.path_pattern {
                    if !log.path.contains(path_pattern) {
                        return false;
                    }
                }
                if let Some(status) = filter.status_code {
                    if log.status_code != status {
                        return false;
                    }
                }
                true
            })
            .map(|log| RequestLog {
                id: log.id,
                timestamp: log.timestamp,
                method: log.method,
                path: log.path,
                status_code: log.status_code,
                response_time_ms: log.response_time_ms,
                client_ip: log.client_ip,
                user_agent: log.user_agent,
                headers: log.headers,
                response_size_bytes: log.response_size_bytes,
                error_message: log.error_message,
            })
            .collect()
    } else {
        state.get_logs_filtered(&filter).await
    };

    Json(ApiResponse::success(logs))
}

pub async fn get_reality_trace(
    Path(request_id): Path<String>,
) -> Json<ApiResponse<Option<mockforge_core::request_logger::RealityTraceMetadata>>> {
    if let Some(global_logger) = mockforge_core::get_global_logger() {
        let logs = global_logger.get_recent_logs(None).await;
        if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
            Json(ApiResponse::success(log_entry.reality_metadata))
        } else {
            Json(ApiResponse::error(format!("Request {} not found", request_id)))
        }
    } else {
        Json(ApiResponse::error("Request logger not initialized".to_string()))
    }
}

pub async fn get_response_trace(
    Path(request_id): Path<String>,
) -> Json<ApiResponse<Option<serde_json::Value>>> {
    if let Some(global_logger) = mockforge_core::get_global_logger() {
        let logs = global_logger.get_recent_logs(None).await;
        if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
            let trace = log_entry
                .metadata
                .get("response_generation_trace")
                .and_then(|s| serde_json::from_str::<serde_json::Value>(s).ok());
            Json(ApiResponse::success(trace))
        } else {
            Json(ApiResponse::error(format!("Request {} not found", request_id)))
        }
    } else {
        Json(ApiResponse::error("Request logger not initialized".to_string()))
    }
}

const RECENT_LOGS_LIMIT: usize = 20;
const RECENT_LOGS_TTL_MINUTES: i64 = 5;

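/// Stream request logs to the browser over Server-Sent Events. The stream
/// polls the centralized logger every 500ms, deduplicates by log ID, and only
/// forwards entries newer than `RECENT_LOGS_TTL_MINUTES`.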
pub async fn logs_sse(
    State(_state): State<AdminState>,
) -> Sse<impl Stream<Item = std::result::Result<Event, Infallible>>> {
    tracing::info!(
        "SSE endpoint /logs/sse accessed - starting real-time log streaming for recent requests only"
    );

    let stream = stream::unfold(std::collections::HashSet::new(), |mut seen_ids| async move {
        tokio::time::sleep(Duration::from_millis(500)).await;

        if let Some(global_logger) = mockforge_core::get_global_logger() {
            let centralized_logs = global_logger.get_recent_logs(Some(RECENT_LOGS_LIMIT)).await;

            tracing::debug!(
                "SSE: Checking logs - total logs: {}, seen logs: {}",
                centralized_logs.len(),
                seen_ids.len()
            );

            let now = chrono::Utc::now();
            let ttl_cutoff = now - chrono::Duration::minutes(RECENT_LOGS_TTL_MINUTES);

            let new_logs: Vec<RequestLog> = centralized_logs
                .into_iter()
                .filter(|log| log.timestamp > ttl_cutoff && !seen_ids.contains(&log.id))
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            for log in &new_logs {
                seen_ids.insert(log.id.clone());
            }

            if !new_logs.is_empty() {
                tracing::info!("SSE: Sending {} new logs to client", new_logs.len());

                let event_data = serde_json::to_string(&new_logs).unwrap_or_default();
                let event = Ok(Event::default().event("new_logs").data(event_data));

                return Some((event, seen_ids));
            }
        }

        // No new logs: emit a keep-alive event so the connection stays open.
        let event = Ok(Event::default().event("keep_alive").data(""));
        Some((event, seen_ids))
    });

    Sse::new(stream).keep_alive(
        axum::response::sse::KeepAlive::new()
            .interval(Duration::from_secs(15))
            .text("keep-alive-text"),
    )
}

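/// Compute aggregate and per-endpoint metrics, including p50 through p99.9
/// response-time percentiles, from the centralized logger when available.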
pub async fn get_metrics(State(state): State<AdminState>) -> Json<ApiResponse<MetricsData>> {
    let metrics = if let Some(global_logger) = mockforge_core::get_global_logger() {
        let all_logs = global_logger.get_recent_logs(None).await;

        let total_requests = all_logs.len() as u64;
        let mut requests_by_endpoint = HashMap::new();
        let mut errors_by_endpoint = HashMap::new();
        let mut response_times = Vec::new();
        let mut last_request_by_endpoint = HashMap::new();

        for log in &all_logs {
            let endpoint_key = format!("{} {}", log.method, log.path);
            *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;

            if log.status_code >= 400 {
                *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
            }

            response_times.push(log.response_time_ms);
            last_request_by_endpoint.insert(endpoint_key, log.timestamp);
        }

        RequestMetrics {
            total_requests,
            active_connections: 0,
            requests_by_endpoint,
            response_times,
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint,
            last_request_by_endpoint,
        }
    } else {
        state.get_metrics().await
    };

    let system_metrics = state.get_system_metrics().await;
    let time_series = state.get_time_series_data().await;

    // Nearest-rank percentile over already-sorted data.
    fn calculate_percentile(sorted_data: &[u64], percentile: f64) -> u64 {
        if sorted_data.is_empty() {
            return 0;
        }
        let idx = ((sorted_data.len() as f64) * percentile).ceil() as usize;
        let idx = idx.min(sorted_data.len().saturating_sub(1));
        sorted_data[idx]
    }

    let mut response_times = metrics.response_times.clone();
    response_times.sort();

    let p50 = calculate_percentile(&response_times, 0.50);
    let p75 = calculate_percentile(&response_times, 0.75);
    let p90 = calculate_percentile(&response_times, 0.90);
    let p95 = calculate_percentile(&response_times, 0.95);
    let p99 = calculate_percentile(&response_times, 0.99);
    let p999 = calculate_percentile(&response_times, 0.999);

    let mut response_times_by_endpoint: HashMap<String, Vec<u64>> = HashMap::new();
    if let Some(global_logger) = mockforge_core::get_global_logger() {
        let all_logs = global_logger.get_recent_logs(None).await;
        for log in &all_logs {
            let endpoint_key = format!("{} {}", log.method, log.path);
            response_times_by_endpoint
                .entry(endpoint_key)
                .or_default()
                .push(log.response_time_ms);
        }
    }

    let mut endpoint_percentiles: HashMap<String, HashMap<String, u64>> = HashMap::new();
    for (endpoint, times) in &mut response_times_by_endpoint {
        times.sort();
        if !times.is_empty() {
            endpoint_percentiles.insert(
                endpoint.clone(),
                HashMap::from([
                    ("p50".to_string(), calculate_percentile(times, 0.50)),
                    ("p75".to_string(), calculate_percentile(times, 0.75)),
                    ("p90".to_string(), calculate_percentile(times, 0.90)),
                    ("p95".to_string(), calculate_percentile(times, 0.95)),
                    ("p99".to_string(), calculate_percentile(times, 0.99)),
                    ("p999".to_string(), calculate_percentile(times, 0.999)),
                ]),
            );
        }
    }

    let mut error_rate_by_endpoint = HashMap::new();
    for (endpoint, total_count) in &metrics.requests_by_endpoint {
        let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
        let error_rate = if *total_count > 0 {
            error_count as f64 / *total_count as f64
        } else {
            0.0
        };
        error_rate_by_endpoint.insert(endpoint.clone(), error_rate);
    }

    let memory_usage_over_time = if time_series.memory_usage.is_empty() {
        vec![(Utc::now(), system_metrics.memory_usage_mb)]
    } else {
        time_series
            .memory_usage
            .iter()
            .map(|point| (point.timestamp, point.value as u64))
            .collect()
    };

    let cpu_usage_over_time = if time_series.cpu_usage.is_empty() {
        vec![(Utc::now(), system_metrics.cpu_usage_percent)]
    } else {
        time_series
            .cpu_usage
            .iter()
            .map(|point| (point.timestamp, point.value))
            .collect()
    };

    let latency_over_time: Vec<(chrono::DateTime<chrono::Utc>, u64)> =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            let all_logs = global_logger.get_recent_logs(Some(100)).await;
            all_logs.iter().map(|log| (log.timestamp, log.response_time_ms)).collect()
        } else {
            Vec::new()
        };

    let metrics_data = MetricsData {
        requests_by_endpoint: metrics.requests_by_endpoint,
        response_time_percentiles: HashMap::from([
            ("p50".to_string(), p50),
            ("p75".to_string(), p75),
            ("p90".to_string(), p90),
            ("p95".to_string(), p95),
            ("p99".to_string(), p99),
            ("p999".to_string(), p999),
        ]),
        endpoint_percentiles: Some(endpoint_percentiles),
        latency_over_time: Some(latency_over_time),
        error_rate_by_endpoint,
        memory_usage_over_time,
        cpu_usage_over_time,
    };

    Json(ApiResponse::success(metrics_data))
}

pub async fn update_latency(
    State(state): State<AdminState>,
    headers: axum::http::HeaderMap,
    Json(update): Json<ConfigUpdate>,
) -> Json<ApiResponse<String>> {
    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
    use crate::rbac::{extract_user_context, get_default_user_context};

    if update.config_type != "latency" {
        return Json(ApiResponse::error("Invalid config type".to_string()));
    }

    let base_ms = update.data.get("base_ms").and_then(|v| v.as_u64()).unwrap_or(50);
    let jitter_ms = update.data.get("jitter_ms").and_then(|v| v.as_u64()).unwrap_or(20);

    let tag_overrides: std::collections::HashMap<String, u64> = update
        .data
        .get("tag_overrides")
        .and_then(|v| v.as_object())
        .map(|obj| obj.iter().filter_map(|(k, v)| v.as_u64().map(|val| (k.clone(), val))).collect())
        .unwrap_or_default();

    state.update_latency_config(base_ms, jitter_ms, tag_overrides.clone()).await;

    if let Some(audit_store) = get_global_audit_store() {
        let metadata = serde_json::json!({
            "base_ms": base_ms,
            "jitter_ms": jitter_ms,
            "tag_overrides": tag_overrides,
        });
        let mut audit_log = create_audit_log(
            AdminActionType::ConfigLatencyUpdated,
            format!("Latency profile updated: base_ms={}, jitter_ms={}", base_ms, jitter_ms),
            None,
            true,
            None,
            Some(metadata),
        );

        if let Some(user_ctx) = extract_user_context(&headers).or_else(get_default_user_context) {
            audit_log.user_id = Some(user_ctx.user_id);
            audit_log.username = Some(user_ctx.username);
        }

        if let Some(ip) = headers
            .get("x-forwarded-for")
            .or_else(|| headers.get("x-real-ip"))
            .and_then(|h| h.to_str().ok())
        {
            audit_log.ip_address = Some(ip.to_string());
        }

        if let Some(ua) = headers.get("user-agent").and_then(|h| h.to_str().ok()) {
            audit_log.user_agent = Some(ua.to_string());
        }

        audit_store.record(audit_log).await;
    }

    tracing::info!("Updated latency profile: base_ms={}, jitter_ms={}", base_ms, jitter_ms);

    Json(ApiResponse::success("Latency profile updated".to_string()))
}

pub async fn update_faults(
    State(state): State<AdminState>,
    Json(update): Json<ConfigUpdate>,
) -> Json<ApiResponse<String>> {
    if update.config_type != "faults" {
        return Json(ApiResponse::error("Invalid config type".to_string()));
    }

    let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);

    let failure_rate = update.data.get("failure_rate").and_then(|v| v.as_f64()).unwrap_or(0.0);

    let status_codes = update
        .data
        .get("status_codes")
        .and_then(|v| v.as_array())
        .map(|arr| arr.iter().filter_map(|v| v.as_u64().map(|n| n as u16)).collect())
        .unwrap_or_else(|| vec![500, 502, 503]);

    state.update_fault_config(enabled, failure_rate, status_codes).await;

    tracing::info!(
        "Updated fault configuration: enabled={}, failure_rate={}",
        enabled,
        failure_rate
    );

    Json(ApiResponse::success("Fault configuration updated".to_string()))
}

pub async fn update_proxy(
    State(state): State<AdminState>,
    Json(update): Json<ConfigUpdate>,
) -> Json<ApiResponse<String>> {
    if update.config_type != "proxy" {
        return Json(ApiResponse::error("Invalid config type".to_string()));
    }

    let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);

    let upstream_url =
        update.data.get("upstream_url").and_then(|v| v.as_str()).map(|s| s.to_string());

    let timeout_seconds = update.data.get("timeout_seconds").and_then(|v| v.as_u64()).unwrap_or(30);

    state.update_proxy_config(enabled, upstream_url.clone(), timeout_seconds).await;

    tracing::info!(
        "Updated proxy configuration: enabled={}, upstream_url={:?}",
        enabled,
        upstream_url
    );

    Json(ApiResponse::success("Proxy configuration updated".to_string()))
}

pub async fn clear_logs(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    state.clear_logs().await;
    tracing::info!("Cleared all request logs");

    Json(ApiResponse::success("Logs cleared".to_string()))
}

pub async fn restart_servers(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};

    let current_status = state.get_restart_status().await;
    if current_status.in_progress {
        return Json(ApiResponse::error("Server restart already in progress".to_string()));
    }

    let restart_result = state
        .initiate_restart("Manual restart requested via admin UI".to_string())
        .await;

    let success = restart_result.is_ok();
    let error_msg = restart_result.as_ref().err().map(|e| format!("{}", e));

    if let Some(audit_store) = get_global_audit_store() {
        let audit_log = create_audit_log(
            AdminActionType::ServerRestarted,
            "Server restart initiated via admin UI".to_string(),
            None,
            success,
            error_msg.clone(),
            None,
        );
        audit_store.record(audit_log).await;
    }

    if let Err(e) = restart_result {
        return Json(ApiResponse::error(format!("Failed to initiate restart: {}", e)));
    }

    let state_clone = state.clone();
    tokio::spawn(async move {
        if let Err(e) = perform_server_restart(&state_clone).await {
            tracing::error!("Server restart failed: {}", e);
            state_clone.complete_restart(false).await;
        } else {
            tracing::info!("Server restart completed successfully");
            state_clone.complete_restart(true).await;
        }
    });

    tracing::info!("Server restart initiated via admin UI");
    Json(ApiResponse::success(
        "Server restart initiated. Please wait for completion.".to_string(),
    ))
}

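/// Attempt a restart using three strategies in order: signal the parent
/// process, spawn a replacement process, then fall back to a restart script.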
async fn perform_server_restart(_state: &AdminState) -> Result<()> {
    let current_pid = std::process::id();
    tracing::info!("Initiating restart for process PID: {}", current_pid);

    let parent_pid = get_parent_process_id(current_pid).await?;
    tracing::info!("Found parent process PID: {}", parent_pid);

    if let Ok(()) = restart_via_parent_signal(parent_pid).await {
        tracing::info!("Restart initiated via parent process signal");
        return Ok(());
    }

    if let Ok(()) = restart_via_process_replacement().await {
        tracing::info!("Restart initiated via process replacement");
        return Ok(());
    }

    restart_via_script().await
}

async fn get_parent_process_id(pid: u32) -> Result<u32> {
    #[cfg(target_os = "linux")]
    {
        // The fourth field of /proc/<pid>/stat is the parent PID.
        let stat_path = format!("/proc/{}/stat", pid);
        if let Ok(ppid) = tokio::task::spawn_blocking(move || -> Result<u32> {
            let content = std::fs::read_to_string(&stat_path)
                .map_err(|e| Error::generic(format!("Failed to read {}: {}", stat_path, e)))?;

            let fields: Vec<&str> = content.split_whitespace().collect();
            if fields.len() > 3 {
                fields[3]
                    .parse::<u32>()
                    .map_err(|e| Error::generic(format!("Failed to parse PPID: {}", e)))
            } else {
                Err(Error::generic("Insufficient fields in /proc/pid/stat".to_string()))
            }
        })
        .await
        {
            return ppid;
        }
    }

    // Fallback: assume init (PID 1) when /proc is unavailable or on other platforms.
    Ok(1)
}

async fn restart_via_parent_signal(parent_pid: u32) -> Result<()> {
    #[cfg(unix)]
    {
        use std::process::Command;

        let output = Command::new("kill")
            .args(["-TERM", &parent_pid.to_string()])
            .output()
            .map_err(|e| Error::generic(format!("Failed to send signal: {}", e)))?;

        if !output.status.success() {
            return Err(Error::generic(
                "Failed to send restart signal to parent process".to_string(),
            ));
        }

        tokio::time::sleep(Duration::from_millis(100)).await;
        Ok(())
    }

    #[cfg(not(unix))]
    {
        Err(Error::generic(
            "Signal-based restart not supported on this platform".to_string(),
        ))
    }
}

async fn restart_via_process_replacement() -> Result<()> {
    let current_exe = std::env::current_exe()
        .map_err(|e| Error::generic(format!("Failed to get current executable: {}", e)))?;

    let args: Vec<String> = std::env::args().collect();

    tracing::info!("Restarting with command: {:?}", args);

    let mut child = Command::new(&current_exe)
        .args(&args[1..]) // skip argv[0]
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .spawn()
        .map_err(|e| Error::generic(format!("Failed to start new process: {}", e)))?;

    tokio::time::sleep(Duration::from_millis(500)).await;

    match child.try_wait() {
        Ok(Some(status)) => {
            if status.success() {
                tracing::info!("New process started successfully");
                Ok(())
            } else {
                Err(Error::generic("New process exited with error".to_string()))
            }
        }
        Ok(None) => {
            // The replacement is still running; hand over by exiting this process.
            tracing::info!("New process is running, exiting current process");
            std::process::exit(0);
        }
        Err(e) => Err(Error::generic(format!("Failed to check new process status: {}", e))),
    }
}

async fn restart_via_script() -> Result<()> {
    let script_paths = ["./scripts/restart.sh", "./restart.sh", "restart.sh"];

    for script_path in &script_paths {
        if std::path::Path::new(script_path).exists() {
            tracing::info!("Using restart script: {}", script_path);

            let output = Command::new("bash")
                .arg(script_path)
                .output()
                .map_err(|e| Error::generic(format!("Failed to execute restart script: {}", e)))?;

            if output.status.success() {
                return Ok(());
            } else {
                tracing::warn!(
                    "Restart script failed: {}",
                    String::from_utf8_lossy(&output.stderr)
                );
            }
        }
    }

    let clear_script = "./scripts/clear-ports.sh";
    if std::path::Path::new(clear_script).exists() {
        tracing::info!("Using clear-ports script as fallback");

        let _ = Command::new("bash").arg(clear_script).output();
    }

    Err(Error::generic(
        "No restart mechanism available. Please restart manually.".to_string(),
    ))
}

pub async fn get_restart_status(
    State(state): State<AdminState>,
) -> Json<ApiResponse<RestartStatus>> {
    let status = state.get_restart_status().await;
    Json(ApiResponse::success(status))
}

pub async fn get_audit_logs(
    Query(params): Query<std::collections::HashMap<String, String>>,
) -> Json<ApiResponse<Vec<crate::audit::AdminAuditLog>>> {
    use crate::audit::{get_global_audit_store, AdminActionType};

    let action_type_str = params.get("action_type");
    let user_id = params.get("user_id").map(|s| s.as_str());
    let limit = params.get("limit").and_then(|s| s.parse::<usize>().ok());
    let offset = params.get("offset").and_then(|s| s.parse::<usize>().ok());

    let action_type = action_type_str.and_then(|s| match s.as_str() {
        "config_latency_updated" => Some(AdminActionType::ConfigLatencyUpdated),
        "config_faults_updated" => Some(AdminActionType::ConfigFaultsUpdated),
        "server_restarted" => Some(AdminActionType::ServerRestarted),
        "logs_cleared" => Some(AdminActionType::LogsCleared),
        _ => None,
    });

    if let Some(audit_store) = get_global_audit_store() {
        let logs = audit_store.get_logs(action_type, user_id, limit, offset).await;
        Json(ApiResponse::success(logs))
    } else {
        Json(ApiResponse::error("Audit logging not initialized".to_string()))
    }
}

pub async fn get_audit_stats() -> Json<ApiResponse<crate::audit::AuditLogStats>> {
    use crate::audit::get_global_audit_store;

    if let Some(audit_store) = get_global_audit_store() {
        let stats = audit_store.get_stats().await;
        Json(ApiResponse::success(stats))
    } else {
        Json(ApiResponse::error("Audit logging not initialized".to_string()))
    }
}

pub async fn get_config(State(state): State<AdminState>) -> Json<ApiResponse<serde_json::Value>> {
    let config_state = state.get_config().await;

    let config = json!({
        "latency": {
            "enabled": true,
            "base_ms": config_state.latency_profile.base_ms,
            "jitter_ms": config_state.latency_profile.jitter_ms,
            "tag_overrides": config_state.latency_profile.tag_overrides
        },
        "faults": {
            "enabled": config_state.fault_config.enabled,
            "failure_rate": config_state.fault_config.failure_rate,
            "status_codes": config_state.fault_config.status_codes
        },
        "proxy": {
            "enabled": config_state.proxy_config.enabled,
            "upstream_url": config_state.proxy_config.upstream_url,
            "timeout_seconds": config_state.proxy_config.timeout_seconds
        },
        "validation": {
            "mode": config_state.validation_settings.mode,
            "aggregate_errors": config_state.validation_settings.aggregate_errors,
            "validate_responses": config_state.validation_settings.validate_responses,
            "overrides": config_state.validation_settings.overrides
        }
    });

    Json(ApiResponse::success(config))
}

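/// Count JSON fixture files under the fixtures directory
/// (`MOCKFORGE_FIXTURES_DIR`, default `fixtures/`), summing the http,
/// websocket, and grpc subdirectories.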
pub fn count_fixtures() -> Result<usize> {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Ok(0);
    }

    let mut total_count = 0;

    let http_fixtures_path = fixtures_path.join("http");
    if http_fixtures_path.exists() {
        total_count += count_fixtures_in_directory(&http_fixtures_path)?;
    }

    let ws_fixtures_path = fixtures_path.join("websocket");
    if ws_fixtures_path.exists() {
        total_count += count_fixtures_in_directory(&ws_fixtures_path)?;
    }

    let grpc_fixtures_path = fixtures_path.join("grpc");
    if grpc_fixtures_path.exists() {
        total_count += count_fixtures_in_directory(&grpc_fixtures_path)?;
    }

    Ok(total_count)
}

fn count_fixtures_in_directory(dir_path: &std::path::Path) -> Result<usize> {
    let mut count = 0;

    if let Ok(entries) = std::fs::read_dir(dir_path) {
        for entry in entries {
            let entry = entry
                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
            let path = entry.path();

            if path.is_dir() {
                count += count_fixtures_in_directory(&path)?;
            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
                count += 1;
            }
        }
    }

    Ok(count)
}

pub fn route_has_fixtures(method: &str, path: &str) -> bool {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return false;
    }

    // HTTP fixture directories are laid out as http/<method>/<sanitized path>.
    let method_lower = method.to_lowercase();
    let path_hash = path.replace(['/', ':'], "_");
    let http_fixtures_path = fixtures_path.join("http").join(&method_lower).join(&path_hash);

    if http_fixtures_path.exists() {
        if let Ok(entries) = std::fs::read_dir(&http_fixtures_path) {
            for entry in entries.flatten() {
                if entry.path().extension().and_then(|s| s.to_str()) == Some("json") {
                    return true;
                }
            }
        }
    }

    if method.to_uppercase() == "WS" {
        let ws_fixtures_path = fixtures_path.join("websocket").join(&path_hash);

        if ws_fixtures_path.exists() {
            if let Ok(entries) = std::fs::read_dir(&ws_fixtures_path) {
                for entry in entries.flatten() {
                    if entry.path().extension().and_then(|s| s.to_str()) == Some("json") {
                        return true;
                    }
                }
            }
        }
    }

    false
}

fn calculate_endpoint_latency(metrics: &RequestMetrics, endpoint: &str) -> Option<u64> {
    metrics.response_times_by_endpoint.get(endpoint).and_then(|times| {
        if times.is_empty() {
            None
        } else {
            let sum: u64 = times.iter().sum();
            Some(sum / times.len() as u64)
        }
    })
}

fn get_endpoint_last_request(
    metrics: &RequestMetrics,
    endpoint: &str,
) -> Option<chrono::DateTime<chrono::Utc>> {
    metrics.last_request_by_endpoint.get(endpoint).copied()
}

fn count_requests_by_server_type(metrics: &RequestMetrics, server_type: &str) -> u64 {
    match server_type {
        "HTTP" => metrics
            .requests_by_endpoint
            .iter()
            .filter(|(endpoint, _)| {
                let method = endpoint.split(' ').next().unwrap_or("");
                matches!(
                    method,
                    "GET" | "POST" | "PUT" | "DELETE" | "PATCH" | "HEAD" | "OPTIONS"
                )
            })
            .map(|(_, count)| count)
            .sum(),
        "WebSocket" => metrics
            .requests_by_endpoint
            .iter()
            .filter(|(endpoint, _)| {
                let method = endpoint.split(' ').next().unwrap_or("");
                method == "WS"
            })
            .map(|(_, count)| count)
            .sum(),
        "gRPC" => metrics
            .requests_by_endpoint
            .iter()
            .filter(|(endpoint, _)| {
                let method = endpoint.split(' ').next().unwrap_or("");
                method == "gRPC"
            })
            .map(|(_, count)| count)
            .sum(),
        _ => 0,
    }
}

pub async fn get_fixtures() -> Json<ApiResponse<Vec<FixtureInfo>>> {
    match scan_fixtures_directory() {
        Ok(fixtures) => Json(ApiResponse::success(fixtures)),
        Err(e) => {
            tracing::error!("Failed to scan fixtures directory: {}", e);
            Json(ApiResponse::error(format!("Failed to load fixtures: {}", e)))
        }
    }
}

fn scan_fixtures_directory() -> Result<Vec<FixtureInfo>> {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        tracing::warn!("Fixtures directory does not exist: {}", fixtures_dir);
        return Ok(Vec::new());
    }

    let mut all_fixtures = Vec::new();

    let http_fixtures = scan_protocol_fixtures(fixtures_path, "http")?;
    all_fixtures.extend(http_fixtures);

    let ws_fixtures = scan_protocol_fixtures(fixtures_path, "websocket")?;
    all_fixtures.extend(ws_fixtures);

    let grpc_fixtures = scan_protocol_fixtures(fixtures_path, "grpc")?;
    all_fixtures.extend(grpc_fixtures);

    // Newest fixtures first.
    all_fixtures.sort_by(|a, b| b.saved_at.cmp(&a.saved_at));

    tracing::info!("Found {} fixtures in directory: {}", all_fixtures.len(), fixtures_dir);
    Ok(all_fixtures)
}

fn scan_protocol_fixtures(
    fixtures_path: &std::path::Path,
    protocol: &str,
) -> Result<Vec<FixtureInfo>> {
    let protocol_path = fixtures_path.join(protocol);
    let mut fixtures = Vec::new();

    if !protocol_path.exists() {
        return Ok(fixtures);
    }

    if let Ok(entries) = std::fs::read_dir(&protocol_path) {
        for entry in entries {
            let entry = entry
                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
            let path = entry.path();

            if path.is_dir() {
                let sub_fixtures = scan_directory_recursive(&path, protocol)?;
                fixtures.extend(sub_fixtures);
            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
                if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
                    fixtures.push(fixture);
                }
            }
        }
    }

    Ok(fixtures)
}

fn scan_directory_recursive(
    dir_path: &std::path::Path,
    protocol: &str,
) -> Result<Vec<FixtureInfo>> {
    let mut fixtures = Vec::new();

    if let Ok(entries) = std::fs::read_dir(dir_path) {
        for entry in entries {
            let entry = entry
                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
            let path = entry.path();

            if path.is_dir() {
                let sub_fixtures = scan_directory_recursive(&path, protocol)?;
                fixtures.extend(sub_fixtures);
            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
                if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
                    fixtures.push(fixture);
                }
            }
        }
    }

    Ok(fixtures)
}

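/// Read a fixture file and derive its `FixtureInfo`: method and path from the
/// JSON body, timestamp and size from file metadata, plus a stable ID and
/// fingerprint.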
fn parse_fixture_file_sync(file_path: &std::path::Path, protocol: &str) -> Result<FixtureInfo> {
    let metadata = std::fs::metadata(file_path)
        .map_err(|e| Error::generic(format!("Failed to read file metadata: {}", e)))?;

    let file_size = metadata.len();
    let modified_time = metadata
        .modified()
        .map_err(|e| Error::generic(format!("Failed to get file modification time: {}", e)))?;

    let saved_at = chrono::DateTime::from(modified_time);

    let content = std::fs::read_to_string(file_path)
        .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;

    let fixture_data: serde_json::Value = serde_json::from_str(&content)
        .map_err(|e| Error::generic(format!("Failed to parse fixture JSON: {}", e)))?;

    let (method, path) = extract_method_and_path(&fixture_data, protocol)?;

    let id = generate_fixture_id(file_path, &content);

    let fingerprint = extract_fingerprint(file_path, &fixture_data)?;

    // Store the path relative to the fixtures root when possible.
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);
    let file_path_str = file_path
        .strip_prefix(fixtures_path)
        .unwrap_or(file_path)
        .to_string_lossy()
        .to_string();

    Ok(FixtureInfo {
        id,
        protocol: protocol.to_string(),
        method,
        path,
        saved_at,
        file_size,
        file_path: file_path_str,
        fingerprint,
        metadata: fixture_data,
    })
}

2127fn extract_method_and_path(
2129 fixture_data: &serde_json::Value,
2130 protocol: &str,
2131) -> Result<(String, String)> {
2132 match protocol {
2133 "http" => {
2134 let method = fixture_data
2136 .get("request")
2137 .and_then(|req| req.get("method"))
2138 .and_then(|m| m.as_str())
2139 .unwrap_or("UNKNOWN")
2140 .to_uppercase();
2141
2142 let path = fixture_data
2143 .get("request")
2144 .and_then(|req| req.get("path"))
2145 .and_then(|p| p.as_str())
2146 .unwrap_or("/unknown")
2147 .to_string();
2148
2149 Ok((method, path))
2150 }
2151 "websocket" => {
2152 let path = fixture_data
2154 .get("path")
2155 .and_then(|p| p.as_str())
2156 .or_else(|| {
2157 fixture_data
2158 .get("request")
2159 .and_then(|req| req.get("path"))
2160 .and_then(|p| p.as_str())
2161 })
2162 .unwrap_or("/ws")
2163 .to_string();
2164
2165 Ok(("WS".to_string(), path))
2166 }
2167 "grpc" => {
2168 let service =
2170 fixture_data.get("service").and_then(|s| s.as_str()).unwrap_or("UnknownService");
2171
2172 let method =
2173 fixture_data.get("method").and_then(|m| m.as_str()).unwrap_or("UnknownMethod");
2174
2175 let path = format!("/{}/{}", service, method);
2176 Ok(("gRPC".to_string(), path))
2177 }
2178 _ => {
2179 let path = fixture_data
2180 .get("path")
2181 .and_then(|p| p.as_str())
2182 .unwrap_or("/unknown")
2183 .to_string();
2184 Ok((protocol.to_uppercase(), path))
2185 }
2186 }
2187}
2188
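// A small sketch of `extract_method_and_path` behavior. The JSON shapes
// mirror what the parser above looks for (`request.method`, `request.path`,
// `service`, `method`); the concrete values are invented for illustration.
#[cfg(test)]
mod extract_method_and_path_tests {
    use super::extract_method_and_path;

    #[test]
    fn http_fixture_yields_method_and_path() {
        let fixture = serde_json::json!({
            "request": { "method": "get", "path": "/users/42" }
        });
        let (method, path) = extract_method_and_path(&fixture, "http").unwrap();
        assert_eq!(method, "GET"); // method is upper-cased
        assert_eq!(path, "/users/42");
    }

    #[test]
    fn grpc_fixture_yields_service_slash_method() {
        let fixture = serde_json::json!({ "service": "UserService", "method": "GetUser" });
        let (method, path) = extract_method_and_path(&fixture, "grpc").unwrap();
        assert_eq!(method, "gRPC");
        assert_eq!(path, "/UserService/GetUser");
    }
}
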
/// Build a stable fixture ID by hashing the file path together with its content.
fn generate_fixture_id(file_path: &std::path::Path, content: &str) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut hasher = DefaultHasher::new();
    file_path.hash(&mut hasher);
    content.hash(&mut hasher);
    format!("fixture_{:x}", hasher.finish())
}

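// Sketch: `generate_fixture_id` is deterministic for a given (path, content)
// pair, which is what lets delete/download/rename look a fixture up by ID
// after a fresh scan. The exact digest is not asserted because
// `DefaultHasher` output is not guaranteed stable across Rust releases.
#[cfg(test)]
mod generate_fixture_id_tests {
    use super::generate_fixture_id;

    #[test]
    fn same_inputs_produce_same_id() {
        let path = std::path::Path::new("fixtures/http/users_get.json");
        let a = generate_fixture_id(path, r#"{"request":{}}"#);
        let b = generate_fixture_id(path, r#"{"request":{}}"#);
        assert_eq!(a, b);
        assert!(a.starts_with("fixture_"));
    }
}
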
/// Resolve a fingerprint for a fixture: prefer an explicit `fingerprint`
/// field, then a hash-like suffix in the file name, then a hash of the path.
fn extract_fingerprint(
    file_path: &std::path::Path,
    fixture_data: &serde_json::Value,
) -> Result<String> {
    if let Some(fingerprint) = fixture_data.get("fingerprint").and_then(|f| f.as_str()) {
        return Ok(fingerprint.to_string());
    }

    if let Some(file_name) = file_path.file_stem().and_then(|s| s.to_str()) {
        if let Some(hash) = file_name.split('_').next_back() {
            if hash.len() >= 8 && hash.chars().all(|c| c.is_alphanumeric()) {
                return Ok(hash.to_string());
            }
        }
    }

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut hasher = DefaultHasher::new();
    file_path.hash(&mut hasher);
    Ok(format!("{:x}", hasher.finish()))
}

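// Sketch of the fingerprint fallback chain. The file name below is invented;
// it just has to end in an alphanumeric run of at least 8 characters for the
// file-name branch to fire.
#[cfg(test)]
mod extract_fingerprint_tests {
    use super::extract_fingerprint;

    #[test]
    fn explicit_field_wins() {
        let data = serde_json::json!({ "fingerprint": "abc123" });
        let path = std::path::Path::new("fixtures/http/users_deadbeef01.json");
        assert_eq!(extract_fingerprint(path, &data).unwrap(), "abc123");
    }

    #[test]
    fn falls_back_to_file_name_suffix() {
        let data = serde_json::json!({});
        let path = std::path::Path::new("fixtures/http/users_deadbeef01.json");
        assert_eq!(extract_fingerprint(path, &data).unwrap(), "deadbeef01");
    }
}
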
/// Delete a single fixture by ID.
pub async fn delete_fixture(
    Json(payload): Json<FixtureDeleteRequest>,
) -> Json<ApiResponse<String>> {
    match delete_fixture_by_id(&payload.fixture_id).await {
        Ok(_) => {
            tracing::info!("Successfully deleted fixture: {}", payload.fixture_id);
            Json(ApiResponse::success("Fixture deleted successfully".to_string()))
        }
        Err(e) => {
            tracing::error!("Failed to delete fixture {}: {}", payload.fixture_id, e);
            Json(ApiResponse::error(format!("Failed to delete fixture: {}", e)))
        }
    }
}

/// Delete several fixtures in one request, collecting per-fixture errors.
pub async fn delete_fixtures_bulk(
    Json(payload): Json<FixtureBulkDeleteRequest>,
) -> Json<ApiResponse<FixtureBulkDeleteResult>> {
    let mut deleted_count = 0;
    let mut errors = Vec::new();

    for fixture_id in &payload.fixture_ids {
        match delete_fixture_by_id(fixture_id).await {
            Ok(_) => {
                deleted_count += 1;
                tracing::info!("Successfully deleted fixture: {}", fixture_id);
            }
            Err(e) => {
                errors.push(format!("Failed to delete {}: {}", fixture_id, e));
                tracing::error!("Failed to delete fixture {}: {}", fixture_id, e);
            }
        }
    }

    let result = FixtureBulkDeleteResult {
        deleted_count,
        total_requested: payload.fixture_ids.len(),
        errors: errors.clone(),
    };

    if errors.is_empty() {
        Json(ApiResponse::success(result))
    } else {
        Json(ApiResponse::error(format!(
            "Partial success: {} deleted, {} errors",
            deleted_count,
            errors.len()
        )))
    }
}

async fn delete_fixture_by_id(fixture_id: &str) -> Result<()> {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    // File removal is blocking I/O, so run it off the async runtime.
    let file_path_clone = file_path.clone();
    tokio::task::spawn_blocking(move || {
        if file_path_clone.exists() {
            std::fs::remove_file(&file_path_clone).map_err(|e| {
                Error::generic(format!(
                    "Failed to delete fixture file {}: {}",
                    file_path_clone.display(),
                    e
                ))
            })
        } else {
            Err(Error::generic(format!("Fixture file not found: {}", file_path_clone.display())))
        }
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Deleted fixture file: {}", file_path.display());

    cleanup_empty_directories(&file_path).await;

    Ok(())
}

/// Locate a fixture file by ID across all protocol directories.
fn find_fixture_file_by_id(
    fixtures_path: &std::path::Path,
    fixture_id: &str,
) -> Result<std::path::PathBuf> {
    let protocols = ["http", "websocket", "grpc"];

    for protocol in &protocols {
        let protocol_path = fixtures_path.join(protocol);
        if let Ok(found_path) = search_fixture_in_directory(&protocol_path, fixture_id) {
            return Ok(found_path);
        }
    }

    Err(Error::generic(format!(
        "Fixture with ID '{}' not found in any protocol directory",
        fixture_id
    )))
}

fn search_fixture_in_directory(
    dir_path: &std::path::Path,
    fixture_id: &str,
) -> Result<std::path::PathBuf> {
    if !dir_path.exists() {
        return Err(Error::generic(format!("Directory does not exist: {}", dir_path.display())));
    }

    if let Ok(entries) = std::fs::read_dir(dir_path) {
        for entry in entries {
            let entry = entry
                .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
            let path = entry.path();

            if path.is_dir() {
                if let Ok(found_path) = search_fixture_in_directory(&path, fixture_id) {
                    return Ok(found_path);
                }
            } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
                if let Ok(fixture_info) = parse_fixture_file_sync(&path, "unknown") {
                    if fixture_info.id == fixture_id {
                        return Ok(path);
                    }
                }
            }
        }
    }

    Err(Error::generic(format!(
        "Fixture not found in directory: {}",
        dir_path.display()
    )))
}

/// Walk up from a deleted fixture's parent directory, removing directories
/// that are now empty, stopping at the fixtures root.
async fn cleanup_empty_directories(file_path: &std::path::Path) {
    let file_path = file_path.to_path_buf();

    let _ = tokio::task::spawn_blocking(move || {
        if let Some(parent) = file_path.parent() {
            let mut current = parent;
            let fixtures_dir =
                std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
            let fixtures_path = std::path::Path::new(&fixtures_dir);

            while current != fixtures_path && current.parent().is_some() {
                if let Ok(entries) = std::fs::read_dir(current) {
                    if entries.count() == 0 {
                        if let Err(e) = std::fs::remove_dir(current) {
                            tracing::debug!(
                                "Failed to remove empty directory {}: {}",
                                current.display(),
                                e
                            );
                            break;
                        } else {
                            tracing::debug!("Removed empty directory: {}", current.display());
                        }
                    } else {
                        break;
                    }
                } else {
                    break;
                }

                if let Some(next_parent) = current.parent() {
                    current = next_parent;
                } else {
                    break;
                }
            }
        }
    })
    .await;
}

/// Download a fixture file as a JSON attachment.
pub async fn download_fixture(
    axum::extract::Path(fixture_id): axum::extract::Path<String>,
) -> impl IntoResponse {
    match download_fixture_by_id(&fixture_id).await {
        Ok((content, file_name)) => (
            http::StatusCode::OK,
            [
                (http::header::CONTENT_TYPE, "application/json".to_string()),
                (
                    http::header::CONTENT_DISPOSITION,
                    format!("attachment; filename=\"{}\"", file_name),
                ),
            ],
            content,
        )
            .into_response(),
        Err(e) => {
            tracing::error!("Failed to download fixture {}: {}", fixture_id, e);
            let error_response = format!(r#"{{"error": "Failed to download fixture: {}"}}"#, e);
            (
                http::StatusCode::NOT_FOUND,
                [(http::header::CONTENT_TYPE, "application/json".to_string())],
                error_response,
            )
                .into_response()
        }
    }
}

async fn download_fixture_by_id(fixture_id: &str) -> Result<(String, String)> {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    let file_path_clone = file_path.clone();
    let (content, file_name) = tokio::task::spawn_blocking(move || {
        let content = std::fs::read_to_string(&file_path_clone)
            .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;

        let file_name = file_path_clone
            .file_name()
            .and_then(|name| name.to_str())
            .unwrap_or("fixture.json")
            .to_string();

        Ok::<_, Error>((content, file_name))
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Downloaded fixture file: {} ({} bytes)", file_path.display(), content.len());
    Ok((content, file_name))
}

/// Rename a fixture file by ID, keeping it in the same directory.
pub async fn rename_fixture(
    axum::extract::Path(fixture_id): axum::extract::Path<String>,
    Json(payload): Json<FixtureRenameRequest>,
) -> Json<ApiResponse<String>> {
    match rename_fixture_by_id(&fixture_id, &payload.new_name).await {
        Ok(new_path) => {
            tracing::info!("Successfully renamed fixture: {} -> {}", fixture_id, payload.new_name);
            Json(ApiResponse::success(format!("Fixture renamed successfully to: {}", new_path)))
        }
        Err(e) => {
            tracing::error!("Failed to rename fixture {}: {}", fixture_id, e);
            Json(ApiResponse::error(format!("Failed to rename fixture: {}", e)))
        }
    }
}

async fn rename_fixture_by_id(fixture_id: &str, new_name: &str) -> Result<String> {
    if new_name.is_empty() {
        return Err(Error::generic("New name cannot be empty".to_string()));
    }

    // Ensure the new name keeps the `.json` extension.
    let new_name = if new_name.ends_with(".json") {
        new_name.to_string()
    } else {
        format!("{}.json", new_name)
    };

    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    let parent = old_path
        .parent()
        .ok_or_else(|| Error::generic("Could not determine parent directory".to_string()))?;

    let new_path = parent.join(&new_name);

    if new_path.exists() {
        return Err(Error::generic(format!(
            "A fixture with name '{}' already exists in the same directory",
            new_name
        )));
    }

    let old_path_clone = old_path.clone();
    let new_path_clone = new_path.clone();
    tokio::task::spawn_blocking(move || {
        std::fs::rename(&old_path_clone, &new_path_clone)
            .map_err(|e| Error::generic(format!("Failed to rename fixture file: {}", e)))
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Renamed fixture file: {} -> {}", old_path.display(), new_path.display());

    Ok(new_path
        .strip_prefix(fixtures_path)
        .unwrap_or(&new_path)
        .to_string_lossy()
        .to_string())
}

/// Move a fixture file to a new path under the fixtures root.
pub async fn move_fixture(
    axum::extract::Path(fixture_id): axum::extract::Path<String>,
    Json(payload): Json<FixtureMoveRequest>,
) -> Json<ApiResponse<String>> {
    match move_fixture_by_id(&fixture_id, &payload.new_path).await {
        Ok(new_location) => {
            tracing::info!("Successfully moved fixture: {} -> {}", fixture_id, payload.new_path);
            Json(ApiResponse::success(format!("Fixture moved successfully to: {}", new_location)))
        }
        Err(e) => {
            tracing::error!("Failed to move fixture {}: {}", fixture_id, e);
            Json(ApiResponse::error(format!("Failed to move fixture: {}", e)))
        }
    }
}

async fn move_fixture_by_id(fixture_id: &str, new_path: &str) -> Result<String> {
    if new_path.is_empty() {
        return Err(Error::generic("New path cannot be empty".to_string()));
    }

    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    // Interpret the target path relative to the fixtures root.
    let new_full_path = if new_path.starts_with('/') {
        fixtures_path.join(new_path.trim_start_matches('/'))
    } else {
        fixtures_path.join(new_path)
    };

    // Normalize the target: directories keep the original file name, and
    // bare names get a `.json` extension.
    let new_full_path = if new_full_path.extension().and_then(|s| s.to_str()) == Some("json") {
        new_full_path
    } else if new_full_path.is_dir() || !new_path.contains('.') {
        let file_name = old_path.file_name().ok_or_else(|| {
            Error::generic("Could not determine original file name".to_string())
        })?;
        new_full_path.join(file_name)
    } else {
        new_full_path.with_extension("json")
    };

    if new_full_path.exists() {
        return Err(Error::generic(format!(
            "A fixture already exists at path: {}",
            new_full_path.display()
        )));
    }

    let old_path_clone = old_path.clone();
    let new_full_path_clone = new_full_path.clone();
    tokio::task::spawn_blocking(move || {
        if let Some(parent) = new_full_path_clone.parent() {
            std::fs::create_dir_all(parent)
                .map_err(|e| Error::generic(format!("Failed to create target directory: {}", e)))?;
        }

        std::fs::rename(&old_path_clone, &new_full_path_clone)
            .map_err(|e| Error::generic(format!("Failed to move fixture file: {}", e)))
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Moved fixture file: {} -> {}", old_path.display(), new_full_path.display());

    cleanup_empty_directories(&old_path).await;

    Ok(new_full_path
        .strip_prefix(fixtures_path)
        .unwrap_or(&new_full_path)
        .to_string_lossy()
        .to_string())
}

/// Return the current request/response validation settings.
pub async fn get_validation(
    State(state): State<AdminState>,
) -> Json<ApiResponse<ValidationSettings>> {
    let config_state = state.get_config().await;

    Json(ApiResponse::success(config_state.validation_settings))
}

/// Update validation settings; `mode` must be one of `enforce`, `warn`, or `off`.
pub async fn update_validation(
    State(state): State<AdminState>,
    Json(update): Json<ValidationUpdate>,
) -> Json<ApiResponse<String>> {
    match update.mode.as_str() {
        "enforce" | "warn" | "off" => {}
        _ => {
            return Json(ApiResponse::error(
                "Invalid validation mode. Must be 'enforce', 'warn', or 'off'".to_string(),
            ))
        }
    }

    let mode = update.mode.clone();
    state
        .update_validation_config(
            update.mode,
            update.aggregate_errors,
            update.validate_responses,
            update.overrides.unwrap_or_default(),
        )
        .await;

    tracing::info!(
        "Updated validation settings: mode={}, aggregate_errors={}",
        mode,
        update.aggregate_errors
    );

    Json(ApiResponse::success("Validation settings updated".to_string()))
}

/// Report the MockForge-relevant environment variables currently set.
pub async fn get_env_vars() -> Json<ApiResponse<HashMap<String, String>>> {
    let mut env_vars = HashMap::new();

    let relevant_vars = [
        // Core toggles
        "MOCKFORGE_LATENCY_ENABLED",
        "MOCKFORGE_FAILURES_ENABLED",
        "MOCKFORGE_PROXY_ENABLED",
        "MOCKFORGE_RECORD_ENABLED",
        "MOCKFORGE_REPLAY_ENABLED",
        "MOCKFORGE_LOG_LEVEL",
        "MOCKFORGE_CONFIG_FILE",
        "RUST_LOG",
        // HTTP server
        "MOCKFORGE_HTTP_PORT",
        "MOCKFORGE_HTTP_HOST",
        "MOCKFORGE_HTTP_OPENAPI_SPEC",
        "MOCKFORGE_CORS_ENABLED",
        "MOCKFORGE_REQUEST_TIMEOUT_SECS",
        // WebSocket server
        "MOCKFORGE_WS_PORT",
        "MOCKFORGE_WS_HOST",
        "MOCKFORGE_WS_REPLAY_FILE",
        "MOCKFORGE_WS_CONNECTION_TIMEOUT_SECS",
        // gRPC server
        "MOCKFORGE_GRPC_PORT",
        "MOCKFORGE_GRPC_HOST",
        // Admin UI
        "MOCKFORGE_ADMIN_ENABLED",
        "MOCKFORGE_ADMIN_PORT",
        "MOCKFORGE_ADMIN_HOST",
        "MOCKFORGE_ADMIN_MOUNT_PATH",
        "MOCKFORGE_ADMIN_API_ENABLED",
        // Validation and templating
        "MOCKFORGE_RESPONSE_TEMPLATE_EXPAND",
        "MOCKFORGE_REQUEST_VALIDATION",
        "MOCKFORGE_AGGREGATE_ERRORS",
        "MOCKFORGE_RESPONSE_VALIDATION",
        "MOCKFORGE_VALIDATION_STATUS",
        // Data generation
        "MOCKFORGE_RAG_ENABLED",
        "MOCKFORGE_FAKE_TOKENS",
        // Fixtures
        "MOCKFORGE_FIXTURES_DIR",
    ];

    for var_name in &relevant_vars {
        if let Ok(value) = std::env::var(var_name) {
            env_vars.insert(var_name.to_string(), value);
        }
    }

    Json(ApiResponse::success(env_vars))
}

/// Update a process environment variable. The change is process-local and is
/// lost on restart.
pub async fn update_env_var(Json(update): Json<EnvVarUpdate>) -> Json<ApiResponse<String>> {
    std::env::set_var(&update.key, &update.value);

    tracing::info!("Updated environment variable: {}={}", update.key, update.value);

    Json(ApiResponse::success(format!(
        "Environment variable {} updated to '{}'. Note: This change is not persisted and will be lost on restart.",
        update.key, update.value
    )))
}

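// Sketch: `update_env_var` mutates the *process* environment only, which is
// why the handler's response warns the change is not persisted. A throwaway
// variable name is used to avoid clashing with real configuration.
#[cfg(test)]
mod env_var_tests {
    #[test]
    fn set_var_is_visible_in_process() {
        std::env::set_var("MOCKFORGE_TEST_SCRATCH_VAR", "1");
        assert_eq!(std::env::var("MOCKFORGE_TEST_SCRATCH_VAR").as_deref(), Ok("1"));
        std::env::remove_var("MOCKFORGE_TEST_SCRATCH_VAR");
    }
}
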
/// Read a file's content, subject to path and content validation.
pub async fn get_file_content(
    Json(request): Json<FileContentRequest>,
) -> Json<ApiResponse<String>> {
    if let Err(e) = validate_file_path(&request.file_path) {
        return Json(ApiResponse::error(format!("Invalid file path: {}", e)));
    }

    match tokio::fs::read_to_string(&request.file_path).await {
        Ok(content) => {
            if let Err(e) = validate_file_content(&content) {
                return Json(ApiResponse::error(format!("Invalid file content: {}", e)));
            }
            Json(ApiResponse::success(content))
        }
        Err(e) => Json(ApiResponse::error(format!("Failed to read file: {}", e))),
    }
}

/// Persist file content to disk, creating parent directories as needed.
pub async fn save_file_content(Json(request): Json<FileSaveRequest>) -> Json<ApiResponse<String>> {
    match save_file_to_filesystem(&request.file_path, &request.content).await {
        Ok(_) => {
            tracing::info!("Successfully saved file: {}", request.file_path);
            Json(ApiResponse::success("File saved successfully".to_string()))
        }
        Err(e) => {
            tracing::error!("Failed to save file {}: {}", request.file_path, e);
            Json(ApiResponse::error(format!("Failed to save file: {}", e)))
        }
    }
}

async fn save_file_to_filesystem(file_path: &str, content: &str) -> Result<()> {
    validate_file_path(file_path)?;

    validate_file_content(content)?;

    let path = std::path::PathBuf::from(file_path);
    let content = content.to_string();

    let path_clone = path.clone();
    let content_clone = content.clone();
    tokio::task::spawn_blocking(move || {
        if let Some(parent) = path_clone.parent() {
            std::fs::create_dir_all(parent).map_err(|e| {
                Error::generic(format!("Failed to create directory {}: {}", parent.display(), e))
            })?;
        }

        std::fs::write(&path_clone, &content_clone).map_err(|e| {
            Error::generic(format!("Failed to write file {}: {}", path_clone.display(), e))
        })?;

        // Read the file back to verify the write round-trips.
        let written_content = std::fs::read_to_string(&path_clone).map_err(|e| {
            Error::generic(format!("Failed to verify written file {}: {}", path_clone.display(), e))
        })?;

        if written_content != content_clone {
            return Err(Error::generic(format!(
                "File content verification failed for {}",
                path_clone.display()
            )));
        }

        Ok::<_, Error>(())
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("File saved successfully: {} ({} bytes)", path.display(), content.len());
    Ok(())
}

/// Reject paths that traverse outside allowed directories or carry
/// executable extensions.
fn validate_file_path(file_path: &str) -> Result<()> {
    if file_path.contains("..") {
        return Err(Error::generic("Path traversal detected in file path".to_string()));
    }

    let path = std::path::Path::new(file_path);
    if path.is_absolute() {
        let allowed_dirs = [
            std::env::current_dir().unwrap_or_default(),
            std::path::PathBuf::from("."),
            std::path::PathBuf::from("fixtures"),
            std::path::PathBuf::from("config"),
        ];

        let mut is_allowed = false;
        for allowed_dir in &allowed_dirs {
            if path.starts_with(allowed_dir) {
                is_allowed = true;
                break;
            }
        }

        if !is_allowed {
            return Err(Error::generic("File path is outside allowed directories".to_string()));
        }
    }

    let dangerous_extensions = ["exe", "bat", "cmd", "sh", "ps1", "scr", "com"];
    if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
        if dangerous_extensions.contains(&extension.to_lowercase().as_str()) {
            return Err(Error::generic(format!(
                "Dangerous file extension not allowed: {}",
                extension
            )));
        }
    }

    Ok(())
}

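// Sketch of the path guardrails above: traversal components and executable
// extensions are rejected, while a plain relative JSON path passes. The
// paths themselves are invented.
#[cfg(test)]
mod validate_file_path_tests {
    use super::validate_file_path;

    #[test]
    fn rejects_traversal_and_dangerous_extensions() {
        assert!(validate_file_path("../etc/passwd").is_err());
        assert!(validate_file_path("fixtures/evil.exe").is_err());
        assert!(validate_file_path("fixtures/http/users.json").is_ok());
    }
}
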
fn validate_file_content(content: &str) -> Result<()> {
    if content.len() > 10 * 1024 * 1024 {
        return Err(Error::generic("File content too large (max 10MB)".to_string()));
    }

    if content.contains('\0') {
        return Err(Error::generic("File content contains null bytes".to_string()));
    }

    Ok(())
}

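// Sketch of the content checks: the null-byte guard rejects binary-looking
// input while plain JSON passes (the 10 MB cap is impractical to exercise
// cheaply here).
#[cfg(test)]
mod validate_file_content_tests {
    use super::validate_file_content;

    #[test]
    fn rejects_null_bytes_and_accepts_plain_json() {
        assert!(validate_file_content("{\"ok\":true}").is_ok());
        assert!(validate_file_content("bad\0content").is_err());
    }
}
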
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureDeleteRequest {
    pub fixture_id: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvVarUpdate {
    pub key: String,
    pub value: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteRequest {
    pub fixture_ids: Vec<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteResult {
    pub deleted_count: usize,
    pub total_requested: usize,
    pub errors: Vec<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureRenameRequest {
    pub new_name: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureMoveRequest {
    pub new_path: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContentRequest {
    pub file_path: String,
    pub file_type: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileSaveRequest {
    pub file_path: String,
    pub content: String,
}

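// Sketch of the wire shape these request structs accept. The field names come
// straight from the derives above; the values are invented.
#[cfg(test)]
mod request_payload_tests {
    use super::{FixtureMoveRequest, FixtureRenameRequest};

    #[test]
    fn rename_and_move_payloads_deserialize() {
        let rename: FixtureRenameRequest =
            serde_json::from_str(r#"{"new_name":"users_get"}"#).unwrap();
        assert_eq!(rename.new_name, "users_get");

        let mv: FixtureMoveRequest =
            serde_json::from_str(r#"{"new_path":"http/archived/users_get.json"}"#).unwrap();
        assert_eq!(mv.new_path, "http/archived/users_get.json");
    }
}
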
/// Return the most recent smoke test results.
pub async fn get_smoke_tests(
    State(state): State<AdminState>,
) -> Json<ApiResponse<Vec<SmokeTestResult>>> {
    let results = state.get_smoke_test_results().await;
    Json(ApiResponse::success(results))
}

/// Kick off a smoke test run in the background and return immediately.
pub async fn run_smoke_tests_endpoint(
    State(state): State<AdminState>,
) -> Json<ApiResponse<String>> {
    tracing::info!("Starting smoke test execution");

    let state_clone = state.clone();
    tokio::spawn(async move {
        if let Err(e) = execute_smoke_tests(&state_clone).await {
            tracing::error!("Smoke test execution failed: {}", e);
        } else {
            tracing::info!("Smoke test execution completed successfully");
        }
    });

    Json(ApiResponse::success(
        "Smoke tests started. Check results in the smoke tests section.".to_string(),
    ))
}

async fn execute_smoke_tests(state: &AdminState) -> Result<()> {
    let base_url =
        std::env::var("MOCKFORGE_BASE_URL").unwrap_or_else(|_| "http://localhost:3000".to_string());

    let context = SmokeTestContext {
        base_url,
        timeout_seconds: 30,
        parallel: true,
    };

    let fixtures = scan_fixtures_directory()?;

    // Only HTTP fixtures are smoke-testable today.
    let http_fixtures: Vec<&FixtureInfo> =
        fixtures.iter().filter(|f| f.protocol == "http").collect();

    if http_fixtures.is_empty() {
        tracing::warn!("No HTTP fixtures found for smoke testing");
        return Ok(());
    }

    tracing::info!("Running smoke tests for {} HTTP fixtures", http_fixtures.len());

    let mut test_results = Vec::new();

    for fixture in http_fixtures {
        let test_result = create_smoke_test_from_fixture(fixture);
        test_results.push(test_result);
    }

    let mut executed_results = Vec::new();
    for mut test_result in test_results {
        // Mark the test as running so the UI can show progress.
        test_result.status = "running".to_string();
        state.update_smoke_test_result(test_result.clone()).await;

        let start_time = std::time::Instant::now();
        match execute_single_smoke_test(&test_result, &context).await {
            Ok((status_code, response_time_ms)) => {
                test_result.status = "passed".to_string();
                test_result.status_code = Some(status_code);
                test_result.response_time_ms = Some(response_time_ms);
                test_result.error_message = None;
            }
            Err(e) => {
                test_result.status = "failed".to_string();
                test_result.error_message = Some(e.to_string());
                test_result.status_code = None;
                test_result.response_time_ms = None;
            }
        }

        let duration = start_time.elapsed();
        test_result.duration_seconds = Some(duration.as_secs_f64());
        test_result.last_run = Some(chrono::Utc::now());

        executed_results.push(test_result.clone());
        state.update_smoke_test_result(test_result).await;
    }

    tracing::info!("Smoke test execution completed: {} tests run", executed_results.len());
    Ok(())
}

/// Build a pending smoke test entry from a recorded fixture.
fn create_smoke_test_from_fixture(fixture: &FixtureInfo) -> SmokeTestResult {
    let test_name = format!("{} {}", fixture.method, fixture.path);
    let description = format!("Smoke test for {} endpoint", fixture.path);

    SmokeTestResult {
        id: format!("smoke_{}", fixture.id),
        name: test_name,
        method: fixture.method.clone(),
        path: fixture.path.clone(),
        description,
        last_run: None,
        status: "pending".to_string(),
        response_time_ms: None,
        error_message: None,
        status_code: None,
        duration_seconds: None,
    }
}

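// Sketch: building a pending `SmokeTestResult` from a hand-rolled
// `FixtureInfo`. All field values here are invented for illustration.
#[cfg(test)]
mod create_smoke_test_tests {
    use super::{create_smoke_test_from_fixture, FixtureInfo};

    #[test]
    fn fixture_maps_to_pending_test() {
        let fixture = FixtureInfo {
            id: "abc123".to_string(),
            protocol: "http".to_string(),
            method: "GET".to_string(),
            path: "/users".to_string(),
            saved_at: chrono::Utc::now(),
            file_size: 0,
            file_path: "http/users.json".to_string(),
            fingerprint: "deadbeef".to_string(),
            metadata: serde_json::json!({}),
        };

        let test = create_smoke_test_from_fixture(&fixture);
        assert_eq!(test.id, "smoke_abc123");
        assert_eq!(test.name, "GET /users");
        assert_eq!(test.status, "pending");
        assert!(test.last_run.is_none());
    }
}
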
async fn execute_single_smoke_test(
    test: &SmokeTestResult,
    context: &SmokeTestContext,
) -> Result<(u16, u64)> {
    let url = format!("{}{}", context.base_url, test.path);
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(context.timeout_seconds))
        .build()
        .map_err(|e| Error::generic(format!("Failed to create HTTP client: {}", e)))?;

    let start_time = std::time::Instant::now();

    let response = match test.method.as_str() {
        "GET" => client.get(&url).send().await,
        "POST" => client.post(&url).send().await,
        "PUT" => client.put(&url).send().await,
        "DELETE" => client.delete(&url).send().await,
        "PATCH" => client.patch(&url).send().await,
        "HEAD" => client.head(&url).send().await,
        "OPTIONS" => client.request(reqwest::Method::OPTIONS, &url).send().await,
        _ => {
            return Err(Error::generic(format!("Unsupported HTTP method: {}", test.method)));
        }
    };

    let response_time = start_time.elapsed();
    let response_time_ms = response_time.as_millis() as u64;

    match response {
        Ok(resp) => {
            let status_code = resp.status().as_u16();
            if (200..400).contains(&status_code) {
                Ok((status_code, response_time_ms))
            } else {
                Err(Error::generic(format!(
                    "HTTP error: {} {}",
                    status_code,
                    resp.status().canonical_reason().unwrap_or("Unknown")
                )))
            }
        }
        Err(e) => Err(Error::generic(format!("Request failed: {}", e))),
    }
}

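// Sketch of driving `execute_single_smoke_test` by hand. It reuses
// `create_smoke_test_from_fixture` to build the test record, assumes
// `SmokeTestContext` has exactly the three fields used above, and is
// `#[ignore]`d because it needs a server listening at the assumed base URL
// (run with `cargo test -- --ignored` when one is up).
#[cfg(test)]
mod execute_single_smoke_test_tests {
    use super::{
        create_smoke_test_from_fixture, execute_single_smoke_test, FixtureInfo, SmokeTestContext,
    };

    #[tokio::test]
    #[ignore = "requires a running server at the assumed base URL"]
    async fn root_endpoint_answers_with_2xx_or_3xx() {
        let fixture = FixtureInfo {
            id: "manual".to_string(),
            protocol: "http".to_string(),
            method: "GET".to_string(),
            path: "/".to_string(),
            saved_at: chrono::Utc::now(),
            file_size: 0,
            file_path: "http/root.json".to_string(),
            fingerprint: "manual".to_string(),
            metadata: serde_json::json!({}),
        };
        let test = create_smoke_test_from_fixture(&fixture);
        let context = SmokeTestContext {
            base_url: "http://localhost:3000".to_string(), // assumed default
            timeout_seconds: 5,
            parallel: false,
        };

        let (status, elapsed_ms) = execute_single_smoke_test(&test, &context).await.unwrap();
        assert!((200..400).contains(&status));
        assert!(elapsed_ms < 5_000);
    }
}
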
/// Install a plugin from a local path or URL.
pub async fn install_plugin(Json(request): Json<serde_json::Value>) -> impl IntoResponse {
    let source = request.get("source").and_then(|s| s.as_str()).unwrap_or("");

    if source.is_empty() {
        return Json(json!({
            "success": false,
            "error": "Plugin source is required"
        }));
    }

    // Remote sources are downloaded to a temporary file first.
    let plugin_path = if source.starts_with("http://") || source.starts_with("https://") {
        match download_plugin_from_url(source).await {
            Ok(temp_path) => temp_path,
            Err(e) => {
                return Json(json!({
                    "success": false,
                    "error": format!("Failed to download plugin: {}", e)
                }))
            }
        }
    } else {
        std::path::PathBuf::from(source)
    };

    if !plugin_path.exists() {
        return Json(json!({
            "success": false,
            "error": format!("Plugin file not found: {}", source)
        }));
    }

    // Actual installation is not performed here yet; this endpoint only
    // validates that the plugin source is reachable.
    Json(json!({
        "success": true,
        "message": format!("Plugin would be installed from: {}", source)
    }))
}

async fn download_plugin_from_url(url: &str) -> Result<std::path::PathBuf> {
    let temp_file =
        std::env::temp_dir().join(format!("plugin_{}.tmp", chrono::Utc::now().timestamp()));
    let temp_path = temp_file.clone();

    let response = reqwest::get(url)
        .await
        .map_err(|e| Error::generic(format!("Failed to download from URL: {}", e)))?;

    if !response.status().is_success() {
        return Err(Error::generic(format!(
            "HTTP error {}: {}",
            response.status().as_u16(),
            response.status().canonical_reason().unwrap_or("Unknown")
        )));
    }

    let bytes = response
        .bytes()
        .await
        .map_err(|e| Error::generic(format!("Failed to read response: {}", e)))?;

    tokio::fs::write(&temp_file, &bytes)
        .await
        .map_err(|e| Error::generic(format!("Failed to write temporary file: {}", e)))?;

    Ok(temp_path)
}

// Placeholder asset handlers: each returns an empty body with an `image/png`
// content type.
pub async fn serve_icon() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

pub async fn serve_icon_32() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

pub async fn serve_icon_48() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

pub async fn serve_logo() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

pub async fn serve_logo_40() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

pub async fn serve_logo_80() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

/// Update traffic shaping configuration. Currently a stub that acknowledges
/// the request without applying it.
pub async fn update_traffic_shaping(
    State(_state): State<AdminState>,
    Json(_config): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Traffic shaping updated".to_string()))
}

/// Import a Postman collection into a new workspace, recording the outcome
/// in the import history either way.
pub async fn import_postman(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use mockforge_core::workspace_import::{import_postman_to_workspace, WorkspaceImportConfig};
    use uuid::Uuid;

    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    let import_result = match mockforge_core::import::import_postman_collection(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            // Record the failed import in history before returning.
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Postman import failed: {}", e)));
        }
    };

    let workspace_name = filename
        .and_then(|f| f.split('.').next())
        .unwrap_or("Imported Postman Collection");

    let config = WorkspaceImportConfig {
        create_folders: true,
        base_folder_name: None,
        preserve_hierarchy: true,
        max_depth: 5,
    };

    let routes: Vec<ImportRoute> = import_result
        .routes
        .into_iter()
        .map(|route| ImportRoute {
            method: route.method,
            path: route.path,
            headers: route.headers,
            body: route.body,
            response: ImportResponse {
                status: route.response.status,
                headers: route.response.headers,
                body: route.response.body,
            },
        })
        .collect();

    match import_postman_to_workspace(routes, workspace_name.to_string(), config) {
        Ok(workspace_result) => {
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: import_result.variables.len(),
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}

/// Import an Insomnia export into a new workspace, recording the outcome in
/// the import history either way.
pub async fn import_insomnia(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use uuid::Uuid;

    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    let import_result = match mockforge_core::import::import_insomnia_export(content, environment) {
        Ok(result) => result,
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "insomnia".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Insomnia import failed: {}", e)));
        }
    };

    let workspace_name = filename
        .and_then(|f| f.split('.').next())
        .unwrap_or("Imported Insomnia Collection");

    // Constructed for parity with the Postman path but currently unused:
    // `create_workspace_from_insomnia` applies its own defaults.
    let _config = WorkspaceImportConfig {
        create_folders: true,
        base_folder_name: None,
        preserve_hierarchy: true,
        max_depth: 5,
    };

    // Capture before `import_result` is moved below.
    let variables_count = import_result.variables.len();

    match mockforge_core::workspace_import::create_workspace_from_insomnia(
        import_result,
        Some(workspace_name.to_string()),
    ) {
        Ok(workspace_result) => {
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "insomnia".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count,
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "insomnia".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}

/// Import an OpenAPI spec. Currently a stub that acknowledges the request
/// without importing anything.
pub async fn import_openapi(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("OpenAPI import completed".to_string()))
}

/// Import one or more curl commands into a new workspace.
pub async fn import_curl(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use uuid::Uuid;

    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    let import_result = match mockforge_core::import::import_curl_commands(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Curl import failed: {}", e)));
        }
    };

    let workspace_name =
        filename.and_then(|f| f.split('.').next()).unwrap_or("Imported Curl Commands");

    match mockforge_core::workspace_import::create_workspace_from_curl(
        import_result,
        Some(workspace_name.to_string()),
    ) {
        Ok(workspace_result) => {
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: 0, // curl imports carry no variables
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}

/// Preview an import without persisting anything: detect the format from the
/// file name, parse the content, and return the routes that would be created.
pub async fn preview_import(
    State(_state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<serde_json::Value>> {
    use mockforge_core::import::{
        import_curl_commands, import_insomnia_export, import_postman_collection,
    };

    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Best-effort format detection based on the file name.
    let format = if let Some(fname) = filename {
        if fname.to_lowercase().contains("postman")
            || fname.to_lowercase().ends_with(".postman_collection")
        {
            "postman"
        } else if fname.to_lowercase().contains("insomnia")
            || fname.to_lowercase().ends_with(".insomnia")
        {
            "insomnia"
        } else if fname.to_lowercase().contains("curl")
            || fname.to_lowercase().ends_with(".sh")
            || fname.to_lowercase().ends_with(".curl")
        {
            "curl"
        } else {
            "unknown"
        }
    } else {
        "unknown"
    };

    match format {
        "postman" => match import_postman_collection(content, base_url) {
            Ok(import_result) => {
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                let response = serde_json::json!({
                    "routes": routes,
                    "variables": import_result.variables,
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Postman import failed: {}", e))),
        },
        "insomnia" => match import_insomnia_export(content, environment) {
            Ok(import_result) => {
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                let response = serde_json::json!({
                    "routes": routes,
                    "variables": import_result.variables,
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Insomnia import failed: {}", e))),
        },
        "curl" => match import_curl_commands(content, base_url) {
            Ok(import_result) => {
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                let response = serde_json::json!({
                    "routes": routes,
                    "variables": serde_json::json!({}),
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Curl import failed: {}", e))),
        },
        _ => Json(ApiResponse::error("Unsupported import format".to_string())),
    }
}

/// Return the 50 most recent import history entries, newest first.
pub async fn get_import_history(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let history = state.import_history.read().await;
    let total = history.len();

    let imports: Vec<serde_json::Value> = history
        .iter()
        .rev()
        .take(50)
        .map(|entry| {
            serde_json::json!({
                "id": entry.id,
                "format": entry.format,
                "timestamp": entry.timestamp.to_rfc3339(),
                "routes_count": entry.routes_count,
                "variables_count": entry.variables_count,
                "warnings_count": entry.warnings_count,
                "success": entry.success,
                "filename": entry.filename,
                "environment": entry.environment,
                "base_url": entry.base_url,
                "error_message": entry.error_message
            })
        })
        .collect();

    let response = serde_json::json!({
        "imports": imports,
        "total": total
    });

    Json(ApiResponse::success(response))
}

pub async fn get_admin_api_state(
    State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "status": "active"
    })))
}

pub async fn get_admin_api_replay(
    State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "replay": []
    })))
}

pub async fn get_sse_status(
    State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "available": true,
        "endpoint": "/sse",
        "config": {
            "event_type": "status",
            "interval_ms": 1000,
            "data_template": "{}"
        }
    })))
}

pub async fn get_sse_connections(
    State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "active_connections": 0
    })))
}

pub async fn get_workspaces(
    State(_state): State<AdminState>,
) -> Json<ApiResponse<Vec<serde_json::Value>>> {
    Json(ApiResponse::success(vec![]))
}

pub async fn create_workspace(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Workspace created".to_string()))
}

pub async fn open_workspace_from_directory(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Workspace opened from directory".to_string()))
}

/// Report the current reality level and the derived chaos, latency, and
/// MockAI configuration.
pub async fn get_reality_level(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.reality_engine.read().await;
    let level = engine.get_level().await;
    let config = engine.get_config().await;

    Json(ApiResponse::success(serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": {
            "enabled": config.chaos.enabled,
            "error_rate": config.chaos.error_rate,
            "delay_rate": config.chaos.delay_rate,
        },
        "latency": {
            "base_ms": config.latency.base_ms,
            "jitter_ms": config.latency.jitter_ms,
        },
        "mockai": {
            "enabled": config.mockai.enabled,
        },
    })))
}

#[derive(Deserialize)]
pub struct SetRealityLevelRequest {
    level: u8,
}

/// Set the reality level (1-5) and hot-reload the dependent subsystems.
pub async fn set_reality_level(
    State(state): State<AdminState>,
    Json(request): Json<SetRealityLevelRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let level = match mockforge_core::RealityLevel::from_value(request.level) {
        Some(l) => l,
        None => {
            return Json(ApiResponse::error(format!(
                "Invalid reality level: {}. Must be between 1 and 5.",
                request.level
            )));
        }
    };

    let engine = state.reality_engine.write().await;
    engine.set_level(level).await;
    let config = engine.get_config().await;
    // Release the engine lock before fanning updates out to subsystems.
    drop(engine);

    let mut update_errors = Vec::new();

    if let Some(ref chaos_api_state) = state.chaos_api_state {
        let mut chaos_config = chaos_api_state.config.write().await;

        use mockforge_chaos::config::{FaultInjectionConfig, LatencyConfig};

        // Translate the reality latency profile into the chaos latency config.
        let latency_config = if config.latency.base_ms > 0 {
            Some(LatencyConfig {
                enabled: true,
                fixed_delay_ms: Some(config.latency.base_ms),
                random_delay_range_ms: config
                    .latency
                    .max_ms
                    .map(|max| (config.latency.min_ms, max)),
                jitter_percent: if config.latency.jitter_ms > 0 {
                    (config.latency.jitter_ms as f64 / config.latency.base_ms as f64).min(1.0)
                } else {
                    0.0
                },
                probability: 1.0,
            })
        } else {
            None
        };

        let fault_injection_config = if config.chaos.enabled {
            Some(FaultInjectionConfig {
                enabled: true,
                http_errors: config.chaos.status_codes.clone(),
                http_error_probability: config.chaos.error_rate,
                connection_errors: false,
                connection_error_probability: 0.0,
                timeout_errors: config.chaos.inject_timeouts,
                timeout_ms: config.chaos.timeout_ms,
                timeout_probability: if config.chaos.inject_timeouts {
                    config.chaos.error_rate
                } else {
                    0.0
                },
                partial_responses: false,
                partial_response_probability: 0.0,
                payload_corruption: false,
                payload_corruption_probability: 0.0,
                corruption_type: mockforge_chaos::config::CorruptionType::None,
                error_pattern: Some(mockforge_chaos::config::ErrorPattern::Random {
                    probability: config.chaos.error_rate,
                }),
                mockai_enabled: false,
            })
        } else {
            None
        };

        chaos_config.enabled = config.chaos.enabled;
        chaos_config.latency = latency_config;
        chaos_config.fault_injection = fault_injection_config;

        drop(chaos_config);
        tracing::info!("✅ Updated chaos config for reality level {}", level.value());
    }

    if let Some(ref latency_injector) = state.latency_injector {
        match mockforge_core::latency::LatencyInjector::update_profile_async(
            latency_injector,
            config.latency.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated latency injector for reality level {}", level.value());
            }
            Err(e) => {
                let error_msg = format!("Failed to update latency injector: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    if let Some(ref mockai) = state.mockai {
        match mockforge_core::intelligent_behavior::MockAI::update_config_async(
            mockai,
            config.mockai.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated MockAI config for reality level {}", level.value());
            }
            Err(e) => {
                let error_msg = format!("Failed to update MockAI: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    let mut response = serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": {
            "enabled": config.chaos.enabled,
            "error_rate": config.chaos.error_rate,
            "delay_rate": config.chaos.delay_rate,
        },
        "latency": {
            "base_ms": config.latency.base_ms,
            "jitter_ms": config.latency.jitter_ms,
        },
        "mockai": {
            "enabled": config.mockai.enabled,
        },
    });

    if !update_errors.is_empty() {
        response["warnings"] = serde_json::json!(update_errors);
        tracing::warn!(
            "Reality level updated to {} but some subsystems failed to update: {:?}",
            level.value(),
            update_errors
        );
    } else {
        tracing::info!(
            "✅ Reality level successfully updated to {} (hot-reload applied)",
            level.value()
        );
    }

    Json(ApiResponse::success(response))
}

4028pub async fn list_reality_presets(
4030 State(state): State<AdminState>,
4031) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4032 let persistence = &state.workspace_persistence;
4033 match persistence.list_reality_presets().await {
4034 Ok(preset_paths) => {
4035 let presets: Vec<serde_json::Value> = preset_paths
4036 .iter()
4037 .map(|path| {
4038 serde_json::json!({
4039 "id": path.file_name().and_then(|n| n.to_str()).unwrap_or("unknown"),
4040 "path": path.to_string_lossy(),
4041 "name": path.file_stem().and_then(|n| n.to_str()).unwrap_or("unknown"),
4042 })
4043 })
4044 .collect();
4045 Json(ApiResponse::success(presets))
4046 }
4047 Err(e) => Json(ApiResponse::error(format!("Failed to list presets: {}", e))),
4048 }
4049}
4050
4051#[derive(Deserialize)]
4053pub struct ImportPresetRequest {
4054 path: String,
4055}
4056
4057pub async fn import_reality_preset(
4058 State(state): State<AdminState>,
4059 Json(request): Json<ImportPresetRequest>,
4060) -> Json<ApiResponse<serde_json::Value>> {
4061 let persistence = &state.workspace_persistence;
4062 let path = std::path::Path::new(&request.path);
4063
4064 match persistence.import_reality_preset(path).await {
4065 Ok(preset) => {
4066 let engine = state.reality_engine.write().await;
4068 engine.apply_preset(preset.clone()).await;
4069
4070 Json(ApiResponse::success(serde_json::json!({
4071 "name": preset.name,
4072 "description": preset.description,
4073 "level": preset.config.level.value(),
4074 "level_name": preset.config.level.name(),
4075 })))
4076 }
4077 Err(e) => Json(ApiResponse::error(format!("Failed to import preset: {}", e))),
4078 }
4079}
4080
4081#[derive(Deserialize)]
4083pub struct ExportPresetRequest {
4084 name: String,
4085 description: Option<String>,
4086}
4087
pub async fn export_reality_preset(
    State(state): State<AdminState>,
    Json(request): Json<ExportPresetRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.reality_engine.read().await;
    let preset = engine.create_preset(request.name.clone(), request.description.clone()).await;

    let persistence = &state.workspace_persistence;
    let presets_dir = persistence.presets_dir();
    let filename = format!("{}.json", request.name.replace(' ', "_").to_lowercase());
    let output_path = presets_dir.join(&filename);

    match persistence.export_reality_preset(&preset, &output_path).await {
        Ok(_) => Json(ApiResponse::success(serde_json::json!({
            "name": preset.name,
            "description": preset.description,
            "path": output_path.to_string_lossy(),
            "level": preset.config.level.value(),
        }))),
        Err(e) => Json(ApiResponse::error(format!("Failed to export preset: {}", e))),
    }
}

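/// Returns the mock-to-real blend ratio for a path (query parameter `path`,
/// defaulting to "/"), along with the continuum engine's current configuration.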
pub async fn get_continuum_ratio(
    State(state): State<AdminState>,
    Query(params): Query<std::collections::HashMap<String, String>>,
) -> Json<ApiResponse<serde_json::Value>> {
    let path = params.get("path").cloned().unwrap_or_else(|| "/".to_string());
    let engine = state.continuum_engine.read().await;
    let ratio = engine.get_blend_ratio(&path).await;
    let config = engine.get_config().await;
    let enabled = engine.is_enabled().await;

    Json(ApiResponse::success(serde_json::json!({
        "path": path,
        "blend_ratio": ratio,
        "enabled": enabled,
        "transition_mode": format!("{:?}", config.transition_mode),
        "merge_strategy": format!("{:?}", config.merge_strategy),
        "default_ratio": config.default_ratio,
    })))
}

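/// Request body for setting the blend ratio of a specific path.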
#[derive(Deserialize)]
pub struct SetContinuumRatioRequest {
    path: String,
    ratio: f64,
}

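/// Sets the blend ratio for a path. The ratio is clamped to [0.0, 1.0].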
pub async fn set_continuum_ratio(
    State(state): State<AdminState>,
    Json(request): Json<SetContinuumRatioRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let ratio = request.ratio.clamp(0.0, 1.0);
    let engine = state.continuum_engine.read().await;
    engine.set_blend_ratio(&request.path, ratio).await;

    Json(ApiResponse::success(serde_json::json!({
        "path": request.path,
        "blend_ratio": ratio,
    })))
}

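/// Returns the active time-based transition schedule, or JSON `null` if none is set.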
pub async fn get_continuum_schedule(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.continuum_engine.read().await;
    let schedule = engine.get_time_schedule().await;

    match schedule {
        Some(s) => Json(ApiResponse::success(serde_json::json!({
            "start_time": s.start_time.to_rfc3339(),
            "end_time": s.end_time.to_rfc3339(),
            "start_ratio": s.start_ratio,
            "end_ratio": s.end_ratio,
            "curve": format!("{:?}", s.curve),
            "duration_days": s.duration().num_days(),
        }))),
        None => Json(ApiResponse::success(serde_json::json!(null))),
    }
}

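/// Request body for configuring a time-based transition schedule. Timestamps
/// must be RFC 3339; `curve` accepts "linear", "exponential", or "sigmoid"
/// (unknown values fall back to linear).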
#[derive(Deserialize)]
pub struct SetContinuumScheduleRequest {
    start_time: String,
    end_time: String,
    start_ratio: f64,
    end_ratio: f64,
    curve: Option<String>,
}

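/// Installs a time-based transition schedule on the continuum engine.
/// Ratios are clamped to [0.0, 1.0]; invalid timestamps are rejected with an error.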
pub async fn set_continuum_schedule(
    State(state): State<AdminState>,
    Json(request): Json<SetContinuumScheduleRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let start_time = chrono::DateTime::parse_from_rfc3339(&request.start_time)
        .map_err(|e| format!("Invalid start_time: {}", e))
        .map(|dt| dt.with_timezone(&chrono::Utc));

    let end_time = chrono::DateTime::parse_from_rfc3339(&request.end_time)
        .map_err(|e| format!("Invalid end_time: {}", e))
        .map(|dt| dt.with_timezone(&chrono::Utc));

    match (start_time, end_time) {
        (Ok(start), Ok(end)) => {
            let curve = request
                .curve
                .as_deref()
                .map(|c| match c {
                    "linear" => mockforge_core::TransitionCurve::Linear,
                    "exponential" => mockforge_core::TransitionCurve::Exponential,
                    "sigmoid" => mockforge_core::TransitionCurve::Sigmoid,
                    _ => mockforge_core::TransitionCurve::Linear,
                })
                .unwrap_or(mockforge_core::TransitionCurve::Linear);

            let schedule = mockforge_core::TimeSchedule::with_curve(
                start,
                end,
                request.start_ratio.clamp(0.0, 1.0),
                request.end_ratio.clamp(0.0, 1.0),
                curve,
            );

            let engine = state.continuum_engine.read().await;
            engine.set_time_schedule(schedule.clone()).await;

            Json(ApiResponse::success(serde_json::json!({
                "start_time": schedule.start_time.to_rfc3339(),
                "end_time": schedule.end_time.to_rfc3339(),
                "start_ratio": schedule.start_ratio,
                "end_ratio": schedule.end_ratio,
                "curve": format!("{:?}", schedule.curve),
            })))
        }
        (Err(e), _) | (_, Err(e)) => Json(ApiResponse::error(e)),
    }
}

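/// Request body for nudging the default blend ratio forward.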
#[derive(Deserialize)]
pub struct AdvanceContinuumRatioRequest {
    increment: Option<f64>,
}

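/// Advances the default blend ratio by `increment` (0.1 if omitted).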
pub async fn advance_continuum_ratio(
    State(state): State<AdminState>,
    Json(request): Json<AdvanceContinuumRatioRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let increment = request.increment.unwrap_or(0.1);
    let engine = state.continuum_engine.read().await;
    engine.advance_ratio(increment).await;
    let config = engine.get_config().await;

    Json(ApiResponse::success(serde_json::json!({
        "default_ratio": config.default_ratio,
        "increment": increment,
    })))
}

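/// Request body for toggling the continuum engine on or off.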
#[derive(Deserialize)]
pub struct SetContinuumEnabledRequest {
    enabled: bool,
}

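/// Enables or disables the continuum engine.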
pub async fn set_continuum_enabled(
    State(state): State<AdminState>,
    Json(request): Json<SetContinuumEnabledRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.continuum_engine.read().await;
    engine.set_enabled(request.enabled).await;

    Json(ApiResponse::success(serde_json::json!({
        "enabled": request.enabled,
    })))
}

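/// Returns all manual per-path ratio overrides currently in effect.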
pub async fn get_continuum_overrides(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.continuum_engine.read().await;
    let overrides = engine.get_manual_overrides().await;

    Json(ApiResponse::success(serde_json::json!(overrides)))
}

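/// Clears all manual per-path ratio overrides.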
pub async fn clear_continuum_overrides(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.continuum_engine.read().await;
    engine.clear_manual_overrides().await;

    Json(ApiResponse::success(serde_json::json!({
        "message": "All manual overrides cleared",
    })))
}

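// The workspace handlers below are placeholder implementations: they return
// canned payloads and do not yet read or mutate persistent workspace state.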
pub async fn get_workspace(
    State(_state): State<AdminState>,
    axum::extract::Path(workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "workspace": {
            "summary": {
                "id": workspace_id,
                "name": "Mock Workspace",
                "description": "A mock workspace"
            },
            "folders": [],
            "requests": []
        }
    })))
}

pub async fn delete_workspace(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Workspace deleted".to_string()))
}

pub async fn set_active_workspace(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Workspace activated".to_string()))
}

pub async fn create_folder(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Folder created".to_string()))
}

pub async fn create_request(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Request created".to_string()))
}

pub async fn execute_workspace_request(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "status": "executed",
        "response": {}
    })))
}

pub async fn get_request_history(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
) -> Json<ApiResponse<Vec<serde_json::Value>>> {
    Json(ApiResponse::success(vec![]))
}

pub async fn get_folder(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, folder_id)): axum::extract::Path<(String, String)>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "folder": {
            "summary": {
                "id": folder_id,
                "name": "Mock Folder",
                "description": "A mock folder"
            },
            "requests": []
        }
    })))
}

pub async fn import_to_workspace(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Import to workspace completed".to_string()))
}

pub async fn export_workspaces(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Workspaces exported".to_string()))
}

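// Environment and autocomplete handlers are likewise placeholders: only a
// hardcoded "global" environment is reported, and mutations simply acknowledge
// the request.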
pub async fn get_environments(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<serde_json::Value>> {
    let environments = vec![serde_json::json!({
        "id": "global",
        "name": "Global",
        "description": "Global environment variables",
        "variable_count": 0,
        "is_global": true,
        "active": true,
        "order": 0
    })];

    Json(ApiResponse::success(serde_json::json!({
        "environments": environments,
        "total": 1
    })))
}

pub async fn create_environment(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment created".to_string()))
}

pub async fn update_environment(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment updated".to_string()))
}

pub async fn delete_environment(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment deleted".to_string()))
}

pub async fn set_active_environment(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment activated".to_string()))
}

pub async fn update_environments_order(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment order updated".to_string()))
}

pub async fn get_environment_variables(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "variables": []
    })))
}

pub async fn set_environment_variable(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment variable set".to_string()))
}

pub async fn remove_environment_variable(
    State(_state): State<AdminState>,
    axum::extract::Path((_workspace_id, _environment_id, _variable_name)): axum::extract::Path<(
        String,
        String,
        String,
    )>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Environment variable removed".to_string()))
}

pub async fn get_autocomplete_suggestions(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "suggestions": [],
        "start_position": 0,
        "end_position": 0
    })))
}

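// Sync handlers are placeholders: sync is reported as disabled, change lists
// are empty, and all operations simply acknowledge the request.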
pub async fn get_sync_status(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<serde_json::Value>> {
    Json(ApiResponse::success(serde_json::json!({
        "status": "disabled"
    })))
}

pub async fn configure_sync(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Sync configured".to_string()))
}

pub async fn disable_sync(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Sync disabled".to_string()))
}

pub async fn trigger_sync(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Sync triggered".to_string()))
}

pub async fn get_sync_changes(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
) -> Json<ApiResponse<Vec<serde_json::Value>>> {
    Json(ApiResponse::success(vec![]))
}

pub async fn confirm_sync_changes(
    State(_state): State<AdminState>,
    axum::extract::Path(_workspace_id): axum::extract::Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Sync changes confirmed".to_string()))
}

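/// Placeholder plugin validation endpoint; it currently reports success
/// without inspecting the submitted plugin.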
pub async fn validate_plugin(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Plugin validated".to_string()))
}

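/// Clears the in-memory import history held in `AdminState`.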
pub async fn clear_import_history(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    let mut history = state.import_history.write().await;
    history.clear();
    Json(ApiResponse::success("Import history cleared".to_string()))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_request_metrics_creation() {
        use std::collections::HashMap;

        let metrics = RequestMetrics {
            total_requests: 100,
            active_connections: 5,
            requests_by_endpoint: HashMap::new(),
            response_times: vec![10, 20, 30],
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint: HashMap::new(),
            last_request_by_endpoint: HashMap::new(),
        };

        assert_eq!(metrics.total_requests, 100);
        assert_eq!(metrics.active_connections, 5);
        assert_eq!(metrics.response_times.len(), 3);
    }

    #[test]
    fn test_system_metrics_creation() {
        let metrics = SystemMetrics {
            cpu_usage_percent: 45.5,
            memory_usage_mb: 100,
            active_threads: 10,
        };

        assert_eq!(metrics.active_threads, 10);
        assert!(metrics.cpu_usage_percent > 0.0);
        assert_eq!(metrics.memory_usage_mb, 100);
    }

    #[test]
    fn test_time_series_point() {
        let point = TimeSeriesPoint {
            timestamp: chrono::Utc::now(),
            value: 42.5,
        };

        assert_eq!(point.value, 42.5);
    }

4598
4599 #[test]
4600 fn test_restart_status() {
4601 let status = RestartStatus {
4602 in_progress: true,
4603 initiated_at: Some(chrono::Utc::now()),
4604 reason: Some("Manual restart".to_string()),
4605 success: None,
4606 };
4607
4608 assert!(status.in_progress);
4609 assert!(status.reason.is_some());
4610 }
4611
4612 #[test]
4613 fn test_configuration_state() {
4614 use std::collections::HashMap;
4615
4616 let state = ConfigurationState {
4617 latency_profile: crate::models::LatencyProfile {
4618 name: "default".to_string(),
4619 base_ms: 100,
4620 jitter_ms: 10,
4621 tag_overrides: HashMap::new(),
4622 },
4623 fault_config: crate::models::FaultConfig {
4624 enabled: false,
4625 failure_rate: 0.0,
4626 status_codes: vec![],
4627 active_failures: 0,
4628 },
4629 proxy_config: crate::models::ProxyConfig {
4630 enabled: false,
4631 upstream_url: None,
4632 timeout_seconds: 30,
4633 requests_proxied: 0,
4634 },
4635 validation_settings: crate::models::ValidationSettings {
4636 mode: "off".to_string(),
4637 aggregate_errors: false,
4638 validate_responses: false,
4639 overrides: HashMap::new(),
4640 },
4641 };
4642
4643 assert_eq!(state.latency_profile.name, "default");
4644 assert!(!state.fault_config.enabled);
4645 assert!(!state.proxy_config.enabled);
4646 }
4647
4648 #[test]
4649 fn test_admin_state_new() {
4650 let http_addr: std::net::SocketAddr = "127.0.0.1:3000".parse().unwrap();
4651 let state = AdminState::new(
4652 Some(http_addr),
4653 None,
4654 None,
4655 None,
4656 true,
4657 8080,
4658 None,
4659 None,
4660 None,
4661 None,
4662 None,
4663 );
4664
4665 assert_eq!(state.http_server_addr, Some(http_addr));
4666 assert!(state.api_enabled);
4667 assert_eq!(state.admin_port, 8080);
4668 }
4669}