1use axum::{
13 extract::{Path, Query, State},
14 http::{self, StatusCode},
15 response::{
16 sse::{Event, Sse},
17 Html, IntoResponse, Json,
18 },
19};
20use chrono::Utc;
21use futures_util::stream::{self, Stream};
22use mockforge_core::{Error, Result};
23use mockforge_plugin_loader::PluginRegistry;
24use serde::{Deserialize, Serialize};
25use serde_json::json;
26use std::collections::HashMap;
27use std::convert::Infallible;
28use std::process::Command;
29use std::process::Stdio;
30use std::sync::Arc;
31use std::time::Duration;
32use sysinfo::System;
33use tokio::sync::RwLock;
34
35use crate::models::{
37 ApiResponse, ConfigUpdate, DashboardData, DashboardSystemInfo, FaultConfig, HealthCheck,
38 LatencyProfile, LogFilter, MetricsData, ProxyConfig, RequestLog, RouteInfo, ServerInfo,
39 ServerStatus, SimpleMetricsData, SystemInfo, ValidationSettings, ValidationUpdate,
40};
41
42use mockforge_core::workspace_import::{ImportResponse, ImportRoute};
44
45pub mod admin;
47pub mod ai_studio;
48pub mod analytics;
49pub mod analytics_stream;
50pub mod analytics_v2;
51pub mod assets;
52pub mod behavioral_cloning;
53pub mod chains;
54pub mod community;
55pub mod contract_diff;
56pub mod coverage_metrics;
57pub mod failure_analysis;
58pub mod graph;
59pub mod health;
60pub mod migration;
61pub mod pillar_analytics;
62pub mod playground;
63pub mod plugin;
64pub mod promotions;
65pub mod verification;
66pub mod voice;
67pub mod workspaces;
68
69pub use assets::*;
71pub use chains::*;
72pub use graph::*;
73pub use migration::*;
74pub use plugin::*;
75
76use mockforge_core::workspace_import::WorkspaceImportConfig;
78use mockforge_core::workspace_persistence::WorkspacePersistence;
79
/// Aggregated request statistics maintained by the admin UI.
///
/// Counters and sample windows are written by [`AdminState::record_request`];
/// the sample vectors are capped there (100 global / 50 per-endpoint) to
/// bound memory use.
#[derive(Debug, Clone, Default)]
pub struct RequestMetrics {
    /// Total number of requests recorded since startup.
    pub total_requests: u64,
    /// Currently open connections. NOTE(review): never written by
    /// `record_request` in this file — confirm where it is updated.
    pub active_connections: u64,
    /// Request count keyed by `"METHOD path"`.
    pub requests_by_endpoint: HashMap<String, u64>,
    /// Rolling window of recent response times in milliseconds (newest last).
    pub response_times: Vec<u64>,
    /// Rolling per-endpoint response-time windows, keyed by `"METHOD path"`.
    pub response_times_by_endpoint: HashMap<String, Vec<u64>>,
    /// Count of responses with status >= 400, keyed by `"METHOD path"`.
    pub errors_by_endpoint: HashMap<String, u64>,
    /// Timestamp of the most recent request seen per endpoint.
    pub last_request_by_endpoint: HashMap<String, chrono::DateTime<chrono::Utc>>,
}
98
/// Point-in-time host metrics sampled by the background monitoring task
/// started in [`AdminState::start_system_monitoring`].
#[derive(Debug, Clone)]
pub struct SystemMetrics {
    /// Used memory, in megabytes.
    pub memory_usage_mb: u64,
    /// Global CPU usage percentage as reported by `sysinfo`.
    pub cpu_usage_percent: f64,
    /// NOTE(review): despite the name, this is populated with the host's
    /// logical CPU count (`sys.cpus().len()`), not an actual thread count.
    pub active_threads: u32,
}
109
/// A single timestamped sample in one of the dashboard time series.
#[derive(Debug, Clone)]
pub struct TimeSeriesPoint {
    /// When the sample was taken (UTC).
    pub timestamp: chrono::DateTime<chrono::Utc>,
    /// Sample value; unit depends on the owning series (MB, percent, count, ms).
    pub value: f64,
}
118
/// Rolling time-series windows backing the dashboard charts.
///
/// Each series is capped at 100 points by the update methods on `AdminState`
/// (oldest points are dropped first).
#[derive(Debug, Clone, Default)]
pub struct TimeSeriesData {
    /// Memory usage samples, in MB.
    pub memory_usage: Vec<TimeSeriesPoint>,
    /// CPU usage samples, in percent.
    pub cpu_usage: Vec<TimeSeriesPoint>,
    /// Cumulative request-count samples.
    pub request_count: Vec<TimeSeriesPoint>,
    /// Response-time samples, in milliseconds.
    pub response_time: Vec<TimeSeriesPoint>,
}
131
/// State of an admin-initiated server restart.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestartStatus {
    /// Whether a restart is currently underway.
    pub in_progress: bool,
    /// When the restart was requested; `None` if never initiated.
    pub initiated_at: Option<chrono::DateTime<chrono::Utc>>,
    /// Human-readable reason supplied by the initiator.
    pub reason: Option<String>,
    /// Outcome of the last restart; `None` while in progress or never run.
    pub success: Option<bool>,
}
144
/// Metadata describing a recorded fixture on disk.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureInfo {
    /// Unique fixture identifier.
    pub id: String,
    /// Protocol the fixture was recorded for (e.g. HTTP/WS/gRPC — confirm
    /// against the producer of this struct).
    pub protocol: String,
    /// Request method associated with the fixture.
    pub method: String,
    /// Request path associated with the fixture.
    pub path: String,
    /// When the fixture was persisted (UTC).
    pub saved_at: chrono::DateTime<chrono::Utc>,
    /// Size of the fixture file in bytes.
    pub file_size: u64,
    /// Filesystem path of the fixture file.
    pub file_path: String,
    /// Fingerprint of the fixture content; presumably used for dedup/matching —
    /// TODO confirm against the fixture writer.
    pub fingerprint: String,
    /// Free-form metadata attached to the fixture.
    pub metadata: serde_json::Value,
}
167
/// Result of one smoke-test case, stored in `AdminState::smoke_test_results`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SmokeTestResult {
    /// Unique test identifier (used as the upsert key in
    /// `update_smoke_test_result`).
    pub id: String,
    /// Display name of the test.
    pub name: String,
    /// HTTP method the test exercises.
    pub method: String,
    /// Path the test exercises.
    pub path: String,
    /// Human-readable description of the test.
    pub description: String,
    /// When the test last ran; `None` if never run.
    pub last_run: Option<chrono::DateTime<chrono::Utc>>,
    /// Current status string (values defined by the test runner — not
    /// enumerated in this file).
    pub status: String,
    /// Measured response time of the last run, in milliseconds.
    pub response_time_ms: Option<u64>,
    /// Error message from the last failing run, if any.
    pub error_message: Option<String>,
    /// HTTP status code observed on the last run.
    pub status_code: Option<u16>,
    /// Total wall-clock duration of the last run, in seconds.
    pub duration_seconds: Option<f64>,
}
194
/// Execution settings for a smoke-test run.
#[derive(Debug, Clone)]
pub struct SmokeTestContext {
    /// Base URL the tests are issued against.
    pub base_url: String,
    /// Per-request timeout, in seconds.
    pub timeout_seconds: u64,
    /// Whether tests may run concurrently.
    pub parallel: bool,
}
205
/// Mutable runtime configuration exposed through the admin API.
///
/// Defaults are established in [`AdminState::new`]; individual sections are
/// updated via the `update_*_config` methods.
#[derive(Debug, Clone, Serialize)]
pub struct ConfigurationState {
    /// Artificial latency injection settings.
    pub latency_profile: LatencyProfile,
    /// Fault-injection settings (failure rate, status codes).
    pub fault_config: FaultConfig,
    /// Upstream proxy settings.
    pub proxy_config: ProxyConfig,
    /// Request/response validation settings.
    pub validation_settings: ValidationSettings,
}
218
/// One entry in the workspace import history kept on `AdminState`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportHistoryEntry {
    /// Unique identifier for this import operation.
    pub id: String,
    /// Source format of the import (values defined by the importer).
    pub format: String,
    /// When the import was performed (UTC).
    pub timestamp: chrono::DateTime<chrono::Utc>,
    /// Number of routes produced by the import.
    pub routes_count: usize,
    /// Number of variables produced by the import.
    pub variables_count: usize,
    /// Number of warnings emitted during the import.
    pub warnings_count: usize,
    /// Whether the import completed successfully.
    pub success: bool,
    /// Original filename, if the import came from a file.
    pub filename: Option<String>,
    /// Target environment, if one was selected.
    pub environment: Option<String>,
    /// Base URL applied to imported routes, if any.
    pub base_url: Option<String>,
    /// Error message when `success` is false.
    pub error_message: Option<String>,
}
245
/// Shared state for all admin-UI handlers.
///
/// Cloning is cheap: mutable sections are behind `Arc<RwLock<_>>`, so every
/// handler (and the background monitoring task) shares the same underlying
/// data.
#[derive(Clone)]
pub struct AdminState {
    /// Bound address of the mock HTTP server, if running.
    pub http_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the WebSocket server, if running.
    pub ws_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the gRPC server, if running.
    pub grpc_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the GraphQL server, if running.
    pub graphql_server_addr: Option<std::net::SocketAddr>,
    /// Whether the admin REST API is enabled.
    pub api_enabled: bool,
    /// Port the admin UI listens on.
    pub admin_port: u16,
    /// Process start time, used for uptime calculations.
    pub start_time: chrono::DateTime<chrono::Utc>,
    /// Request counters and rolling response-time windows.
    pub metrics: Arc<RwLock<RequestMetrics>>,
    /// Latest host metrics sample (CPU, memory).
    pub system_metrics: Arc<RwLock<SystemMetrics>>,
    /// Mutable runtime configuration (latency, faults, proxy, validation).
    pub config: Arc<RwLock<ConfigurationState>>,
    /// In-memory request log, capped at 1000 entries by `record_request`.
    pub logs: Arc<RwLock<Vec<RequestLog>>>,
    /// Rolling time-series data backing the dashboard charts.
    pub time_series: Arc<RwLock<TimeSeriesData>>,
    /// State of any in-flight admin-initiated restart.
    pub restart_status: Arc<RwLock<RestartStatus>>,
    /// Smoke-test results, capped at 100 entries.
    pub smoke_test_results: Arc<RwLock<Vec<SmokeTestResult>>>,
    /// History of workspace imports.
    pub import_history: Arc<RwLock<Vec<ImportHistoryEntry>>>,
    /// Persistence layer for workspaces (rooted at `./workspaces` by default).
    pub workspace_persistence: Arc<WorkspacePersistence>,
    /// Registry of loaded plugins.
    pub plugin_registry: Arc<RwLock<PluginRegistry>>,
    /// Reality engine instance.
    pub reality_engine: Arc<RwLock<mockforge_core::RealityEngine>>,
    /// Reality continuum engine instance.
    pub continuum_engine: Arc<RwLock<mockforge_core::RealityContinuumEngine>>,
    /// Chaos API state, when the chaos subsystem is enabled.
    pub chaos_api_state: Option<std::sync::Arc<mockforge_chaos::api::ChaosApiState>>,
    /// Latency injector shared with the mock servers, if configured.
    pub latency_injector:
        Option<std::sync::Arc<tokio::sync::RwLock<mockforge_core::latency::LatencyInjector>>>,
    /// Intelligent-behavior (MockAI) engine, if configured.
    pub mockai:
        Option<std::sync::Arc<tokio::sync::RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
}
299
300impl AdminState {
    /// Spawns a detached background task that samples host metrics every 10
    /// seconds via `sysinfo` and feeds them into [`Self::update_system_metrics`].
    ///
    /// The task runs for the lifetime of the process; its `JoinHandle` is
    /// intentionally dropped.
    pub async fn start_system_monitoring(&self) {
        // The task owns its own clone of the state; all mutable sections are
        // Arc-backed, so this is cheap and shares the same underlying data.
        let state_clone = self.clone();
        tokio::spawn(async move {
            let mut sys = System::new_all();
            let mut refresh_count = 0u64;

            tracing::info!("Starting system monitoring background task");

            loop {
                sys.refresh_all();

                // NOTE(review): sysinfo needs two spaced refreshes before CPU
                // usage is meaningful; the first iteration may report 0 —
                // confirm against the pinned sysinfo version's docs.
                let cpu_usage = sys.global_cpu_usage();

                let _total_memory = sys.total_memory() as f64;
                let used_memory = sys.used_memory() as f64;
                // Assumes used_memory() returns bytes (sysinfo >= 0.26; older
                // versions returned KiB) — TODO confirm against Cargo.lock.
                let memory_usage_mb = used_memory / 1024.0 / 1024.0;

                // Logical CPU count, not a thread count (see SystemMetrics).
                let active_threads = sys.cpus().len() as u32;

                let memory_mb_u64 = memory_usage_mb as u64;

                // Log only every 10th sample (~100s) to keep debug output quiet.
                if refresh_count.is_multiple_of(10) {
                    tracing::debug!(
                        "System metrics updated: CPU={:.1}%, Mem={}MB, Threads={}",
                        cpu_usage,
                        memory_mb_u64,
                        active_threads
                    );
                }

                state_clone
                    .update_system_metrics(memory_mb_u64, cpu_usage as f64, active_threads)
                    .await;

                refresh_count += 1;

                tokio::time::sleep(Duration::from_secs(10)).await;
            }
        });
    }
349
    /// Creates a new `AdminState` with default configuration.
    ///
    /// Server addresses are `None` for protocols that are not running.
    /// `continuum_config` falls back to its `Default` when `None`; when a
    /// `virtual_clock` is supplied the continuum engine is driven by it
    /// instead of wall-clock time.
    ///
    /// Note: the monitoring task is NOT started here — call
    /// [`Self::start_system_monitoring`] separately.
    pub fn new(
        http_server_addr: Option<std::net::SocketAddr>,
        ws_server_addr: Option<std::net::SocketAddr>,
        grpc_server_addr: Option<std::net::SocketAddr>,
        graphql_server_addr: Option<std::net::SocketAddr>,
        api_enabled: bool,
        admin_port: u16,
        chaos_api_state: Option<std::sync::Arc<mockforge_chaos::api::ChaosApiState>>,
        latency_injector: Option<
            std::sync::Arc<tokio::sync::RwLock<mockforge_core::latency::LatencyInjector>>,
        >,
        mockai: Option<
            std::sync::Arc<tokio::sync::RwLock<mockforge_core::intelligent_behavior::MockAI>>,
        >,
        continuum_config: Option<mockforge_core::ContinuumConfig>,
        virtual_clock: Option<std::sync::Arc<mockforge_core::VirtualClock>>,
    ) -> Self {
        let start_time = chrono::Utc::now();

        Self {
            http_server_addr,
            ws_server_addr,
            grpc_server_addr,
            graphql_server_addr,
            api_enabled,
            admin_port,
            start_time,
            metrics: Arc::new(RwLock::new(RequestMetrics::default())),
            // System metrics start at zero until the monitoring task samples.
            system_metrics: Arc::new(RwLock::new(SystemMetrics {
                memory_usage_mb: 0,
                cpu_usage_percent: 0.0,
                active_threads: 0,
            })),
            // Default runtime configuration; each section can be changed later
            // via the update_*_config methods.
            config: Arc::new(RwLock::new(ConfigurationState {
                latency_profile: LatencyProfile {
                    name: "default".to_string(),
                    base_ms: 50,
                    jitter_ms: 20,
                    tag_overrides: HashMap::new(),
                },
                fault_config: FaultConfig {
                    enabled: false,
                    failure_rate: 0.0,
                    status_codes: vec![500, 502, 503],
                    active_failures: 0,
                },
                proxy_config: ProxyConfig {
                    enabled: false,
                    upstream_url: None,
                    timeout_seconds: 30,
                    requests_proxied: 0,
                },
                validation_settings: ValidationSettings {
                    mode: "enforce".to_string(),
                    aggregate_errors: true,
                    validate_responses: false,
                    overrides: HashMap::new(),
                },
            })),
            logs: Arc::new(RwLock::new(Vec::new())),
            time_series: Arc::new(RwLock::new(TimeSeriesData::default())),
            restart_status: Arc::new(RwLock::new(RestartStatus {
                in_progress: false,
                initiated_at: None,
                reason: None,
                success: None,
            })),
            smoke_test_results: Arc::new(RwLock::new(Vec::new())),
            import_history: Arc::new(RwLock::new(Vec::new())),
            // Workspaces persist under ./workspaces relative to the CWD.
            workspace_persistence: Arc::new(WorkspacePersistence::new("./workspaces")),
            plugin_registry: Arc::new(RwLock::new(PluginRegistry::new())),
            reality_engine: Arc::new(RwLock::new(mockforge_core::RealityEngine::new())),
            continuum_engine: Arc::new(RwLock::new({
                let config = continuum_config.unwrap_or_default();
                if let Some(clock) = virtual_clock {
                    mockforge_core::RealityContinuumEngine::with_virtual_clock(config, clock)
                } else {
                    mockforge_core::RealityContinuumEngine::new(config)
                }
            })),
            chaos_api_state,
            latency_injector,
            mockai,
        }
    }
449
450 pub async fn record_request(
452 &self,
453 method: &str,
454 path: &str,
455 status_code: u16,
456 response_time_ms: u64,
457 error: Option<String>,
458 ) {
459 let mut metrics = self.metrics.write().await;
460
461 metrics.total_requests += 1;
462 let endpoint = format!("{} {}", method, path);
463 *metrics.requests_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
464
465 if status_code >= 400 {
466 *metrics.errors_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
467 }
468
469 metrics.response_times.push(response_time_ms);
471 if metrics.response_times.len() > 100 {
472 metrics.response_times.remove(0);
473 }
474
475 let endpoint_times = metrics
477 .response_times_by_endpoint
478 .entry(endpoint.clone())
479 .or_insert_with(Vec::new);
480 endpoint_times.push(response_time_ms);
481 if endpoint_times.len() > 50 {
482 endpoint_times.remove(0);
483 }
484
485 metrics.last_request_by_endpoint.insert(endpoint, chrono::Utc::now());
487
488 let total_requests = metrics.total_requests;
490
491 drop(metrics);
493
494 self.update_time_series_on_request(response_time_ms, total_requests).await;
496
497 let mut logs = self.logs.write().await;
499 let log_entry = RequestLog {
500 id: format!("req_{}", total_requests),
501 timestamp: Utc::now(),
502 method: method.to_string(),
503 path: path.to_string(),
504 status_code,
505 response_time_ms,
506 client_ip: None,
507 user_agent: None,
508 headers: HashMap::new(),
509 response_size_bytes: 0,
510 error_message: error,
511 };
512
513 logs.push(log_entry);
514
515 if logs.len() > 1000 {
517 logs.remove(0);
518 }
519 }
520
    /// Returns a snapshot (clone) of the current request metrics.
    pub async fn get_metrics(&self) -> RequestMetrics {
        self.metrics.read().await.clone()
    }
525
526 pub async fn update_system_metrics(&self, memory_mb: u64, cpu_percent: f64, threads: u32) {
528 let mut system_metrics = self.system_metrics.write().await;
529 system_metrics.memory_usage_mb = memory_mb;
530 system_metrics.cpu_usage_percent = cpu_percent;
531 system_metrics.active_threads = threads;
532
533 self.update_time_series_data(memory_mb as f64, cpu_percent).await;
535 }
536
537 async fn update_time_series_data(&self, memory_mb: f64, cpu_percent: f64) {
539 let now = chrono::Utc::now();
540 let mut time_series = self.time_series.write().await;
541
542 time_series.memory_usage.push(TimeSeriesPoint {
544 timestamp: now,
545 value: memory_mb,
546 });
547
548 time_series.cpu_usage.push(TimeSeriesPoint {
550 timestamp: now,
551 value: cpu_percent,
552 });
553
554 let metrics = self.metrics.read().await;
556 time_series.request_count.push(TimeSeriesPoint {
557 timestamp: now,
558 value: metrics.total_requests as f64,
559 });
560
561 let avg_response_time = if !metrics.response_times.is_empty() {
563 metrics.response_times.iter().sum::<u64>() as f64 / metrics.response_times.len() as f64
564 } else {
565 0.0
566 };
567 time_series.response_time.push(TimeSeriesPoint {
568 timestamp: now,
569 value: avg_response_time,
570 });
571
572 const MAX_POINTS: usize = 100;
574 if time_series.memory_usage.len() > MAX_POINTS {
575 time_series.memory_usage.remove(0);
576 }
577 if time_series.cpu_usage.len() > MAX_POINTS {
578 time_series.cpu_usage.remove(0);
579 }
580 if time_series.request_count.len() > MAX_POINTS {
581 time_series.request_count.remove(0);
582 }
583 if time_series.response_time.len() > MAX_POINTS {
584 time_series.response_time.remove(0);
585 }
586 }
587
    /// Returns a snapshot (clone) of the latest host metrics sample.
    pub async fn get_system_metrics(&self) -> SystemMetrics {
        self.system_metrics.read().await.clone()
    }
592
    /// Returns a snapshot (clone) of all dashboard time-series windows.
    pub async fn get_time_series_data(&self) -> TimeSeriesData {
        self.time_series.read().await.clone()
    }
597
    /// Returns a snapshot (clone) of the current restart status.
    pub async fn get_restart_status(&self) -> RestartStatus {
        self.restart_status.read().await.clone()
    }
602
    /// Marks a restart as in progress.
    ///
    /// # Errors
    /// Returns an error if a restart is already in progress; callers must
    /// complete it via [`Self::complete_restart`] first.
    pub async fn initiate_restart(&self, reason: String) -> Result<()> {
        let mut status = self.restart_status.write().await;

        if status.in_progress {
            return Err(Error::generic("Restart already in progress".to_string()));
        }

        status.in_progress = true;
        status.initiated_at = Some(chrono::Utc::now());
        status.reason = Some(reason);
        // Outcome unknown until complete_restart is called.
        status.success = None;

        Ok(())
    }
618
    /// Marks the in-flight restart as finished with the given outcome.
    ///
    /// Does not check that a restart was actually in progress.
    pub async fn complete_restart(&self, success: bool) {
        let mut status = self.restart_status.write().await;
        status.in_progress = false;
        status.success = Some(success);
    }
625
    /// Returns a snapshot (clone) of all stored smoke-test results.
    pub async fn get_smoke_test_results(&self) -> Vec<SmokeTestResult> {
        self.smoke_test_results.read().await.clone()
    }
630
631 pub async fn update_smoke_test_result(&self, result: SmokeTestResult) {
633 let mut results = self.smoke_test_results.write().await;
634
635 if let Some(existing) = results.iter_mut().find(|r| r.id == result.id) {
637 *existing = result;
638 } else {
639 results.push(result);
640 }
641
642 if results.len() > 100 {
644 results.remove(0);
645 }
646 }
647
    /// Removes all stored smoke-test results.
    pub async fn clear_smoke_test_results(&self) {
        let mut results = self.smoke_test_results.write().await;
        results.clear();
    }
653
    /// Appends the request-derived series points (cumulative request count
    /// and per-request response time) after a request is recorded.
    ///
    /// Unlike `update_time_series_data`, this pushes the individual request's
    /// latency rather than a running average.
    async fn update_time_series_on_request(&self, response_time_ms: u64, total_requests: u64) {
        let now = chrono::Utc::now();
        let mut time_series = self.time_series.write().await;

        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: total_requests as f64,
        });

        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: response_time_ms as f64,
        });

        // Keep both series within the same 100-point window used elsewhere.
        const MAX_POINTS: usize = 100;
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }
680
    /// Returns a snapshot (clone) of the current runtime configuration.
    pub async fn get_config(&self) -> ConfigurationState {
        self.config.read().await.clone()
    }
685
    /// Replaces the latency profile's timing parameters.
    ///
    /// The profile `name` is left unchanged; only base delay, jitter, and
    /// per-tag overrides are updated.
    pub async fn update_latency_config(
        &self,
        base_ms: u64,
        jitter_ms: u64,
        tag_overrides: HashMap<String, u64>,
    ) {
        let mut config = self.config.write().await;
        config.latency_profile.base_ms = base_ms;
        config.latency_profile.jitter_ms = jitter_ms;
        config.latency_profile.tag_overrides = tag_overrides;
    }
698
699 pub async fn update_fault_config(
701 &self,
702 enabled: bool,
703 failure_rate: f64,
704 status_codes: Vec<u16>,
705 ) {
706 let mut config = self.config.write().await;
707 config.fault_config.enabled = enabled;
708 config.fault_config.failure_rate = failure_rate;
709 config.fault_config.status_codes = status_codes;
710 }
711
    /// Replaces the proxy settings.
    ///
    /// The `requests_proxied` counter is intentionally left unchanged.
    pub async fn update_proxy_config(
        &self,
        enabled: bool,
        upstream_url: Option<String>,
        timeout_seconds: u64,
    ) {
        let mut config = self.config.write().await;
        config.proxy_config.enabled = enabled;
        config.proxy_config.upstream_url = upstream_url;
        config.proxy_config.timeout_seconds = timeout_seconds;
    }
724
    /// Replaces the validation settings.
    ///
    /// `mode` is a free-form string here ("enforce" is the default set in
    /// `new`); valid values are defined by the consumer of these settings.
    pub async fn update_validation_config(
        &self,
        mode: String,
        aggregate_errors: bool,
        validate_responses: bool,
        overrides: HashMap<String, String>,
    ) {
        let mut config = self.config.write().await;
        config.validation_settings.mode = mode;
        config.validation_settings.aggregate_errors = aggregate_errors;
        config.validation_settings.validate_responses = validate_responses;
        config.validation_settings.overrides = overrides;
    }
739
740 pub async fn get_logs_filtered(&self, filter: &LogFilter) -> Vec<RequestLog> {
742 let logs = self.logs.read().await;
743
744 logs.iter()
745 .rev() .filter(|log| {
747 if let Some(ref method) = filter.method {
748 if log.method != *method {
749 return false;
750 }
751 }
752 if let Some(ref path_pattern) = filter.path_pattern {
753 if !log.path.contains(path_pattern) {
754 return false;
755 }
756 }
757 if let Some(status) = filter.status_code {
758 if log.status_code != status {
759 return false;
760 }
761 }
762 true
763 })
764 .take(filter.limit.unwrap_or(100))
765 .cloned()
766 .collect()
767 }
768
    /// Removes all entries from the in-memory request log.
    pub async fn clear_logs(&self) {
        let mut logs = self.logs.write().await;
        logs.clear();
    }
774}
775
/// Serves the embedded admin UI HTML page.
pub async fn serve_admin_html() -> Html<&'static str> {
    Html(crate::get_admin_html())
}
780
/// Serves the embedded admin UI stylesheet with a `text/css` content type.
pub async fn serve_admin_css() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "text/css")], crate::get_admin_css())
}
785
/// Serves the embedded admin UI JavaScript with an `application/javascript`
/// content type.
pub async fn serve_admin_js() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "application/javascript")], crate::get_admin_js())
}
790
/// Builds the full dashboard payload: server statuses, system info, route
/// table, aggregate metrics, and recent logs.
///
/// Metrics are recomputed from the global request logger when one is
/// installed (so all protocols are covered); otherwise the handler falls back
/// to the admin-local `AdminState` metrics and log buffer.
pub async fn get_dashboard(State(state): State<AdminState>) -> Json<ApiResponse<DashboardData>> {
    let uptime = Utc::now().signed_duration_since(state.start_time).num_seconds() as u64;

    let system_metrics = state.get_system_metrics().await;
    let _config = state.get_config().await;

    let (recent_logs, calculated_metrics): (Vec<RequestLog>, RequestMetrics) =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            // Aggregate over ALL retained logs; a separate call fetches just
            // the 20 newest for display.
            let all_logs = global_logger.get_recent_logs(None).await;
            let recent_logs_subset = global_logger.get_recent_logs(Some(20)).await;

            let total_requests = all_logs.len() as u64;
            let mut requests_by_endpoint = HashMap::new();
            let mut errors_by_endpoint = HashMap::new();
            let mut response_times = Vec::new();
            let mut last_request_by_endpoint = HashMap::new();

            for log in &all_logs {
                let endpoint_key = format!("{} {}", log.method, log.path);
                *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;

                if log.status_code >= 400 {
                    *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
                }

                response_times.push(log.response_time_ms);
                last_request_by_endpoint.insert(endpoint_key, log.timestamp);
            }

            let calculated_metrics = RequestMetrics {
                total_requests,
                // Not derivable from logs; reported as 0 in this path.
                active_connections: 0,
                requests_by_endpoint,
                response_times,
                // Per-endpoint windows are not rebuilt here; left empty.
                response_times_by_endpoint: HashMap::new(),
                errors_by_endpoint,
                last_request_by_endpoint,
            };

            // Convert the logger's entry type into the admin RequestLog model.
            let recent_logs = recent_logs_subset
                .into_iter()
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            (recent_logs, calculated_metrics)
        } else {
            // Fallback: admin-local buffers (note: only 10 recent logs here
            // vs 20 in the global-logger path).
            let logs = state.logs.read().await;
            let recent_logs = logs.iter().rev().take(10).cloned().collect();
            let metrics = state.get_metrics().await;
            (recent_logs, metrics)
        };

    let metrics = calculated_metrics;

    let system_info = SystemInfo {
        version: env!("CARGO_PKG_VERSION").to_string(),
        uptime_seconds: uptime,
        memory_usage_mb: system_metrics.memory_usage_mb,
        cpu_usage_percent: system_metrics.cpu_usage_percent,
        active_threads: system_metrics.active_threads as usize,
        total_routes: metrics.requests_by_endpoint.len(),
        total_fixtures: count_fixtures().unwrap_or(0),
    };

    let servers = vec![
        ServerStatus {
            server_type: "HTTP".to_string(),
            address: state.http_server_addr.map(|addr| addr.to_string()),
            running: state.http_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections,
            total_requests: count_requests_by_server_type(&metrics, "HTTP"),
        },
        ServerStatus {
            server_type: "WebSocket".to_string(),
            address: state.ws_server_addr.map(|addr| addr.to_string()),
            running: state.ws_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            // NOTE(review): /2 and /3 below look like placeholder heuristics
            // for splitting the single connection counter across protocols —
            // confirm intent before relying on these values.
            active_connections: metrics.active_connections / 2,
            total_requests: count_requests_by_server_type(&metrics, "WebSocket"),
        },
        ServerStatus {
            server_type: "gRPC".to_string(),
            address: state.grpc_server_addr.map(|addr| addr.to_string()),
            running: state.grpc_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 3,
            total_requests: count_requests_by_server_type(&metrics, "gRPC"),
        },
    ];

    // Rebuild the route table from the "METHOD path" aggregation keys.
    let mut routes = Vec::new();
    for (endpoint, count) in &metrics.requests_by_endpoint {
        let parts: Vec<&str> = endpoint.splitn(2, ' ').collect();
        if parts.len() == 2 {
            let method = parts[0].to_string();
            let path = parts[1].to_string();
            let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);

            routes.push(RouteInfo {
                method: Some(method.clone()),
                path: path.clone(),
                priority: 0,
                has_fixtures: route_has_fixtures(&method, &path),
                latency_ms: calculate_endpoint_latency(&metrics, endpoint),
                request_count: *count,
                last_request: get_endpoint_last_request(&metrics, endpoint),
                error_count,
            });
        }
    }

    let dashboard = DashboardData {
        server_info: ServerInfo {
            version: env!("CARGO_PKG_VERSION").to_string(),
            // Build metadata comes from vergen env vars when present.
            build_time: option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("unknown").to_string(),
            git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("unknown").to_string(),
            http_server: state.http_server_addr.map(|addr| addr.to_string()),
            ws_server: state.ws_server_addr.map(|addr| addr.to_string()),
            grpc_server: state.grpc_server_addr.map(|addr| addr.to_string()),
            graphql_server: state.graphql_server_addr.map(|addr| addr.to_string()),
            api_enabled: state.api_enabled,
            admin_port: state.admin_port,
        },
        system_info: DashboardSystemInfo {
            os: std::env::consts::OS.to_string(),
            arch: std::env::consts::ARCH.to_string(),
            uptime,
            // Convert MB back to bytes for this field.
            memory_usage: system_metrics.memory_usage_mb * 1024 * 1024,
        },
        metrics: SimpleMetricsData {
            total_requests: metrics.requests_by_endpoint.values().sum(),
            active_requests: metrics.active_connections,
            average_response_time: if metrics.response_times.is_empty() {
                0.0
            } else {
                metrics.response_times.iter().sum::<u64>() as f64
                    / metrics.response_times.len() as f64
            },
            error_rate: {
                let total_requests = metrics.requests_by_endpoint.values().sum::<u64>();
                let total_errors = metrics.errors_by_endpoint.values().sum::<u64>();
                if total_requests == 0 {
                    0.0
                } else {
                    total_errors as f64 / total_requests as f64
                }
            },
        },
        servers,
        recent_logs,
        system: system_info,
    };

    Json(ApiResponse::success(dashboard))
}
970
/// Proxies the route list from the mock HTTP server's `/__mockforge/routes`
/// introspection endpoint.
///
/// Falls back to an empty route list (still 200 OK) when the HTTP server is
/// not running, unreachable, or returns a non-success status.
pub async fn get_routes(State(state): State<AdminState>) -> impl IntoResponse {
    if let Some(http_addr) = state.http_server_addr {
        let url = format!("http://{}/__mockforge/routes", http_addr);
        // Best-effort: any failure (connect, non-2xx, body read) falls
        // through to the empty default below.
        if let Ok(response) = reqwest::get(&url).await {
            if response.status().is_success() {
                if let Ok(body) = response.text().await {
                    return (StatusCode::OK, [("content-type", "application/json")], body);
                }
            }
        }
    }

    (
        StatusCode::OK,
        [("content-type", "application/json")],
        r#"{"routes":[]}"#.to_string(),
    )
}
992
993pub async fn get_server_info(State(state): State<AdminState>) -> Json<serde_json::Value> {
995 Json(json!({
996 "http_server": state.http_server_addr.map(|addr| addr.to_string()),
997 "ws_server": state.ws_server_addr.map(|addr| addr.to_string()),
998 "grpc_server": state.grpc_server_addr.map(|addr| addr.to_string()),
999 "admin_port": state.admin_port
1000 }))
1001}
1002
/// Health-check endpoint.
///
/// NOTE(review): all services are reported "healthy" unconditionally — no
/// actual probing happens here; confirm whether that is intentional.
pub async fn get_health() -> Json<HealthCheck> {
    Json(
        HealthCheck::healthy()
            .with_service("http".to_string(), "healthy".to_string())
            .with_service("websocket".to_string(), "healthy".to_string())
            .with_service("grpc".to_string(), "healthy".to_string()),
    )
}
1012
/// Returns request logs filtered by optional query parameters:
/// `method`, `path` (substring match), `status`, and `limit`.
///
/// Prefers the global request logger when installed; otherwise falls back to
/// the admin-local log buffer via `get_logs_filtered`.
pub async fn get_logs(
    State(state): State<AdminState>,
    Query(params): Query<HashMap<String, String>>,
) -> Json<ApiResponse<Vec<RequestLog>>> {
    let mut filter = LogFilter::default();

    if let Some(method) = params.get("method") {
        filter.method = Some(method.clone());
    }
    if let Some(path) = params.get("path") {
        filter.path_pattern = Some(path.clone());
    }
    // Unparseable status/limit values are silently ignored.
    if let Some(status) = params.get("status").and_then(|s| s.parse().ok()) {
        filter.status_code = Some(status);
    }
    if let Some(limit) = params.get("limit").and_then(|s| s.parse().ok()) {
        filter.limit = Some(limit);
    }

    let logs = if let Some(global_logger) = mockforge_core::get_global_logger() {
        // NOTE(review): the limit is applied BEFORE filtering here, so fewer
        // than `limit` matching entries may be returned even when more exist.
        let centralized_logs = global_logger.get_recent_logs(filter.limit).await;

        centralized_logs
            .into_iter()
            .filter(|log| {
                if let Some(ref method) = filter.method {
                    if log.method != *method {
                        return false;
                    }
                }
                if let Some(ref path_pattern) = filter.path_pattern {
                    if !log.path.contains(path_pattern) {
                        return false;
                    }
                }
                if let Some(status) = filter.status_code {
                    if log.status_code != status {
                        return false;
                    }
                }
                true
            })
            // Convert the logger's entry type into the admin RequestLog model.
            .map(|log| RequestLog {
                id: log.id,
                timestamp: log.timestamp,
                method: log.method,
                path: log.path,
                status_code: log.status_code,
                response_time_ms: log.response_time_ms,
                client_ip: log.client_ip,
                user_agent: log.user_agent,
                headers: log.headers,
                response_size_bytes: log.response_size_bytes,
                error_message: log.error_message,
            })
            .collect()
    } else {
        state.get_logs_filtered(&filter).await
    };

    Json(ApiResponse::success(logs))
}
1080
/// Returns the reality-engine trace metadata attached to a logged request.
///
/// Responds with an error payload when the request id is unknown or no
/// global logger is installed; success with `None` means the request exists
/// but carried no reality metadata.
pub async fn get_reality_trace(
    Path(request_id): Path<String>,
) -> Json<ApiResponse<Option<mockforge_core::request_logger::RealityTraceMetadata>>> {
    if let Some(global_logger) = mockforge_core::get_global_logger() {
        // Linear scan over retained logs; only entries still in the buffer
        // can be found.
        let logs = global_logger.get_recent_logs(None).await;
        if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
            Json(ApiResponse::success(log_entry.reality_metadata))
        } else {
            Json(ApiResponse::error(format!("Request {} not found", request_id)))
        }
    } else {
        Json(ApiResponse::error("Request logger not initialized".to_string()))
    }
}
1098
/// Returns the response-generation trace recorded for a logged request.
///
/// The trace is stored as a JSON string under the `response_generation_trace`
/// metadata key; a missing key or unparsable value yields `None` (success).
pub async fn get_response_trace(
    Path(request_id): Path<String>,
) -> Json<ApiResponse<Option<serde_json::Value>>> {
    if let Some(global_logger) = mockforge_core::get_global_logger() {
        let logs = global_logger.get_recent_logs(None).await;
        if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
            // Parse failure is treated the same as absence (None).
            let trace = log_entry
                .metadata
                .get("response_generation_trace")
                .and_then(|s| serde_json::from_str::<serde_json::Value>(s).ok());
            Json(ApiResponse::success(trace))
        } else {
            Json(ApiResponse::error(format!("Request {} not found", request_id)))
        }
    } else {
        Json(ApiResponse::error("Request logger not initialized".to_string()))
    }
}
1122
1123const RECENT_LOGS_LIMIT: usize = 20;
1125const RECENT_LOGS_TTL_MINUTES: i64 = 5;
1126
/// Server-Sent Events endpoint streaming newly observed request logs.
///
/// Polls the global logger every 500ms. Stream state is the set of already
/// sent log ids, so each log is delivered at most once per connection. Only
/// logs newer than `RECENT_LOGS_TTL_MINUTES` are sent, and only the newest
/// `RECENT_LOGS_LIMIT` are examined per poll. Emits `new_logs` events with a
/// JSON array payload, and empty `keep_alive` events otherwise.
///
/// NOTE(review): `seen_ids` grows without bound for the lifetime of the
/// connection — confirm whether this matters for long-lived dashboards.
pub async fn logs_sse(
    State(_state): State<AdminState>,
) -> Sse<impl Stream<Item = std::result::Result<Event, Infallible>>> {
    tracing::info!("SSE endpoint /logs/sse accessed - starting real-time log streaming for recent requests only");

    let stream = stream::unfold(std::collections::HashSet::new(), |mut seen_ids| async move {
        // Poll interval between checks for new logs.
        tokio::time::sleep(Duration::from_millis(500)).await;

        if let Some(global_logger) = mockforge_core::get_global_logger() {
            let centralized_logs = global_logger.get_recent_logs(Some(RECENT_LOGS_LIMIT)).await;

            tracing::debug!(
                "SSE: Checking logs - total logs: {}, seen logs: {}",
                centralized_logs.len(),
                seen_ids.len()
            );

            let now = chrono::Utc::now();
            let ttl_cutoff = now - chrono::Duration::minutes(RECENT_LOGS_TTL_MINUTES);

            // New = within the TTL window AND not previously sent.
            let new_logs: Vec<RequestLog> = centralized_logs
                .into_iter()
                .filter(|log| {
                    log.timestamp > ttl_cutoff && !seen_ids.contains(&log.id)
                })
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            for log in &new_logs {
                seen_ids.insert(log.id.clone());
            }

            if !new_logs.is_empty() {
                tracing::info!("SSE: Sending {} new logs to client", new_logs.len());

                // Serialization failure degrades to an empty payload rather
                // than dropping the event.
                let event_data = serde_json::to_string(&new_logs).unwrap_or_default();
                let event = Ok(Event::default().event("new_logs").data(event_data));

                return Some((event, seen_ids));
            }
        }

        // Nothing new (or no logger): emit a keep-alive so the unfold
        // continues and the connection stays warm.
        let event = Ok(Event::default().event("keep_alive").data(""));
        Some((event, seen_ids))
    });

    // Protocol-level keep-alive on top of the application keep_alive events.
    Sse::new(stream).keep_alive(
        axum::response::sse::KeepAlive::new()
            .interval(Duration::from_secs(15))
            .text("keep-alive-text"),
    )
}
1199
1200pub async fn get_metrics(State(state): State<AdminState>) -> Json<ApiResponse<MetricsData>> {
1202 let metrics = if let Some(global_logger) = mockforge_core::get_global_logger() {
1204 let all_logs = global_logger.get_recent_logs(None).await;
1205
1206 let total_requests = all_logs.len() as u64;
1207 let mut requests_by_endpoint = HashMap::new();
1208 let mut errors_by_endpoint = HashMap::new();
1209 let mut response_times = Vec::new();
1210 let mut last_request_by_endpoint = HashMap::new();
1211
1212 for log in &all_logs {
1213 let endpoint_key = format!("{} {}", log.method, log.path);
1214 *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1215
1216 if log.status_code >= 400 {
1217 *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1218 }
1219
1220 response_times.push(log.response_time_ms);
1221 last_request_by_endpoint.insert(endpoint_key, log.timestamp);
1222 }
1223
1224 RequestMetrics {
1225 total_requests,
1226 active_connections: 0,
1227 requests_by_endpoint,
1228 response_times,
1229 response_times_by_endpoint: HashMap::new(),
1230 errors_by_endpoint,
1231 last_request_by_endpoint,
1232 }
1233 } else {
1234 state.get_metrics().await
1235 };
1236
1237 let system_metrics = state.get_system_metrics().await;
1238 let time_series = state.get_time_series_data().await;
1239
1240 fn calculate_percentile(sorted_data: &[u64], percentile: f64) -> u64 {
1242 if sorted_data.is_empty() {
1243 return 0;
1244 }
1245 let idx = ((sorted_data.len() as f64) * percentile).ceil() as usize;
1246 let idx = idx.min(sorted_data.len().saturating_sub(1));
1247 sorted_data[idx]
1248 }
1249
1250 let mut response_times = metrics.response_times.clone();
1252 response_times.sort();
1253
1254 let p50 = calculate_percentile(&response_times, 0.50);
1255 let p75 = calculate_percentile(&response_times, 0.75);
1256 let p90 = calculate_percentile(&response_times, 0.90);
1257 let p95 = calculate_percentile(&response_times, 0.95);
1258 let p99 = calculate_percentile(&response_times, 0.99);
1259 let p999 = calculate_percentile(&response_times, 0.999);
1260
1261 let mut response_times_by_endpoint: HashMap<String, Vec<u64>> = HashMap::new();
1263 if let Some(global_logger) = mockforge_core::get_global_logger() {
1264 let all_logs = global_logger.get_recent_logs(None).await;
1265 for log in &all_logs {
1266 let endpoint_key = format!("{} {}", log.method, log.path);
1267 response_times_by_endpoint
1268 .entry(endpoint_key)
1269 .or_default()
1270 .push(log.response_time_ms);
1271 }
1272 }
1273
1274 let mut endpoint_percentiles: HashMap<String, HashMap<String, u64>> = HashMap::new();
1276 for (endpoint, times) in &mut response_times_by_endpoint {
1277 times.sort();
1278 if !times.is_empty() {
1279 endpoint_percentiles.insert(
1280 endpoint.clone(),
1281 HashMap::from([
1282 ("p50".to_string(), calculate_percentile(times, 0.50)),
1283 ("p75".to_string(), calculate_percentile(times, 0.75)),
1284 ("p90".to_string(), calculate_percentile(times, 0.90)),
1285 ("p95".to_string(), calculate_percentile(times, 0.95)),
1286 ("p99".to_string(), calculate_percentile(times, 0.99)),
1287 ("p999".to_string(), calculate_percentile(times, 0.999)),
1288 ]),
1289 );
1290 }
1291 }
1292
1293 let mut error_rate_by_endpoint = HashMap::new();
1295 for (endpoint, total_count) in &metrics.requests_by_endpoint {
1296 let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
1297 let error_rate = if *total_count > 0 {
1298 error_count as f64 / *total_count as f64
1299 } else {
1300 0.0
1301 };
1302 error_rate_by_endpoint.insert(endpoint.clone(), error_rate);
1303 }
1304
1305 let memory_usage_over_time = if time_series.memory_usage.is_empty() {
1308 vec![(Utc::now(), system_metrics.memory_usage_mb)]
1309 } else {
1310 time_series
1311 .memory_usage
1312 .iter()
1313 .map(|point| (point.timestamp, point.value as u64))
1314 .collect()
1315 };
1316
1317 let cpu_usage_over_time = if time_series.cpu_usage.is_empty() {
1318 vec![(Utc::now(), system_metrics.cpu_usage_percent)]
1319 } else {
1320 time_series
1321 .cpu_usage
1322 .iter()
1323 .map(|point| (point.timestamp, point.value))
1324 .collect()
1325 };
1326
1327 let latency_over_time: Vec<(chrono::DateTime<chrono::Utc>, u64)> =
1329 if let Some(global_logger) = mockforge_core::get_global_logger() {
1330 let all_logs = global_logger.get_recent_logs(Some(100)).await;
1331 all_logs.iter().map(|log| (log.timestamp, log.response_time_ms)).collect()
1332 } else {
1333 Vec::new()
1334 };
1335
1336 let metrics_data = MetricsData {
1337 requests_by_endpoint: metrics.requests_by_endpoint,
1338 response_time_percentiles: HashMap::from([
1339 ("p50".to_string(), p50),
1340 ("p75".to_string(), p75),
1341 ("p90".to_string(), p90),
1342 ("p95".to_string(), p95),
1343 ("p99".to_string(), p99),
1344 ("p999".to_string(), p999),
1345 ]),
1346 endpoint_percentiles: Some(endpoint_percentiles),
1347 latency_over_time: Some(latency_over_time),
1348 error_rate_by_endpoint,
1349 memory_usage_over_time,
1350 cpu_usage_over_time,
1351 };
1352
1353 Json(ApiResponse::success(metrics_data))
1354}
1355
1356pub async fn update_latency(
1358 State(state): State<AdminState>,
1359 headers: axum::http::HeaderMap,
1360 Json(update): Json<ConfigUpdate>,
1361) -> Json<ApiResponse<String>> {
1362 use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
1363 use crate::rbac::{extract_user_context, get_default_user_context};
1364
1365 if update.config_type != "latency" {
1366 return Json(ApiResponse::error("Invalid config type".to_string()));
1367 }
1368
1369 let base_ms = update.data.get("base_ms").and_then(|v| v.as_u64()).unwrap_or(50);
1371 let jitter_ms = update.data.get("jitter_ms").and_then(|v| v.as_u64()).unwrap_or(20);
1372
1373 let tag_overrides: std::collections::HashMap<String, u64> = update
1374 .data
1375 .get("tag_overrides")
1376 .and_then(|v| v.as_object())
1377 .map(|obj| obj.iter().filter_map(|(k, v)| v.as_u64().map(|val| (k.clone(), val))).collect())
1378 .unwrap_or_default();
1379
1380 state.update_latency_config(base_ms, jitter_ms, tag_overrides.clone()).await;
1382
1383 if let Some(audit_store) = get_global_audit_store() {
1385 let metadata = serde_json::json!({
1386 "base_ms": base_ms,
1387 "jitter_ms": jitter_ms,
1388 "tag_overrides": tag_overrides,
1389 });
1390 let mut audit_log = create_audit_log(
1391 AdminActionType::ConfigLatencyUpdated,
1392 format!("Latency profile updated: base_ms={}, jitter_ms={}", base_ms, jitter_ms),
1393 None,
1394 true,
1395 None,
1396 Some(metadata),
1397 );
1398
1399 if let Some(user_ctx) = extract_user_context(&headers).or_else(get_default_user_context) {
1401 audit_log.user_id = Some(user_ctx.user_id);
1402 audit_log.username = Some(user_ctx.username);
1403 }
1404
1405 if let Some(ip) = headers
1407 .get("x-forwarded-for")
1408 .or_else(|| headers.get("x-real-ip"))
1409 .and_then(|h| h.to_str().ok())
1410 {
1411 audit_log.ip_address = Some(ip.to_string());
1412 }
1413
1414 if let Some(ua) = headers.get("user-agent").and_then(|h| h.to_str().ok()) {
1416 audit_log.user_agent = Some(ua.to_string());
1417 }
1418
1419 audit_store.record(audit_log).await;
1420 }
1421
1422 tracing::info!("Updated latency profile: base_ms={}, jitter_ms={}", base_ms, jitter_ms);
1423
1424 Json(ApiResponse::success("Latency profile updated".to_string()))
1425}
1426
1427pub async fn update_faults(
1429 State(state): State<AdminState>,
1430 Json(update): Json<ConfigUpdate>,
1431) -> Json<ApiResponse<String>> {
1432 if update.config_type != "faults" {
1433 return Json(ApiResponse::error("Invalid config type".to_string()));
1434 }
1435
1436 let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1438
1439 let failure_rate = update.data.get("failure_rate").and_then(|v| v.as_f64()).unwrap_or(0.0);
1440
1441 let status_codes = update
1442 .data
1443 .get("status_codes")
1444 .and_then(|v| v.as_array())
1445 .map(|arr| arr.iter().filter_map(|v| v.as_u64().map(|n| n as u16)).collect())
1446 .unwrap_or_else(|| vec![500, 502, 503]);
1447
1448 state.update_fault_config(enabled, failure_rate, status_codes).await;
1450
1451 tracing::info!(
1452 "Updated fault configuration: enabled={}, failure_rate={}",
1453 enabled,
1454 failure_rate
1455 );
1456
1457 Json(ApiResponse::success("Fault configuration updated".to_string()))
1458}
1459
1460pub async fn update_proxy(
1462 State(state): State<AdminState>,
1463 Json(update): Json<ConfigUpdate>,
1464) -> Json<ApiResponse<String>> {
1465 if update.config_type != "proxy" {
1466 return Json(ApiResponse::error("Invalid config type".to_string()));
1467 }
1468
1469 let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1471
1472 let upstream_url =
1473 update.data.get("upstream_url").and_then(|v| v.as_str()).map(|s| s.to_string());
1474
1475 let timeout_seconds = update.data.get("timeout_seconds").and_then(|v| v.as_u64()).unwrap_or(30);
1476
1477 state.update_proxy_config(enabled, upstream_url.clone(), timeout_seconds).await;
1479
1480 tracing::info!(
1481 "Updated proxy configuration: enabled={}, upstream_url={:?}",
1482 enabled,
1483 upstream_url
1484 );
1485
1486 Json(ApiResponse::success("Proxy configuration updated".to_string()))
1487}
1488
/// POST handler that wipes all recorded request logs from admin state.
pub async fn clear_logs(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    state.clear_logs().await;
    tracing::info!("Cleared all request logs");

    Json(ApiResponse::success("Logs cleared".to_string()))
}
1497
/// POST handler that initiates an asynchronous server restart.
///
/// Rejects the request when a restart is already in progress. The initiation
/// attempt is audited (success or failure) before the HTTP response is built;
/// the actual restart runs on a spawned task so this handler can return
/// immediately with an "initiated" acknowledgement.
pub async fn restart_servers(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
    // Guard: never start a second restart while one is running.
    let current_status = state.get_restart_status().await;
    if current_status.in_progress {
        return Json(ApiResponse::error("Server restart already in progress".to_string()));
    }

    let restart_result = state
        .initiate_restart("Manual restart requested via admin UI".to_string())
        .await;

    // Capture outcome before the result is consumed below.
    let success = restart_result.is_ok();
    let error_msg = restart_result.as_ref().err().map(|e| format!("{}", e));

    // Audit both successful and failed initiation attempts (best-effort).
    if let Some(audit_store) = get_global_audit_store() {
        let audit_log = create_audit_log(
            AdminActionType::ServerRestarted,
            "Server restart initiated via admin UI".to_string(),
            None,
            success,
            error_msg.clone(),
            None,
        );
        audit_store.record(audit_log).await;
    }

    if let Err(e) = restart_result {
        return Json(ApiResponse::error(format!("Failed to initiate restart: {}", e)));
    }

    // Run the restart in the background; completion is reported back through
    // AdminState so the UI can poll /restart-status.
    let state_clone = state.clone();
    tokio::spawn(async move {
        if let Err(e) = perform_server_restart(&state_clone).await {
            tracing::error!("Server restart failed: {}", e);
            state_clone.complete_restart(false).await;
        } else {
            tracing::info!("Server restart completed successfully");
            state_clone.complete_restart(true).await;
        }
    });

    tracing::info!("Server restart initiated via admin UI");
    Json(ApiResponse::success(
        "Server restart initiated. Please wait for completion.".to_string(),
    ))
}
1549
1550async fn perform_server_restart(_state: &AdminState) -> Result<()> {
1552 let current_pid = std::process::id();
1554 tracing::info!("Initiating restart for process PID: {}", current_pid);
1555
1556 let parent_pid = get_parent_process_id(current_pid).await?;
1558 tracing::info!("Found parent process PID: {}", parent_pid);
1559
1560 if let Ok(()) = restart_via_parent_signal(parent_pid).await {
1562 tracing::info!("Restart initiated via parent process signal");
1563 return Ok(());
1564 }
1565
1566 if let Ok(()) = restart_via_process_replacement().await {
1568 tracing::info!("Restart initiated via process replacement");
1569 return Ok(());
1570 }
1571
1572 restart_via_script().await
1574}
1575
1576async fn get_parent_process_id(pid: u32) -> Result<u32> {
1578 #[cfg(target_os = "linux")]
1580 {
1581 let stat_path = format!("/proc/{}/stat", pid);
1583 if let Ok(ppid) = tokio::task::spawn_blocking(move || -> Result<u32> {
1584 let content = std::fs::read_to_string(&stat_path)
1585 .map_err(|e| Error::generic(format!("Failed to read {}: {}", stat_path, e)))?;
1586
1587 let fields: Vec<&str> = content.split_whitespace().collect();
1588 if fields.len() > 3 {
1589 fields[3]
1590 .parse::<u32>()
1591 .map_err(|e| Error::generic(format!("Failed to parse PPID: {}", e)))
1592 } else {
1593 Err(Error::generic("Insufficient fields in /proc/pid/stat".to_string()))
1594 }
1595 })
1596 .await
1597 {
1598 return ppid;
1599 }
1600 }
1601
1602 Ok(1) }
1605
1606async fn restart_via_parent_signal(parent_pid: u32) -> Result<()> {
1608 #[cfg(unix)]
1609 {
1610 use std::process::Command;
1611
1612 let output = Command::new("kill")
1614 .args(["-TERM", &parent_pid.to_string()])
1615 .output()
1616 .map_err(|e| Error::generic(format!("Failed to send signal: {}", e)))?;
1617
1618 if !output.status.success() {
1619 return Err(Error::generic(
1620 "Failed to send restart signal to parent process".to_string(),
1621 ));
1622 }
1623
1624 tokio::time::sleep(Duration::from_millis(100)).await;
1626 Ok(())
1627 }
1628
1629 #[cfg(not(unix))]
1630 {
1631 Err(Error::generic(
1632 "Signal-based restart not supported on this platform".to_string(),
1633 ))
1634 }
1635}
1636
1637async fn restart_via_process_replacement() -> Result<()> {
1639 let current_exe = std::env::current_exe()
1641 .map_err(|e| Error::generic(format!("Failed to get current executable: {}", e)))?;
1642
1643 let args: Vec<String> = std::env::args().collect();
1645
1646 tracing::info!("Restarting with command: {:?}", args);
1647
1648 let mut child = Command::new(¤t_exe)
1650 .args(&args[1..]) .stdout(Stdio::inherit())
1652 .stderr(Stdio::inherit())
1653 .spawn()
1654 .map_err(|e| Error::generic(format!("Failed to start new process: {}", e)))?;
1655
1656 tokio::time::sleep(Duration::from_millis(500)).await;
1658
1659 match child.try_wait() {
1661 Ok(Some(status)) => {
1662 if status.success() {
1663 tracing::info!("New process started successfully");
1664 Ok(())
1665 } else {
1666 Err(Error::generic("New process exited with error".to_string()))
1667 }
1668 }
1669 Ok(None) => {
1670 tracing::info!("New process is running, exiting current process");
1671 std::process::exit(0);
1673 }
1674 Err(e) => Err(Error::generic(format!("Failed to check new process status: {}", e))),
1675 }
1676}
1677
1678async fn restart_via_script() -> Result<()> {
1680 let script_paths = ["./scripts/restart.sh", "./restart.sh", "restart.sh"];
1682
1683 for script_path in &script_paths {
1684 if std::path::Path::new(script_path).exists() {
1685 tracing::info!("Using restart script: {}", script_path);
1686
1687 let output = Command::new("bash")
1688 .arg(script_path)
1689 .output()
1690 .map_err(|e| Error::generic(format!("Failed to execute restart script: {}", e)))?;
1691
1692 if output.status.success() {
1693 return Ok(());
1694 } else {
1695 tracing::warn!(
1696 "Restart script failed: {}",
1697 String::from_utf8_lossy(&output.stderr)
1698 );
1699 }
1700 }
1701 }
1702
1703 let clear_script = "./scripts/clear-ports.sh";
1705 if std::path::Path::new(clear_script).exists() {
1706 tracing::info!("Using clear-ports script as fallback");
1707
1708 let _ = Command::new("bash").arg(clear_script).output();
1709 }
1710
1711 Err(Error::generic(
1712 "No restart mechanism available. Please restart manually.".to_string(),
1713 ))
1714}
1715
/// GET handler returning the current restart progress/state from admin state.
pub async fn get_restart_status(
    State(state): State<AdminState>,
) -> Json<ApiResponse<RestartStatus>> {
    let status = state.get_restart_status().await;
    Json(ApiResponse::success(status))
}
1723
1724pub async fn get_audit_logs(
1726 Query(params): Query<std::collections::HashMap<String, String>>,
1727) -> Json<ApiResponse<Vec<crate::audit::AdminAuditLog>>> {
1728 use crate::audit::{get_global_audit_store, AdminActionType};
1729
1730 let action_type_str = params.get("action_type");
1731 let user_id = params.get("user_id").map(|s| s.as_str());
1732 let limit = params.get("limit").and_then(|s| s.parse::<usize>().ok());
1733 let offset = params.get("offset").and_then(|s| s.parse::<usize>().ok());
1734
1735 let action_type = action_type_str.and_then(|s| {
1737 match s.as_str() {
1739 "config_latency_updated" => Some(AdminActionType::ConfigLatencyUpdated),
1740 "config_faults_updated" => Some(AdminActionType::ConfigFaultsUpdated),
1741 "server_restarted" => Some(AdminActionType::ServerRestarted),
1742 "logs_cleared" => Some(AdminActionType::LogsCleared),
1743 _ => None,
1744 }
1745 });
1746
1747 if let Some(audit_store) = get_global_audit_store() {
1748 let logs = audit_store.get_logs(action_type, user_id, limit, offset).await;
1749 Json(ApiResponse::success(logs))
1750 } else {
1751 Json(ApiResponse::error("Audit logging not initialized".to_string()))
1752 }
1753}
1754
1755pub async fn get_audit_stats() -> Json<ApiResponse<crate::audit::AuditLogStats>> {
1757 use crate::audit::get_global_audit_store;
1758
1759 if let Some(audit_store) = get_global_audit_store() {
1760 let stats = audit_store.get_stats().await;
1761 Json(ApiResponse::success(stats))
1762 } else {
1763 Json(ApiResponse::error("Audit logging not initialized".to_string()))
1764 }
1765}
1766
1767pub async fn get_config(State(state): State<AdminState>) -> Json<ApiResponse<serde_json::Value>> {
1769 let config_state = state.get_config().await;
1770
1771 let config = json!({
1772 "latency": {
1773 "enabled": true,
1774 "base_ms": config_state.latency_profile.base_ms,
1775 "jitter_ms": config_state.latency_profile.jitter_ms,
1776 "tag_overrides": config_state.latency_profile.tag_overrides
1777 },
1778 "faults": {
1779 "enabled": config_state.fault_config.enabled,
1780 "failure_rate": config_state.fault_config.failure_rate,
1781 "status_codes": config_state.fault_config.status_codes
1782 },
1783 "proxy": {
1784 "enabled": config_state.proxy_config.enabled,
1785 "upstream_url": config_state.proxy_config.upstream_url,
1786 "timeout_seconds": config_state.proxy_config.timeout_seconds
1787 },
1788 "validation": {
1789 "mode": config_state.validation_settings.mode,
1790 "aggregate_errors": config_state.validation_settings.aggregate_errors,
1791 "validate_responses": config_state.validation_settings.validate_responses,
1792 "overrides": config_state.validation_settings.overrides
1793 }
1794 });
1795
1796 Json(ApiResponse::success(config))
1797}
1798
1799pub fn count_fixtures() -> Result<usize> {
1801 let fixtures_dir =
1803 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1804 let fixtures_path = std::path::Path::new(&fixtures_dir);
1805
1806 if !fixtures_path.exists() {
1807 return Ok(0);
1808 }
1809
1810 let mut total_count = 0;
1811
1812 let http_fixtures_path = fixtures_path.join("http");
1814 if http_fixtures_path.exists() {
1815 total_count += count_fixtures_in_directory(&http_fixtures_path)?;
1816 }
1817
1818 let ws_fixtures_path = fixtures_path.join("websocket");
1820 if ws_fixtures_path.exists() {
1821 total_count += count_fixtures_in_directory(&ws_fixtures_path)?;
1822 }
1823
1824 let grpc_fixtures_path = fixtures_path.join("grpc");
1826 if grpc_fixtures_path.exists() {
1827 total_count += count_fixtures_in_directory(&grpc_fixtures_path)?;
1828 }
1829
1830 Ok(total_count)
1831}
1832
1833fn count_fixtures_in_directory(dir_path: &std::path::Path) -> Result<usize> {
1835 let mut count = 0;
1836
1837 if let Ok(entries) = std::fs::read_dir(dir_path) {
1838 for entry in entries {
1839 let entry = entry
1840 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
1841 let path = entry.path();
1842
1843 if path.is_dir() {
1844 count += count_fixtures_in_directory(&path)?;
1846 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
1847 count += 1;
1849 }
1850 }
1851 }
1852
1853 Ok(count)
1854}
1855
/// Return true when at least one saved fixture exists for the given route.
///
/// HTTP fixtures live under `<fixtures>/http/<method>/<path-hash>/`, and
/// WebSocket fixtures (pseudo-method "WS") under
/// `<fixtures>/websocket/<path-hash>/`, where the path hash replaces `/` and
/// `:` with `_`. The duplicated "directory contains a .json file" scan has
/// been extracted into a shared helper.
pub fn route_has_fixtures(method: &str, path: &str) -> bool {
    // True when `dir` directly contains at least one `.json` file.
    fn dir_contains_json(dir: &std::path::Path) -> bool {
        std::fs::read_dir(dir)
            .map(|entries| {
                entries
                    .flatten()
                    .any(|entry| entry.path().extension().and_then(|s| s.to_str()) == Some("json"))
            })
            .unwrap_or(false)
    }

    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return false;
    }

    let method_lower = method.to_lowercase();
    let path_hash = path.replace(['/', ':'], "_");

    let http_fixtures_path = fixtures_path.join("http").join(&method_lower).join(&path_hash);
    if http_fixtures_path.exists() && dir_contains_json(&http_fixtures_path) {
        return true;
    }

    // WebSocket routes use the pseudo-method "WS" and a separate subtree.
    if method.to_uppercase() == "WS" {
        let ws_fixtures_path = fixtures_path.join("websocket").join(&path_hash);
        if ws_fixtures_path.exists() && dir_contains_json(&ws_fixtures_path) {
            return true;
        }
    }

    false
}
1900
1901fn calculate_endpoint_latency(metrics: &RequestMetrics, endpoint: &str) -> Option<u64> {
1903 metrics.response_times_by_endpoint.get(endpoint).and_then(|times| {
1904 if times.is_empty() {
1905 None
1906 } else {
1907 let sum: u64 = times.iter().sum();
1908 Some(sum / times.len() as u64)
1909 }
1910 })
1911}
1912
/// Timestamp of the most recent request seen on `endpoint`, if any.
fn get_endpoint_last_request(
    metrics: &RequestMetrics,
    endpoint: &str,
) -> Option<chrono::DateTime<chrono::Utc>> {
    metrics.last_request_by_endpoint.get(endpoint).copied()
}
1920
1921fn count_requests_by_server_type(metrics: &RequestMetrics, server_type: &str) -> u64 {
1923 match server_type {
1924 "HTTP" => {
1925 metrics
1927 .requests_by_endpoint
1928 .iter()
1929 .filter(|(endpoint, _)| {
1930 let method = endpoint.split(' ').next().unwrap_or("");
1931 matches!(
1932 method,
1933 "GET" | "POST" | "PUT" | "DELETE" | "PATCH" | "HEAD" | "OPTIONS"
1934 )
1935 })
1936 .map(|(_, count)| count)
1937 .sum()
1938 }
1939 "WebSocket" => {
1940 metrics
1942 .requests_by_endpoint
1943 .iter()
1944 .filter(|(endpoint, _)| {
1945 let method = endpoint.split(' ').next().unwrap_or("");
1946 method == "WS"
1947 })
1948 .map(|(_, count)| count)
1949 .sum()
1950 }
1951 "gRPC" => {
1952 metrics
1954 .requests_by_endpoint
1955 .iter()
1956 .filter(|(endpoint, _)| {
1957 let method = endpoint.split(' ').next().unwrap_or("");
1958 method == "gRPC"
1959 })
1960 .map(|(_, count)| count)
1961 .sum()
1962 }
1963 _ => 0,
1964 }
1965}
1966
1967pub async fn get_fixtures() -> Json<ApiResponse<Vec<FixtureInfo>>> {
1969 match scan_fixtures_directory() {
1970 Ok(fixtures) => Json(ApiResponse::success(fixtures)),
1971 Err(e) => {
1972 tracing::error!("Failed to scan fixtures directory: {}", e);
1973 Json(ApiResponse::error(format!("Failed to load fixtures: {}", e)))
1974 }
1975 }
1976}
1977
1978fn scan_fixtures_directory() -> Result<Vec<FixtureInfo>> {
1980 let fixtures_dir =
1981 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1982 let fixtures_path = std::path::Path::new(&fixtures_dir);
1983
1984 if !fixtures_path.exists() {
1985 tracing::warn!("Fixtures directory does not exist: {}", fixtures_dir);
1986 return Ok(Vec::new());
1987 }
1988
1989 let mut all_fixtures = Vec::new();
1990
1991 let http_fixtures = scan_protocol_fixtures(fixtures_path, "http")?;
1993 all_fixtures.extend(http_fixtures);
1994
1995 let ws_fixtures = scan_protocol_fixtures(fixtures_path, "websocket")?;
1997 all_fixtures.extend(ws_fixtures);
1998
1999 let grpc_fixtures = scan_protocol_fixtures(fixtures_path, "grpc")?;
2001 all_fixtures.extend(grpc_fixtures);
2002
2003 all_fixtures.sort_by(|a, b| b.saved_at.cmp(&a.saved_at));
2005
2006 tracing::info!("Found {} fixtures in directory: {}", all_fixtures.len(), fixtures_dir);
2007 Ok(all_fixtures)
2008}
2009
2010fn scan_protocol_fixtures(
2012 fixtures_path: &std::path::Path,
2013 protocol: &str,
2014) -> Result<Vec<FixtureInfo>> {
2015 let protocol_path = fixtures_path.join(protocol);
2016 let mut fixtures = Vec::new();
2017
2018 if !protocol_path.exists() {
2019 return Ok(fixtures);
2020 }
2021
2022 if let Ok(entries) = std::fs::read_dir(&protocol_path) {
2024 for entry in entries {
2025 let entry = entry
2026 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2027 let path = entry.path();
2028
2029 if path.is_dir() {
2030 let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2032 fixtures.extend(sub_fixtures);
2033 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2034 if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2036 fixtures.push(fixture);
2037 }
2038 }
2039 }
2040 }
2041
2042 Ok(fixtures)
2043}
2044
2045fn scan_directory_recursive(
2047 dir_path: &std::path::Path,
2048 protocol: &str,
2049) -> Result<Vec<FixtureInfo>> {
2050 let mut fixtures = Vec::new();
2051
2052 if let Ok(entries) = std::fs::read_dir(dir_path) {
2053 for entry in entries {
2054 let entry = entry
2055 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2056 let path = entry.path();
2057
2058 if path.is_dir() {
2059 let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2061 fixtures.extend(sub_fixtures);
2062 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2063 if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2065 fixtures.push(fixture);
2066 }
2067 }
2068 }
2069 }
2070
2071 Ok(fixtures)
2072}
2073
/// Build a `FixtureInfo` from one fixture file on disk (blocking I/O).
///
/// Reads file metadata (size + mtime), parses the JSON body, derives the
/// route (method/path) for the given protocol, and computes a stable id and
/// fingerprint. The stored `file_path` is relative to the fixtures root when
/// possible, absolute otherwise.
///
/// # Errors
/// Fails when metadata cannot be read, the file cannot be read as UTF-8 text,
/// or the contents are not valid JSON.
fn parse_fixture_file_sync(file_path: &std::path::Path, protocol: &str) -> Result<FixtureInfo> {
    let metadata = std::fs::metadata(file_path)
        .map_err(|e| Error::generic(format!("Failed to read file metadata: {}", e)))?;

    let file_size = metadata.len();
    let modified_time = metadata
        .modified()
        .map_err(|e| Error::generic(format!("Failed to get file modification time: {}", e)))?;

    // mtime stands in for "saved at" — fixtures carry no explicit timestamp.
    let saved_at = chrono::DateTime::from(modified_time);

    let content = std::fs::read_to_string(file_path)
        .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;

    let fixture_data: serde_json::Value = serde_json::from_str(&content)
        .map_err(|e| Error::generic(format!("Failed to parse fixture JSON: {}", e)))?;

    let (method, path) = extract_method_and_path(&fixture_data, protocol)?;

    // Id hashes path + content, so editing a fixture changes its id.
    let id = generate_fixture_id(file_path, &content);

    let fingerprint = extract_fingerprint(file_path, &fixture_data)?;

    // Present paths relative to the fixtures root for the admin UI; fall back
    // to the full path when the file lives outside the configured root.
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);
    let file_path_str = file_path
        .strip_prefix(fixtures_path)
        .unwrap_or(file_path)
        .to_string_lossy()
        .to_string();

    Ok(FixtureInfo {
        id,
        protocol: protocol.to_string(),
        method,
        path,
        saved_at,
        file_size,
        file_path: file_path_str,
        fingerprint,
        metadata: fixture_data,
    })
}
2125
2126fn extract_method_and_path(
2128 fixture_data: &serde_json::Value,
2129 protocol: &str,
2130) -> Result<(String, String)> {
2131 match protocol {
2132 "http" => {
2133 let method = fixture_data
2135 .get("request")
2136 .and_then(|req| req.get("method"))
2137 .and_then(|m| m.as_str())
2138 .unwrap_or("UNKNOWN")
2139 .to_uppercase();
2140
2141 let path = fixture_data
2142 .get("request")
2143 .and_then(|req| req.get("path"))
2144 .and_then(|p| p.as_str())
2145 .unwrap_or("/unknown")
2146 .to_string();
2147
2148 Ok((method, path))
2149 }
2150 "websocket" => {
2151 let path = fixture_data
2153 .get("path")
2154 .and_then(|p| p.as_str())
2155 .or_else(|| {
2156 fixture_data
2157 .get("request")
2158 .and_then(|req| req.get("path"))
2159 .and_then(|p| p.as_str())
2160 })
2161 .unwrap_or("/ws")
2162 .to_string();
2163
2164 Ok(("WS".to_string(), path))
2165 }
2166 "grpc" => {
2167 let service =
2169 fixture_data.get("service").and_then(|s| s.as_str()).unwrap_or("UnknownService");
2170
2171 let method =
2172 fixture_data.get("method").and_then(|m| m.as_str()).unwrap_or("UnknownMethod");
2173
2174 let path = format!("/{}/{}", service, method);
2175 Ok(("gRPC".to_string(), path))
2176 }
2177 _ => {
2178 let path = fixture_data
2179 .get("path")
2180 .and_then(|p| p.as_str())
2181 .unwrap_or("/unknown")
2182 .to_string();
2183 Ok((protocol.to_uppercase(), path))
2184 }
2185 }
2186}
2187
/// Deterministic fixture identifier derived from the file path AND contents
/// (so editing a fixture yields a new id). Format: `fixture_<hex>`.
fn generate_fixture_id(file_path: &std::path::Path, content: &str) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut state = DefaultHasher::new();
    file_path.hash(&mut state);
    content.hash(&mut state);
    let digest = state.finish();
    format!("fixture_{:x}", digest)
}
2198
2199fn extract_fingerprint(
2201 file_path: &std::path::Path,
2202 fixture_data: &serde_json::Value,
2203) -> Result<String> {
2204 if let Some(fingerprint) = fixture_data.get("fingerprint").and_then(|f| f.as_str()) {
2206 return Ok(fingerprint.to_string());
2207 }
2208
2209 if let Some(file_name) = file_path.file_stem().and_then(|s| s.to_str()) {
2211 if let Some(hash) = file_name.split('_').next_back() {
2213 if hash.len() >= 8 && hash.chars().all(|c| c.is_alphanumeric()) {
2214 return Ok(hash.to_string());
2215 }
2216 }
2217 }
2218
2219 use std::collections::hash_map::DefaultHasher;
2221 use std::hash::{Hash, Hasher};
2222
2223 let mut hasher = DefaultHasher::new();
2224 file_path.hash(&mut hasher);
2225 Ok(format!("{:x}", hasher.finish()))
2226}
2227
2228pub async fn delete_fixture(
2230 Json(payload): Json<FixtureDeleteRequest>,
2231) -> Json<ApiResponse<String>> {
2232 match delete_fixture_by_id(&payload.fixture_id).await {
2233 Ok(_) => {
2234 tracing::info!("Successfully deleted fixture: {}", payload.fixture_id);
2235 Json(ApiResponse::success("Fixture deleted successfully".to_string()))
2236 }
2237 Err(e) => {
2238 tracing::error!("Failed to delete fixture {}: {}", payload.fixture_id, e);
2239 Json(ApiResponse::error(format!("Failed to delete fixture: {}", e)))
2240 }
2241 }
2242}
2243
2244pub async fn delete_fixtures_bulk(
2246 Json(payload): Json<FixtureBulkDeleteRequest>,
2247) -> Json<ApiResponse<FixtureBulkDeleteResult>> {
2248 let mut deleted_count = 0;
2249 let mut errors = Vec::new();
2250
2251 for fixture_id in &payload.fixture_ids {
2252 match delete_fixture_by_id(fixture_id).await {
2253 Ok(_) => {
2254 deleted_count += 1;
2255 tracing::info!("Successfully deleted fixture: {}", fixture_id);
2256 }
2257 Err(e) => {
2258 errors.push(format!("Failed to delete {}: {}", fixture_id, e));
2259 tracing::error!("Failed to delete fixture {}: {}", fixture_id, e);
2260 }
2261 }
2262 }
2263
2264 let result = FixtureBulkDeleteResult {
2265 deleted_count,
2266 total_requested: payload.fixture_ids.len(),
2267 errors: errors.clone(),
2268 };
2269
2270 if errors.is_empty() {
2271 Json(ApiResponse::success(result))
2272 } else {
2273 Json(ApiResponse::error(format!(
2274 "Partial success: {} deleted, {} errors",
2275 deleted_count,
2276 errors.len()
2277 )))
2278 }
2279}
2280
/// Locate a fixture by id under the fixtures root and delete its file.
///
/// The deletion itself runs on the blocking thread pool; afterwards any
/// directories left empty are pruned back up toward the fixtures root.
///
/// # Errors
/// Fails when the fixtures root is missing, no file matches the id, the file
/// cannot be removed, or the blocking task panics.
async fn delete_fixture_by_id(fixture_id: &str) -> Result<()> {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    // Resolve the id to a concrete file (scans every protocol subtree).
    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    let file_path_clone = file_path.clone();
    // remove_file is blocking I/O, so run it off the async executor. The
    // double `?` unwraps the JoinError first, then the deletion result.
    tokio::task::spawn_blocking(move || {
        if file_path_clone.exists() {
            std::fs::remove_file(&file_path_clone).map_err(|e| {
                Error::generic(format!(
                    "Failed to delete fixture file {}: {}",
                    file_path_clone.display(),
                    e
                ))
            })
        } else {
            Err(Error::generic(format!("Fixture file not found: {}", file_path_clone.display())))
        }
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Deleted fixture file: {}", file_path.display());

    // Best-effort tidy-up of now-empty parent directories.
    cleanup_empty_directories(&file_path).await;

    Ok(())
}
2321
2322fn find_fixture_file_by_id(
2324 fixtures_path: &std::path::Path,
2325 fixture_id: &str,
2326) -> Result<std::path::PathBuf> {
2327 let protocols = ["http", "websocket", "grpc"];
2329
2330 for protocol in &protocols {
2331 let protocol_path = fixtures_path.join(protocol);
2332 if let Ok(found_path) = search_fixture_in_directory(&protocol_path, fixture_id) {
2333 return Ok(found_path);
2334 }
2335 }
2336
2337 Err(Error::generic(format!(
2338 "Fixture with ID '{}' not found in any protocol directory",
2339 fixture_id
2340 )))
2341}
2342
2343fn search_fixture_in_directory(
2345 dir_path: &std::path::Path,
2346 fixture_id: &str,
2347) -> Result<std::path::PathBuf> {
2348 if !dir_path.exists() {
2349 return Err(Error::generic(format!("Directory does not exist: {}", dir_path.display())));
2350 }
2351
2352 if let Ok(entries) = std::fs::read_dir(dir_path) {
2353 for entry in entries {
2354 let entry = entry
2355 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2356 let path = entry.path();
2357
2358 if path.is_dir() {
2359 if let Ok(found_path) = search_fixture_in_directory(&path, fixture_id) {
2361 return Ok(found_path);
2362 }
2363 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2364 if let Ok(fixture_info) = parse_fixture_file_sync(&path, "unknown") {
2366 if fixture_info.id == fixture_id {
2367 return Ok(path);
2368 }
2369 }
2370 }
2371 }
2372 }
2373
2374 Err(Error::generic(format!(
2375 "Fixture not found in directory: {}",
2376 dir_path.display()
2377 )))
2378}
2379
2380async fn cleanup_empty_directories(file_path: &std::path::Path) {
2382 let file_path = file_path.to_path_buf();
2383
2384 let _ = tokio::task::spawn_blocking(move || {
2386 if let Some(parent) = file_path.parent() {
2387 let mut current = parent;
2389 let fixtures_dir =
2390 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2391 let fixtures_path = std::path::Path::new(&fixtures_dir);
2392
2393 while current != fixtures_path && current.parent().is_some() {
2394 if let Ok(entries) = std::fs::read_dir(current) {
2395 if entries.count() == 0 {
2396 if let Err(e) = std::fs::remove_dir(current) {
2397 tracing::debug!(
2398 "Failed to remove empty directory {}: {}",
2399 current.display(),
2400 e
2401 );
2402 break;
2403 } else {
2404 tracing::debug!("Removed empty directory: {}", current.display());
2405 }
2406 } else {
2407 break;
2408 }
2409 } else {
2410 break;
2411 }
2412
2413 if let Some(next_parent) = current.parent() {
2414 current = next_parent;
2415 } else {
2416 break;
2417 }
2418 }
2419 }
2420 })
2421 .await;
2422}
2423
2424pub async fn download_fixture(Query(params): Query<HashMap<String, String>>) -> impl IntoResponse {
2426 let fixture_id = match params.get("id") {
2428 Some(id) => id,
2429 None => {
2430 return (
2431 http::StatusCode::BAD_REQUEST,
2432 [(http::header::CONTENT_TYPE, "application/json")],
2433 r#"{"error": "Missing fixture ID parameter"}"#,
2434 )
2435 .into_response();
2436 }
2437 };
2438
2439 match download_fixture_by_id(fixture_id).await {
2441 Ok((content, file_name)) => (
2442 http::StatusCode::OK,
2443 [
2444 (http::header::CONTENT_TYPE, "application/json".to_string()),
2445 (
2446 http::header::CONTENT_DISPOSITION,
2447 format!("attachment; filename=\"{}\"", file_name),
2448 ),
2449 ],
2450 content,
2451 )
2452 .into_response(),
2453 Err(e) => {
2454 tracing::error!("Failed to download fixture {}: {}", fixture_id, e);
2455 let error_response = format!(r#"{{"error": "Failed to download fixture: {}"}}"#, e);
2456 (
2457 http::StatusCode::NOT_FOUND,
2458 [(http::header::CONTENT_TYPE, "application/json".to_string())],
2459 error_response,
2460 )
2461 .into_response()
2462 }
2463 }
2464}
2465
2466async fn download_fixture_by_id(fixture_id: &str) -> Result<(String, String)> {
2468 let fixtures_dir =
2470 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2471 let fixtures_path = std::path::Path::new(&fixtures_dir);
2472
2473 if !fixtures_path.exists() {
2474 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2475 }
2476
2477 let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2478
2479 let file_path_clone = file_path.clone();
2481 let (content, file_name) = tokio::task::spawn_blocking(move || {
2482 let content = std::fs::read_to_string(&file_path_clone)
2483 .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
2484
2485 let file_name = file_path_clone
2486 .file_name()
2487 .and_then(|name| name.to_str())
2488 .unwrap_or("fixture.json")
2489 .to_string();
2490
2491 Ok::<_, Error>((content, file_name))
2492 })
2493 .await
2494 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2495
2496 tracing::info!("Downloaded fixture file: {} ({} bytes)", file_path.display(), content.len());
2497 Ok((content, file_name))
2498}
2499
2500pub async fn rename_fixture(
2502 axum::extract::Path(fixture_id): axum::extract::Path<String>,
2503 Json(payload): Json<FixtureRenameRequest>,
2504) -> Json<ApiResponse<String>> {
2505 match rename_fixture_by_id(&fixture_id, &payload.new_name).await {
2506 Ok(new_path) => {
2507 tracing::info!("Successfully renamed fixture: {} -> {}", fixture_id, payload.new_name);
2508 Json(ApiResponse::success(format!("Fixture renamed successfully to: {}", new_path)))
2509 }
2510 Err(e) => {
2511 tracing::error!("Failed to rename fixture {}: {}", fixture_id, e);
2512 Json(ApiResponse::error(format!("Failed to rename fixture: {}", e)))
2513 }
2514 }
2515}
2516
2517async fn rename_fixture_by_id(fixture_id: &str, new_name: &str) -> Result<String> {
2519 if new_name.is_empty() {
2521 return Err(Error::generic("New name cannot be empty".to_string()));
2522 }
2523
2524 let new_name = if new_name.ends_with(".json") {
2526 new_name.to_string()
2527 } else {
2528 format!("{}.json", new_name)
2529 };
2530
2531 let fixtures_dir =
2533 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2534 let fixtures_path = std::path::Path::new(&fixtures_dir);
2535
2536 if !fixtures_path.exists() {
2537 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2538 }
2539
2540 let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2541
2542 let parent = old_path
2544 .parent()
2545 .ok_or_else(|| Error::generic("Could not determine parent directory".to_string()))?;
2546
2547 let new_path = parent.join(&new_name);
2548
2549 if new_path.exists() {
2551 return Err(Error::generic(format!(
2552 "A fixture with name '{}' already exists in the same directory",
2553 new_name
2554 )));
2555 }
2556
2557 let old_path_clone = old_path.clone();
2559 let new_path_clone = new_path.clone();
2560 tokio::task::spawn_blocking(move || {
2561 std::fs::rename(&old_path_clone, &new_path_clone)
2562 .map_err(|e| Error::generic(format!("Failed to rename fixture file: {}", e)))
2563 })
2564 .await
2565 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2566
2567 tracing::info!("Renamed fixture file: {} -> {}", old_path.display(), new_path.display());
2568
2569 Ok(new_path
2571 .strip_prefix(fixtures_path)
2572 .unwrap_or(&new_path)
2573 .to_string_lossy()
2574 .to_string())
2575}
2576
2577pub async fn move_fixture(
2579 axum::extract::Path(fixture_id): axum::extract::Path<String>,
2580 Json(payload): Json<FixtureMoveRequest>,
2581) -> Json<ApiResponse<String>> {
2582 match move_fixture_by_id(&fixture_id, &payload.new_path).await {
2583 Ok(new_location) => {
2584 tracing::info!("Successfully moved fixture: {} -> {}", fixture_id, payload.new_path);
2585 Json(ApiResponse::success(format!("Fixture moved successfully to: {}", new_location)))
2586 }
2587 Err(e) => {
2588 tracing::error!("Failed to move fixture {}: {}", fixture_id, e);
2589 Json(ApiResponse::error(format!("Failed to move fixture: {}", e)))
2590 }
2591 }
2592}
2593
2594async fn move_fixture_by_id(fixture_id: &str, new_path: &str) -> Result<String> {
2596 if new_path.is_empty() {
2598 return Err(Error::generic("New path cannot be empty".to_string()));
2599 }
2600
2601 let fixtures_dir =
2603 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2604 let fixtures_path = std::path::Path::new(&fixtures_dir);
2605
2606 if !fixtures_path.exists() {
2607 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2608 }
2609
2610 let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2611
2612 let new_full_path = if new_path.starts_with('/') {
2614 fixtures_path.join(new_path.trim_start_matches('/'))
2616 } else {
2617 fixtures_path.join(new_path)
2619 };
2620
2621 let new_full_path = if new_full_path.extension().and_then(|s| s.to_str()) == Some("json") {
2623 new_full_path
2624 } else {
2625 if new_full_path.is_dir() || !new_path.contains('.') {
2627 let file_name = old_path.file_name().ok_or_else(|| {
2628 Error::generic("Could not determine original file name".to_string())
2629 })?;
2630 new_full_path.join(file_name)
2631 } else {
2632 new_full_path.with_extension("json")
2633 }
2634 };
2635
2636 if new_full_path.exists() {
2638 return Err(Error::generic(format!(
2639 "A fixture already exists at path: {}",
2640 new_full_path.display()
2641 )));
2642 }
2643
2644 let old_path_clone = old_path.clone();
2646 let new_full_path_clone = new_full_path.clone();
2647 tokio::task::spawn_blocking(move || {
2648 if let Some(parent) = new_full_path_clone.parent() {
2650 std::fs::create_dir_all(parent)
2651 .map_err(|e| Error::generic(format!("Failed to create target directory: {}", e)))?;
2652 }
2653
2654 std::fs::rename(&old_path_clone, &new_full_path_clone)
2656 .map_err(|e| Error::generic(format!("Failed to move fixture file: {}", e)))
2657 })
2658 .await
2659 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2660
2661 tracing::info!("Moved fixture file: {} -> {}", old_path.display(), new_full_path.display());
2662
2663 cleanup_empty_directories(&old_path).await;
2665
2666 Ok(new_full_path
2668 .strip_prefix(fixtures_path)
2669 .unwrap_or(&new_full_path)
2670 .to_string_lossy()
2671 .to_string())
2672}
2673
2674pub async fn get_validation(
2676 State(state): State<AdminState>,
2677) -> Json<ApiResponse<ValidationSettings>> {
2678 let config_state = state.get_config().await;
2680
2681 Json(ApiResponse::success(config_state.validation_settings))
2682}
2683
2684pub async fn update_validation(
2686 State(state): State<AdminState>,
2687 Json(update): Json<ValidationUpdate>,
2688) -> Json<ApiResponse<String>> {
2689 match update.mode.as_str() {
2691 "enforce" | "warn" | "off" => {}
2692 _ => {
2693 return Json(ApiResponse::error(
2694 "Invalid validation mode. Must be 'enforce', 'warn', or 'off'".to_string(),
2695 ))
2696 }
2697 }
2698
2699 let mode = update.mode.clone();
2701 state
2702 .update_validation_config(
2703 update.mode,
2704 update.aggregate_errors,
2705 update.validate_responses,
2706 update.overrides.unwrap_or_default(),
2707 )
2708 .await;
2709
2710 tracing::info!(
2711 "Updated validation settings: mode={}, aggregate_errors={}",
2712 mode,
2713 update.aggregate_errors
2714 );
2715
2716 Json(ApiResponse::success("Validation settings updated".to_string()))
2717}
2718
2719pub async fn get_env_vars() -> Json<ApiResponse<HashMap<String, String>>> {
2721 let mut env_vars = HashMap::new();
2723
2724 let relevant_vars = [
2725 "MOCKFORGE_LATENCY_ENABLED",
2727 "MOCKFORGE_FAILURES_ENABLED",
2728 "MOCKFORGE_PROXY_ENABLED",
2729 "MOCKFORGE_RECORD_ENABLED",
2730 "MOCKFORGE_REPLAY_ENABLED",
2731 "MOCKFORGE_LOG_LEVEL",
2732 "MOCKFORGE_CONFIG_FILE",
2733 "RUST_LOG",
2734 "MOCKFORGE_HTTP_PORT",
2736 "MOCKFORGE_HTTP_HOST",
2737 "MOCKFORGE_HTTP_OPENAPI_SPEC",
2738 "MOCKFORGE_CORS_ENABLED",
2739 "MOCKFORGE_REQUEST_TIMEOUT_SECS",
2740 "MOCKFORGE_WS_PORT",
2742 "MOCKFORGE_WS_HOST",
2743 "MOCKFORGE_WS_REPLAY_FILE",
2744 "MOCKFORGE_WS_CONNECTION_TIMEOUT_SECS",
2745 "MOCKFORGE_GRPC_PORT",
2747 "MOCKFORGE_GRPC_HOST",
2748 "MOCKFORGE_ADMIN_ENABLED",
2750 "MOCKFORGE_ADMIN_PORT",
2751 "MOCKFORGE_ADMIN_HOST",
2752 "MOCKFORGE_ADMIN_MOUNT_PATH",
2753 "MOCKFORGE_ADMIN_API_ENABLED",
2754 "MOCKFORGE_RESPONSE_TEMPLATE_EXPAND",
2756 "MOCKFORGE_REQUEST_VALIDATION",
2757 "MOCKFORGE_AGGREGATE_ERRORS",
2758 "MOCKFORGE_RESPONSE_VALIDATION",
2759 "MOCKFORGE_VALIDATION_STATUS",
2760 "MOCKFORGE_RAG_ENABLED",
2762 "MOCKFORGE_FAKE_TOKENS",
2763 "MOCKFORGE_FIXTURES_DIR",
2765 ];
2766
2767 for var_name in &relevant_vars {
2768 if let Ok(value) = std::env::var(var_name) {
2769 env_vars.insert(var_name.to_string(), value);
2770 }
2771 }
2772
2773 Json(ApiResponse::success(env_vars))
2774}
2775
2776pub async fn update_env_var(Json(update): Json<EnvVarUpdate>) -> Json<ApiResponse<String>> {
2778 std::env::set_var(&update.key, &update.value);
2780
2781 tracing::info!("Updated environment variable: {}={}", update.key, update.value);
2782
2783 Json(ApiResponse::success(format!(
2786 "Environment variable {} updated to '{}'. Note: This change is not persisted and will be lost on restart.",
2787 update.key, update.value
2788 )))
2789}
2790
2791pub async fn get_file_content(
2793 Json(request): Json<FileContentRequest>,
2794) -> Json<ApiResponse<String>> {
2795 if let Err(e) = validate_file_path(&request.file_path) {
2797 return Json(ApiResponse::error(format!("Invalid file path: {}", e)));
2798 }
2799
2800 match tokio::fs::read_to_string(&request.file_path).await {
2802 Ok(content) => {
2803 if let Err(e) = validate_file_content(&content) {
2805 return Json(ApiResponse::error(format!("Invalid file content: {}", e)));
2806 }
2807 Json(ApiResponse::success(content))
2808 }
2809 Err(e) => Json(ApiResponse::error(format!("Failed to read file: {}", e))),
2810 }
2811}
2812
2813pub async fn save_file_content(Json(request): Json<FileSaveRequest>) -> Json<ApiResponse<String>> {
2815 match save_file_to_filesystem(&request.file_path, &request.content).await {
2816 Ok(_) => {
2817 tracing::info!("Successfully saved file: {}", request.file_path);
2818 Json(ApiResponse::success("File saved successfully".to_string()))
2819 }
2820 Err(e) => {
2821 tracing::error!("Failed to save file {}: {}", request.file_path, e);
2822 Json(ApiResponse::error(format!("Failed to save file: {}", e)))
2823 }
2824 }
2825}
2826
2827async fn save_file_to_filesystem(file_path: &str, content: &str) -> Result<()> {
2829 validate_file_path(file_path)?;
2831
2832 validate_file_content(content)?;
2834
2835 let path = std::path::PathBuf::from(file_path);
2837 let content = content.to_string();
2838
2839 let path_clone = path.clone();
2841 let content_clone = content.clone();
2842 tokio::task::spawn_blocking(move || {
2843 if let Some(parent) = path_clone.parent() {
2845 std::fs::create_dir_all(parent).map_err(|e| {
2846 Error::generic(format!("Failed to create directory {}: {}", parent.display(), e))
2847 })?;
2848 }
2849
2850 std::fs::write(&path_clone, &content_clone).map_err(|e| {
2852 Error::generic(format!("Failed to write file {}: {}", path_clone.display(), e))
2853 })?;
2854
2855 let written_content = std::fs::read_to_string(&path_clone).map_err(|e| {
2857 Error::generic(format!("Failed to verify written file {}: {}", path_clone.display(), e))
2858 })?;
2859
2860 if written_content != content_clone {
2861 return Err(Error::generic(format!(
2862 "File content verification failed for {}",
2863 path_clone.display()
2864 )));
2865 }
2866
2867 Ok::<_, Error>(())
2868 })
2869 .await
2870 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2871
2872 tracing::info!("File saved successfully: {} ({} bytes)", path.display(), content.len());
2873 Ok(())
2874}
2875
2876fn validate_file_path(file_path: &str) -> Result<()> {
2878 if file_path.contains("..") {
2880 return Err(Error::generic("Path traversal detected in file path".to_string()));
2881 }
2882
2883 let path = std::path::Path::new(file_path);
2885 if path.is_absolute() {
2886 let allowed_dirs = [
2888 std::env::current_dir().unwrap_or_default(),
2889 std::path::PathBuf::from("."),
2890 std::path::PathBuf::from("fixtures"),
2891 std::path::PathBuf::from("config"),
2892 ];
2893
2894 let mut is_allowed = false;
2895 for allowed_dir in &allowed_dirs {
2896 if path.starts_with(allowed_dir) {
2897 is_allowed = true;
2898 break;
2899 }
2900 }
2901
2902 if !is_allowed {
2903 return Err(Error::generic("File path is outside allowed directories".to_string()));
2904 }
2905 }
2906
2907 let dangerous_extensions = ["exe", "bat", "cmd", "sh", "ps1", "scr", "com"];
2909 if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
2910 if dangerous_extensions.contains(&extension.to_lowercase().as_str()) {
2911 return Err(Error::generic(format!(
2912 "Dangerous file extension not allowed: {}",
2913 extension
2914 )));
2915 }
2916 }
2917
2918 Ok(())
2919}
2920
2921fn validate_file_content(content: &str) -> Result<()> {
2923 if content.len() > 10 * 1024 * 1024 {
2925 return Err(Error::generic("File content too large (max 10MB)".to_string()));
2927 }
2928
2929 if content.contains('\0') {
2931 return Err(Error::generic("File content contains null bytes".to_string()));
2932 }
2933
2934 Ok(())
2935}
2936
/// Request body for deleting a single fixture by its identifier.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureDeleteRequest {
    /// Identifier of the fixture to delete.
    pub fixture_id: String,
}

/// Request body for setting a process environment variable.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvVarUpdate {
    /// Environment variable name.
    pub key: String,
    /// Value to set (not persisted across restarts).
    pub value: String,
}

/// Request body for deleting several fixtures in one call.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteRequest {
    /// Identifiers of the fixtures to delete.
    pub fixture_ids: Vec<String>,
}

/// Summary of a bulk fixture deletion.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteResult {
    /// Number of fixtures actually removed.
    pub deleted_count: usize,
    /// Number of fixtures the caller asked to remove.
    pub total_requested: usize,
    /// Per-fixture error messages for the ones that failed.
    pub errors: Vec<String>,
}

/// Request body for renaming a fixture file within its directory.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureRenameRequest {
    /// New file name (".json" is appended when missing).
    pub new_name: String,
}

/// Request body for moving a fixture to another location in the tree.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureMoveRequest {
    /// Destination path, relative to the fixtures root.
    pub new_path: String,
}

/// Request body for reading a file in the admin editor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContentRequest {
    /// Path of the file to read.
    pub file_path: String,
    /// Caller-declared file type (informational).
    pub file_type: String,
}

/// Request body for saving a file from the admin editor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileSaveRequest {
    /// Path of the file to write.
    pub file_path: String,
    /// Full new contents of the file.
    pub content: String,
}
2989
2990pub async fn get_smoke_tests(
2992 State(state): State<AdminState>,
2993) -> Json<ApiResponse<Vec<SmokeTestResult>>> {
2994 let results = state.get_smoke_test_results().await;
2995 Json(ApiResponse::success(results))
2996}
2997
2998pub async fn run_smoke_tests_endpoint(
3000 State(state): State<AdminState>,
3001) -> Json<ApiResponse<String>> {
3002 tracing::info!("Starting smoke test execution");
3003
3004 let state_clone = state.clone();
3006 tokio::spawn(async move {
3007 if let Err(e) = execute_smoke_tests(&state_clone).await {
3008 tracing::error!("Smoke test execution failed: {}", e);
3009 } else {
3010 tracing::info!("Smoke test execution completed successfully");
3011 }
3012 });
3013
3014 Json(ApiResponse::success(
3015 "Smoke tests started. Check results in the smoke tests section.".to_string(),
3016 ))
3017}
3018
3019async fn execute_smoke_tests(state: &AdminState) -> Result<()> {
3021 let base_url =
3023 std::env::var("MOCKFORGE_BASE_URL").unwrap_or_else(|_| "http://localhost:3000".to_string());
3024
3025 let context = SmokeTestContext {
3026 base_url,
3027 timeout_seconds: 30,
3028 parallel: true,
3029 };
3030
3031 let fixtures = scan_fixtures_directory()?;
3033
3034 let http_fixtures: Vec<&FixtureInfo> =
3036 fixtures.iter().filter(|f| f.protocol == "http").collect();
3037
3038 if http_fixtures.is_empty() {
3039 tracing::warn!("No HTTP fixtures found for smoke testing");
3040 return Ok(());
3041 }
3042
3043 tracing::info!("Running smoke tests for {} HTTP fixtures", http_fixtures.len());
3044
3045 let mut test_results = Vec::new();
3047
3048 for fixture in http_fixtures {
3049 let test_result = create_smoke_test_from_fixture(fixture);
3050 test_results.push(test_result);
3051 }
3052
3053 let mut executed_results = Vec::new();
3055 for mut test_result in test_results {
3056 test_result.status = "running".to_string();
3058 state.update_smoke_test_result(test_result.clone()).await;
3059
3060 let start_time = std::time::Instant::now();
3062 match execute_single_smoke_test(&test_result, &context).await {
3063 Ok((status_code, response_time_ms)) => {
3064 test_result.status = "passed".to_string();
3065 test_result.status_code = Some(status_code);
3066 test_result.response_time_ms = Some(response_time_ms);
3067 test_result.error_message = None;
3068 }
3069 Err(e) => {
3070 test_result.status = "failed".to_string();
3071 test_result.error_message = Some(e.to_string());
3072 test_result.status_code = None;
3073 test_result.response_time_ms = None;
3074 }
3075 }
3076
3077 let duration = start_time.elapsed();
3078 test_result.duration_seconds = Some(duration.as_secs_f64());
3079 test_result.last_run = Some(chrono::Utc::now());
3080
3081 executed_results.push(test_result.clone());
3082 state.update_smoke_test_result(test_result).await;
3083 }
3084
3085 tracing::info!("Smoke test execution completed: {} tests run", executed_results.len());
3086 Ok(())
3087}
3088
3089fn create_smoke_test_from_fixture(fixture: &FixtureInfo) -> SmokeTestResult {
3091 let test_name = format!("{} {}", fixture.method, fixture.path);
3092 let description = format!("Smoke test for {} endpoint", fixture.path);
3093
3094 SmokeTestResult {
3095 id: format!("smoke_{}", fixture.id),
3096 name: test_name,
3097 method: fixture.method.clone(),
3098 path: fixture.path.clone(),
3099 description,
3100 last_run: None,
3101 status: "pending".to_string(),
3102 response_time_ms: None,
3103 error_message: None,
3104 status_code: None,
3105 duration_seconds: None,
3106 }
3107}
3108
3109async fn execute_single_smoke_test(
3111 test: &SmokeTestResult,
3112 context: &SmokeTestContext,
3113) -> Result<(u16, u64)> {
3114 let url = format!("{}{}", context.base_url, test.path);
3115 let client = reqwest::Client::builder()
3116 .timeout(std::time::Duration::from_secs(context.timeout_seconds))
3117 .build()
3118 .map_err(|e| Error::generic(format!("Failed to create HTTP client: {}", e)))?;
3119
3120 let start_time = std::time::Instant::now();
3121
3122 let response = match test.method.as_str() {
3123 "GET" => client.get(&url).send().await,
3124 "POST" => client.post(&url).send().await,
3125 "PUT" => client.put(&url).send().await,
3126 "DELETE" => client.delete(&url).send().await,
3127 "PATCH" => client.patch(&url).send().await,
3128 "HEAD" => client.head(&url).send().await,
3129 "OPTIONS" => client.request(reqwest::Method::OPTIONS, &url).send().await,
3130 _ => {
3131 return Err(Error::generic(format!("Unsupported HTTP method: {}", test.method)));
3132 }
3133 };
3134
3135 let response_time = start_time.elapsed();
3136 let response_time_ms = response_time.as_millis() as u64;
3137
3138 match response {
3139 Ok(resp) => {
3140 let status_code = resp.status().as_u16();
3141 if (200..400).contains(&status_code) {
3142 Ok((status_code, response_time_ms))
3143 } else {
3144 Err(Error::generic(format!(
3145 "HTTP error: {} {}",
3146 status_code,
3147 resp.status().canonical_reason().unwrap_or("Unknown")
3148 )))
3149 }
3150 }
3151 Err(e) => Err(Error::generic(format!("Request failed: {}", e))),
3152 }
3153}
3154
3155pub async fn install_plugin(Json(request): Json<serde_json::Value>) -> impl IntoResponse {
3157 let source = request.get("source").and_then(|s| s.as_str()).unwrap_or("");
3159
3160 if source.is_empty() {
3161 return Json(json!({
3162 "success": false,
3163 "error": "Plugin source is required"
3164 }));
3165 }
3166
3167 let plugin_path = if source.starts_with("http://") || source.starts_with("https://") {
3169 match download_plugin_from_url(source).await {
3171 Ok(temp_path) => temp_path,
3172 Err(e) => {
3173 return Json(json!({
3174 "success": false,
3175 "error": format!("Failed to download plugin: {}", e)
3176 }))
3177 }
3178 }
3179 } else {
3180 std::path::PathBuf::from(source)
3182 };
3183
3184 if !plugin_path.exists() {
3186 return Json(json!({
3187 "success": false,
3188 "error": format!("Plugin file not found: {}", source)
3189 }));
3190 }
3191
3192 Json(json!({
3194 "success": true,
3195 "message": format!("Plugin would be installed from: {}", source)
3196 }))
3197}
3198
3199async fn download_plugin_from_url(url: &str) -> Result<std::path::PathBuf> {
3201 let temp_file =
3203 std::env::temp_dir().join(format!("plugin_{}.tmp", chrono::Utc::now().timestamp()));
3204 let temp_path = temp_file.clone();
3205
3206 let response = reqwest::get(url)
3208 .await
3209 .map_err(|e| Error::generic(format!("Failed to download from URL: {}", e)))?;
3210
3211 if !response.status().is_success() {
3212 return Err(Error::generic(format!(
3213 "HTTP error {}: {}",
3214 response.status().as_u16(),
3215 response.status().canonical_reason().unwrap_or("Unknown")
3216 )));
3217 }
3218
3219 let bytes = response
3221 .bytes()
3222 .await
3223 .map_err(|e| Error::generic(format!("Failed to read response: {}", e)))?;
3224
3225 tokio::fs::write(&temp_file, &bytes)
3227 .await
3228 .map_err(|e| Error::generic(format!("Failed to write temporary file: {}", e)))?;
3229
3230 Ok(temp_path)
3231}
3232
/// Placeholder icon handler: serves `image/png` with an empty body.
/// NOTE(review): clients receive a zero-length PNG — confirm this is intended
/// until real asset bytes are embedded.
pub async fn serve_icon() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

/// Placeholder 32px icon handler: serves `image/png` with an empty body.
pub async fn serve_icon_32() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

/// Placeholder 48px icon handler: serves `image/png` with an empty body.
pub async fn serve_icon_48() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

/// Placeholder logo handler: serves `image/png` with an empty body.
pub async fn serve_logo() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

/// Placeholder 40px logo handler: serves `image/png` with an empty body.
pub async fn serve_logo_40() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}

/// Placeholder 80px logo handler: serves `image/png` with an empty body.
pub async fn serve_logo_80() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3257
/// Stub endpoint for traffic-shaping configuration.
///
/// TODO: currently accepts any payload and reports success without applying
/// anything; both state and config are intentionally unused.
pub async fn update_traffic_shaping(
    State(_state): State<AdminState>,
    Json(_config): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Traffic shaping updated".to_string()))
}
3265
/// Import a Postman collection (raw JSON in `request.content`) into a new
/// workspace, recording the attempt in the admin import history.
///
/// Request fields: `content` (collection JSON), optional `filename`,
/// `environment`, and `base_url`. Every outcome — parse failure, workspace
/// creation failure, or success — appends an `ImportHistoryEntry`.
pub async fn import_postman(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use mockforge_core::workspace_import::{import_postman_to_workspace, WorkspaceImportConfig};
    use uuid::Uuid;

    // Optional request parameters; missing content becomes "" and fails parsing.
    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Parse the collection; on failure, record a failed history entry and bail.
    let import_result = match mockforge_core::import::import_postman_collection(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Postman import failed: {}", e)));
        }
    };

    // Derive the workspace name from the uploaded file name (stem before '.').
    let workspace_name = filename
        .and_then(|f| f.split('.').next())
        .unwrap_or("Imported Postman Collection");

    let config = WorkspaceImportConfig {
        create_folders: true,
        base_folder_name: None,
        preserve_hierarchy: true,
        max_depth: 5,
    };

    // Convert parsed routes into the workspace-import representation.
    let routes: Vec<ImportRoute> = import_result
        .routes
        .into_iter()
        .map(|route| ImportRoute {
            method: route.method,
            path: route.path,
            headers: route.headers,
            body: route.body,
            response: ImportResponse {
                status: route.response.status,
                headers: route.response.headers,
                body: route.response.body,
            },
        })
        .collect();

    match import_postman_to_workspace(routes, workspace_name.to_string(), config) {
        Ok(workspace_result) => {
            // Persist the new workspace; a save failure is reported as an
            // error even though the import itself succeeded.
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            // Record the successful import in history.
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: import_result.variables.len(),
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            // Workspace creation failed: record a failed history entry.
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}
3389
3390pub async fn import_insomnia(
3391 State(state): State<AdminState>,
3392 Json(request): Json<serde_json::Value>,
3393) -> Json<ApiResponse<String>> {
3394 use uuid::Uuid;
3395
3396 let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3397 let filename = request.get("filename").and_then(|v| v.as_str());
3398 let environment = request.get("environment").and_then(|v| v.as_str());
3399 let base_url = request.get("base_url").and_then(|v| v.as_str());
3400
3401 let import_result = match mockforge_core::import::import_insomnia_export(content, environment) {
3403 Ok(result) => result,
3404 Err(e) => {
3405 let entry = ImportHistoryEntry {
3407 id: Uuid::new_v4().to_string(),
3408 format: "insomnia".to_string(),
3409 timestamp: chrono::Utc::now(),
3410 routes_count: 0,
3411 variables_count: 0,
3412 warnings_count: 0,
3413 success: false,
3414 filename: filename.map(|s| s.to_string()),
3415 environment: environment.map(|s| s.to_string()),
3416 base_url: base_url.map(|s| s.to_string()),
3417 error_message: Some(e.clone()),
3418 };
3419 let mut history = state.import_history.write().await;
3420 history.push(entry);
3421
3422 return Json(ApiResponse::error(format!("Insomnia import failed: {}", e)));
3423 }
3424 };
3425
3426 let workspace_name = filename
3428 .and_then(|f| f.split('.').next())
3429 .unwrap_or("Imported Insomnia Collection");
3430
3431 let _config = WorkspaceImportConfig {
3432 create_folders: true,
3433 base_folder_name: None,
3434 preserve_hierarchy: true,
3435 max_depth: 5,
3436 };
3437
3438 let variables_count = import_result.variables.len();
3440
3441 match mockforge_core::workspace_import::create_workspace_from_insomnia(
3442 import_result,
3443 Some(workspace_name.to_string()),
3444 ) {
3445 Ok(workspace_result) => {
3446 if let Err(e) =
3448 state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3449 {
3450 tracing::error!("Failed to save workspace: {}", e);
3451 return Json(ApiResponse::error(format!(
3452 "Import succeeded but failed to save workspace: {}",
3453 e
3454 )));
3455 }
3456
3457 let entry = ImportHistoryEntry {
3459 id: Uuid::new_v4().to_string(),
3460 format: "insomnia".to_string(),
3461 timestamp: chrono::Utc::now(),
3462 routes_count: workspace_result.request_count,
3463 variables_count,
3464 warnings_count: workspace_result.warnings.len(),
3465 success: true,
3466 filename: filename.map(|s| s.to_string()),
3467 environment: environment.map(|s| s.to_string()),
3468 base_url: base_url.map(|s| s.to_string()),
3469 error_message: None,
3470 };
3471 let mut history = state.import_history.write().await;
3472 history.push(entry);
3473
3474 Json(ApiResponse::success(format!(
3475 "Successfully imported {} routes into workspace '{}'",
3476 workspace_result.request_count, workspace_name
3477 )))
3478 }
3479 Err(e) => {
3480 let entry = ImportHistoryEntry {
3482 id: Uuid::new_v4().to_string(),
3483 format: "insomnia".to_string(),
3484 timestamp: chrono::Utc::now(),
3485 routes_count: 0,
3486 variables_count: 0,
3487 warnings_count: 0,
3488 success: false,
3489 filename: filename.map(|s| s.to_string()),
3490 environment: environment.map(|s| s.to_string()),
3491 base_url: base_url.map(|s| s.to_string()),
3492 error_message: Some(e.to_string()),
3493 };
3494 let mut history = state.import_history.write().await;
3495 history.push(entry);
3496
3497 Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3498 }
3499 }
3500}
3501
3502pub async fn import_openapi(
3503 State(_state): State<AdminState>,
3504 Json(_request): Json<serde_json::Value>,
3505) -> Json<ApiResponse<String>> {
3506 Json(ApiResponse::success("OpenAPI import completed".to_string()))
3507}
3508
3509pub async fn import_curl(
3510 State(state): State<AdminState>,
3511 Json(request): Json<serde_json::Value>,
3512) -> Json<ApiResponse<String>> {
3513 use uuid::Uuid;
3514
3515 let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3516 let filename = request.get("filename").and_then(|v| v.as_str());
3517 let base_url = request.get("base_url").and_then(|v| v.as_str());
3518
3519 let import_result = match mockforge_core::import::import_curl_commands(content, base_url) {
3521 Ok(result) => result,
3522 Err(e) => {
3523 let entry = ImportHistoryEntry {
3525 id: Uuid::new_v4().to_string(),
3526 format: "curl".to_string(),
3527 timestamp: chrono::Utc::now(),
3528 routes_count: 0,
3529 variables_count: 0,
3530 warnings_count: 0,
3531 success: false,
3532 filename: filename.map(|s| s.to_string()),
3533 environment: None,
3534 base_url: base_url.map(|s| s.to_string()),
3535 error_message: Some(e.clone()),
3536 };
3537 let mut history = state.import_history.write().await;
3538 history.push(entry);
3539
3540 return Json(ApiResponse::error(format!("Curl import failed: {}", e)));
3541 }
3542 };
3543
3544 let workspace_name =
3546 filename.and_then(|f| f.split('.').next()).unwrap_or("Imported Curl Commands");
3547
3548 match mockforge_core::workspace_import::create_workspace_from_curl(
3549 import_result,
3550 Some(workspace_name.to_string()),
3551 ) {
3552 Ok(workspace_result) => {
3553 if let Err(e) =
3555 state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3556 {
3557 tracing::error!("Failed to save workspace: {}", e);
3558 return Json(ApiResponse::error(format!(
3559 "Import succeeded but failed to save workspace: {}",
3560 e
3561 )));
3562 }
3563
3564 let entry = ImportHistoryEntry {
3566 id: Uuid::new_v4().to_string(),
3567 format: "curl".to_string(),
3568 timestamp: chrono::Utc::now(),
3569 routes_count: workspace_result.request_count,
3570 variables_count: 0, warnings_count: workspace_result.warnings.len(),
3572 success: true,
3573 filename: filename.map(|s| s.to_string()),
3574 environment: None,
3575 base_url: base_url.map(|s| s.to_string()),
3576 error_message: None,
3577 };
3578 let mut history = state.import_history.write().await;
3579 history.push(entry);
3580
3581 Json(ApiResponse::success(format!(
3582 "Successfully imported {} routes into workspace '{}'",
3583 workspace_result.request_count, workspace_name
3584 )))
3585 }
3586 Err(e) => {
3587 let entry = ImportHistoryEntry {
3589 id: Uuid::new_v4().to_string(),
3590 format: "curl".to_string(),
3591 timestamp: chrono::Utc::now(),
3592 routes_count: 0,
3593 variables_count: 0,
3594 warnings_count: 0,
3595 success: false,
3596 filename: filename.map(|s| s.to_string()),
3597 environment: None,
3598 base_url: base_url.map(|s| s.to_string()),
3599 error_message: Some(e.to_string()),
3600 };
3601 let mut history = state.import_history.write().await;
3602 history.push(entry);
3603
3604 Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3605 }
3606 }
3607}
3608
3609pub async fn preview_import(
3610 State(_state): State<AdminState>,
3611 Json(request): Json<serde_json::Value>,
3612) -> Json<ApiResponse<serde_json::Value>> {
3613 use mockforge_core::import::{
3614 import_curl_commands, import_insomnia_export, import_postman_collection,
3615 };
3616
3617 let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3618 let filename = request.get("filename").and_then(|v| v.as_str());
3619 let environment = request.get("environment").and_then(|v| v.as_str());
3620 let base_url = request.get("base_url").and_then(|v| v.as_str());
3621
3622 let format = if let Some(fname) = filename {
3624 if fname.to_lowercase().contains("postman")
3625 || fname.to_lowercase().ends_with(".postman_collection")
3626 {
3627 "postman"
3628 } else if fname.to_lowercase().contains("insomnia")
3629 || fname.to_lowercase().ends_with(".insomnia")
3630 {
3631 "insomnia"
3632 } else if fname.to_lowercase().contains("curl")
3633 || fname.to_lowercase().ends_with(".sh")
3634 || fname.to_lowercase().ends_with(".curl")
3635 {
3636 "curl"
3637 } else {
3638 "unknown"
3639 }
3640 } else {
3641 "unknown"
3642 };
3643
3644 match format {
3645 "postman" => match import_postman_collection(content, base_url) {
3646 Ok(import_result) => {
3647 let routes: Vec<serde_json::Value> = import_result
3648 .routes
3649 .into_iter()
3650 .map(|route| {
3651 serde_json::json!({
3652 "method": route.method,
3653 "path": route.path,
3654 "headers": route.headers,
3655 "body": route.body,
3656 "status_code": route.response.status,
3657 "response": serde_json::json!({
3658 "status": route.response.status,
3659 "headers": route.response.headers,
3660 "body": route.response.body
3661 })
3662 })
3663 })
3664 .collect();
3665
3666 let response = serde_json::json!({
3667 "routes": routes,
3668 "variables": import_result.variables,
3669 "warnings": import_result.warnings
3670 });
3671
3672 Json(ApiResponse::success(response))
3673 }
3674 Err(e) => Json(ApiResponse::error(format!("Postman import failed: {}", e))),
3675 },
3676 "insomnia" => match import_insomnia_export(content, environment) {
3677 Ok(import_result) => {
3678 let routes: Vec<serde_json::Value> = import_result
3679 .routes
3680 .into_iter()
3681 .map(|route| {
3682 serde_json::json!({
3683 "method": route.method,
3684 "path": route.path,
3685 "headers": route.headers,
3686 "body": route.body,
3687 "status_code": route.response.status,
3688 "response": serde_json::json!({
3689 "status": route.response.status,
3690 "headers": route.response.headers,
3691 "body": route.response.body
3692 })
3693 })
3694 })
3695 .collect();
3696
3697 let response = serde_json::json!({
3698 "routes": routes,
3699 "variables": import_result.variables,
3700 "warnings": import_result.warnings
3701 });
3702
3703 Json(ApiResponse::success(response))
3704 }
3705 Err(e) => Json(ApiResponse::error(format!("Insomnia import failed: {}", e))),
3706 },
3707 "curl" => match import_curl_commands(content, base_url) {
3708 Ok(import_result) => {
3709 let routes: Vec<serde_json::Value> = import_result
3710 .routes
3711 .into_iter()
3712 .map(|route| {
3713 serde_json::json!({
3714 "method": route.method,
3715 "path": route.path,
3716 "headers": route.headers,
3717 "body": route.body,
3718 "status_code": route.response.status,
3719 "response": serde_json::json!({
3720 "status": route.response.status,
3721 "headers": route.response.headers,
3722 "body": route.response.body
3723 })
3724 })
3725 })
3726 .collect();
3727
3728 let response = serde_json::json!({
3729 "routes": routes,
3730 "variables": serde_json::json!({}),
3731 "warnings": import_result.warnings
3732 });
3733
3734 Json(ApiResponse::success(response))
3735 }
3736 Err(e) => Json(ApiResponse::error(format!("Curl import failed: {}", e))),
3737 },
3738 _ => Json(ApiResponse::error("Unsupported import format".to_string())),
3739 }
3740}
3741
3742pub async fn get_import_history(
3743 State(state): State<AdminState>,
3744) -> Json<ApiResponse<serde_json::Value>> {
3745 let history = state.import_history.read().await;
3746 let total = history.len();
3747
3748 let imports: Vec<serde_json::Value> = history
3749 .iter()
3750 .rev()
3751 .take(50)
3752 .map(|entry| {
3753 serde_json::json!({
3754 "id": entry.id,
3755 "format": entry.format,
3756 "timestamp": entry.timestamp.to_rfc3339(),
3757 "routes_count": entry.routes_count,
3758 "variables_count": entry.variables_count,
3759 "warnings_count": entry.warnings_count,
3760 "success": entry.success,
3761 "filename": entry.filename,
3762 "environment": entry.environment,
3763 "base_url": entry.base_url,
3764 "error_message": entry.error_message
3765 })
3766 })
3767 .collect();
3768
3769 let response = serde_json::json!({
3770 "imports": imports,
3771 "total": total
3772 });
3773
3774 Json(ApiResponse::success(response))
3775}
3776
3777pub async fn get_admin_api_state(
3778 State(_state): State<AdminState>,
3779) -> Json<ApiResponse<serde_json::Value>> {
3780 Json(ApiResponse::success(serde_json::json!({
3781 "status": "active"
3782 })))
3783}
3784
3785pub async fn get_admin_api_replay(
3786 State(_state): State<AdminState>,
3787) -> Json<ApiResponse<serde_json::Value>> {
3788 Json(ApiResponse::success(serde_json::json!({
3789 "replay": []
3790 })))
3791}
3792
3793pub async fn get_sse_status(
3794 State(_state): State<AdminState>,
3795) -> Json<ApiResponse<serde_json::Value>> {
3796 Json(ApiResponse::success(serde_json::json!({
3797 "available": true,
3798 "endpoint": "/sse",
3799 "config": {
3800 "event_type": "status",
3801 "interval_ms": 1000,
3802 "data_template": "{}"
3803 }
3804 })))
3805}
3806
3807pub async fn get_sse_connections(
3808 State(_state): State<AdminState>,
3809) -> Json<ApiResponse<serde_json::Value>> {
3810 Json(ApiResponse::success(serde_json::json!({
3811 "active_connections": 0
3812 })))
3813}
3814
3815pub async fn get_workspaces(
3817 State(_state): State<AdminState>,
3818) -> Json<ApiResponse<Vec<serde_json::Value>>> {
3819 Json(ApiResponse::success(vec![]))
3820}
3821
3822pub async fn create_workspace(
3823 State(_state): State<AdminState>,
3824 Json(_request): Json<serde_json::Value>,
3825) -> Json<ApiResponse<String>> {
3826 Json(ApiResponse::success("Workspace created".to_string()))
3827}
3828
3829pub async fn open_workspace_from_directory(
3830 State(_state): State<AdminState>,
3831 Json(_request): Json<serde_json::Value>,
3832) -> Json<ApiResponse<String>> {
3833 Json(ApiResponse::success("Workspace opened from directory".to_string()))
3834}
3835
3836pub async fn get_reality_level(
3840 State(state): State<AdminState>,
3841) -> Json<ApiResponse<serde_json::Value>> {
3842 let engine = state.reality_engine.read().await;
3843 let level = engine.get_level().await;
3844 let config = engine.get_config().await;
3845
3846 Json(ApiResponse::success(serde_json::json!({
3847 "level": level.value(),
3848 "level_name": level.name(),
3849 "description": level.description(),
3850 "chaos": {
3851 "enabled": config.chaos.enabled,
3852 "error_rate": config.chaos.error_rate,
3853 "delay_rate": config.chaos.delay_rate,
3854 },
3855 "latency": {
3856 "base_ms": config.latency.base_ms,
3857 "jitter_ms": config.latency.jitter_ms,
3858 },
3859 "mockai": {
3860 "enabled": config.mockai.enabled,
3861 },
3862 })))
3863}
3864
/// Request body for `set_reality_level`.
#[derive(Deserialize)]
pub struct SetRealityLevelRequest {
    // Desired reality level; the handler validates the 1-5 range.
    level: u8,
}
3870
/// Sets the reality level and hot-reloads the dependent subsystems
/// (chaos config, latency injector, MockAI) to match the new level.
///
/// Returns the effective settings; when one or more subsystems fail to
/// update, the response still succeeds but carries a `warnings` array.
pub async fn set_reality_level(
    State(state): State<AdminState>,
    Json(request): Json<SetRealityLevelRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    // Validate the requested level (1-5) before touching any state.
    let level = match mockforge_core::RealityLevel::from_value(request.level) {
        Some(l) => l,
        None => {
            return Json(ApiResponse::error(format!(
                "Invalid reality level: {}. Must be between 1 and 5.",
                request.level
            )));
        }
    };

    // Apply the level and snapshot the derived config; the write lock is
    // released before the subsystem updates below take their own locks.
    let engine = state.reality_engine.write().await;
    engine.set_level(level).await;
    let config = engine.get_config().await;
    drop(engine); let mut update_errors = Vec::new();

    // --- Chaos subsystem: translate the reality config into chaos config. ---
    if let Some(ref chaos_api_state) = state.chaos_api_state {
        let mut chaos_config = chaos_api_state.config.write().await;

        use mockforge_chaos::config::{FaultInjectionConfig, LatencyConfig};

        // Base latency > 0 enables latency injection; jitter is expressed
        // as a fraction of the base delay, capped at 100%.
        let latency_config = if config.latency.base_ms > 0 {
            Some(LatencyConfig {
                enabled: true,
                fixed_delay_ms: Some(config.latency.base_ms),
                random_delay_range_ms: config
                    .latency
                    .max_ms
                    .map(|max| (config.latency.min_ms, max)),
                jitter_percent: if config.latency.jitter_ms > 0 {
                    (config.latency.jitter_ms as f64 / config.latency.base_ms as f64).min(1.0)
                } else {
                    0.0
                },
                probability: 1.0,
            })
        } else {
            None
        };

        // Fault injection mirrors the chaos settings; connection errors and
        // payload corruption are deliberately left disabled here.
        let fault_injection_config = if config.chaos.enabled {
            Some(FaultInjectionConfig {
                enabled: true,
                http_errors: config.chaos.status_codes.clone(),
                http_error_probability: config.chaos.error_rate,
                connection_errors: false,
                connection_error_probability: 0.0,
                timeout_errors: config.chaos.inject_timeouts,
                timeout_ms: config.chaos.timeout_ms,
                // Timeouts reuse the error rate as their probability.
                timeout_probability: if config.chaos.inject_timeouts {
                    config.chaos.error_rate
                } else {
                    0.0
                },
                partial_responses: false,
                partial_response_probability: 0.0,
                payload_corruption: false,
                payload_corruption_probability: 0.0,
                corruption_type: mockforge_chaos::config::CorruptionType::None,
                error_pattern: Some(mockforge_chaos::config::ErrorPattern::Random {
                    probability: config.chaos.error_rate,
                }),
                mockai_enabled: false,
            })
        } else {
            None
        };

        chaos_config.enabled = config.chaos.enabled;
        chaos_config.latency = latency_config;
        chaos_config.fault_injection = fault_injection_config;

        // Release the chaos lock before logging.
        drop(chaos_config);
        tracing::info!("✅ Updated chaos config for reality level {}", level.value());

    }

    // --- Latency injector: failures are collected, not fatal. ---
    if let Some(ref latency_injector) = state.latency_injector {
        match mockforge_core::latency::LatencyInjector::update_profile_async(
            latency_injector,
            config.latency.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated latency injector for reality level {}", level.value());
            }
            Err(e) => {
                let error_msg = format!("Failed to update latency injector: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // --- MockAI: likewise best-effort; failures become warnings. ---
    if let Some(ref mockai) = state.mockai {
        match mockforge_core::intelligent_behavior::MockAI::update_config_async(
            mockai,
            config.mockai.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated MockAI config for reality level {}", level.value());
            }
            Err(e) => {
                let error_msg = format!("Failed to update MockAI: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // Mirror of the get_reality_level response shape.
    let mut response = serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": {
            "enabled": config.chaos.enabled,
            "error_rate": config.chaos.error_rate,
            "delay_rate": config.chaos.delay_rate,
        },
        "latency": {
            "base_ms": config.latency.base_ms,
            "jitter_ms": config.latency.jitter_ms,
        },
        "mockai": {
            "enabled": config.mockai.enabled,
        },
    });

    // Partial failure still returns success, with warnings attached.
    if !update_errors.is_empty() {
        response["warnings"] = serde_json::json!(update_errors);
        tracing::warn!(
            "Reality level updated to {} but some subsystems failed to update: {:?}",
            level.value(),
            update_errors
        );
    } else {
        tracing::info!(
            "✅ Reality level successfully updated to {} (hot-reload applied)",
            level.value()
        );
    }

    Json(ApiResponse::success(response))
}
4037
4038pub async fn list_reality_presets(
4040 State(state): State<AdminState>,
4041) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4042 let persistence = &state.workspace_persistence;
4043 match persistence.list_reality_presets().await {
4044 Ok(preset_paths) => {
4045 let presets: Vec<serde_json::Value> = preset_paths
4046 .iter()
4047 .map(|path| {
4048 serde_json::json!({
4049 "id": path.file_name().and_then(|n| n.to_str()).unwrap_or("unknown"),
4050 "path": path.to_string_lossy(),
4051 "name": path.file_stem().and_then(|n| n.to_str()).unwrap_or("unknown"),
4052 })
4053 })
4054 .collect();
4055 Json(ApiResponse::success(presets))
4056 }
4057 Err(e) => Json(ApiResponse::error(format!("Failed to list presets: {}", e))),
4058 }
4059}
4060
/// Request body for `import_reality_preset`.
#[derive(Deserialize)]
pub struct ImportPresetRequest {
    // Filesystem path of the preset file to load.
    path: String,
}
4066
4067pub async fn import_reality_preset(
4068 State(state): State<AdminState>,
4069 Json(request): Json<ImportPresetRequest>,
4070) -> Json<ApiResponse<serde_json::Value>> {
4071 let persistence = &state.workspace_persistence;
4072 let path = std::path::Path::new(&request.path);
4073
4074 match persistence.import_reality_preset(path).await {
4075 Ok(preset) => {
4076 let engine = state.reality_engine.write().await;
4078 engine.apply_preset(preset.clone()).await;
4079
4080 Json(ApiResponse::success(serde_json::json!({
4081 "name": preset.name,
4082 "description": preset.description,
4083 "level": preset.config.level.value(),
4084 "level_name": preset.config.level.name(),
4085 })))
4086 }
4087 Err(e) => Json(ApiResponse::error(format!("Failed to import preset: {}", e))),
4088 }
4089}
4090
/// Request body for `export_reality_preset`.
#[derive(Deserialize)]
pub struct ExportPresetRequest {
    // Preset name; also used (lowercased, spaces -> underscores) as the file name.
    name: String,
    // Optional human-readable description stored with the preset.
    description: Option<String>,
}
4097
4098pub async fn export_reality_preset(
4099 State(state): State<AdminState>,
4100 Json(request): Json<ExportPresetRequest>,
4101) -> Json<ApiResponse<serde_json::Value>> {
4102 let engine = state.reality_engine.read().await;
4103 let preset = engine.create_preset(request.name.clone(), request.description.clone()).await;
4104
4105 let persistence = &state.workspace_persistence;
4106 let presets_dir = persistence.presets_dir();
4107 let filename = format!("{}.json", request.name.replace(' ', "_").to_lowercase());
4108 let output_path = presets_dir.join(&filename);
4109
4110 match persistence.export_reality_preset(&preset, &output_path).await {
4111 Ok(_) => Json(ApiResponse::success(serde_json::json!({
4112 "name": preset.name,
4113 "description": preset.description,
4114 "path": output_path.to_string_lossy(),
4115 "level": preset.config.level.value(),
4116 }))),
4117 Err(e) => Json(ApiResponse::error(format!("Failed to export preset: {}", e))),
4118 }
4119}
4120
4121pub async fn get_continuum_ratio(
4125 State(state): State<AdminState>,
4126 Query(params): Query<std::collections::HashMap<String, String>>,
4127) -> Json<ApiResponse<serde_json::Value>> {
4128 let path = params.get("path").cloned().unwrap_or_else(|| "/".to_string());
4129 let engine = state.continuum_engine.read().await;
4130 let ratio = engine.get_blend_ratio(&path).await;
4131 let config = engine.get_config().await;
4132 let enabled = engine.is_enabled().await;
4133
4134 Json(ApiResponse::success(serde_json::json!({
4135 "path": path,
4136 "blend_ratio": ratio,
4137 "enabled": enabled,
4138 "transition_mode": format!("{:?}", config.transition_mode),
4139 "merge_strategy": format!("{:?}", config.merge_strategy),
4140 "default_ratio": config.default_ratio,
4141 })))
4142}
4143
/// Request body for `set_continuum_ratio`.
#[derive(Deserialize)]
pub struct SetContinuumRatioRequest {
    // Route path the override applies to.
    path: String,
    // Blend ratio; clamped to [0.0, 1.0] by the handler.
    ratio: f64,
}
4150
4151pub async fn set_continuum_ratio(
4152 State(state): State<AdminState>,
4153 Json(request): Json<SetContinuumRatioRequest>,
4154) -> Json<ApiResponse<serde_json::Value>> {
4155 let ratio = request.ratio.clamp(0.0, 1.0);
4156 let engine = state.continuum_engine.read().await;
4157 engine.set_blend_ratio(&request.path, ratio).await;
4158
4159 Json(ApiResponse::success(serde_json::json!({
4160 "path": request.path,
4161 "blend_ratio": ratio,
4162 })))
4163}
4164
4165pub async fn get_continuum_schedule(
4167 State(state): State<AdminState>,
4168) -> Json<ApiResponse<serde_json::Value>> {
4169 let engine = state.continuum_engine.read().await;
4170 let schedule = engine.get_time_schedule().await;
4171
4172 match schedule {
4173 Some(s) => Json(ApiResponse::success(serde_json::json!({
4174 "start_time": s.start_time.to_rfc3339(),
4175 "end_time": s.end_time.to_rfc3339(),
4176 "start_ratio": s.start_ratio,
4177 "end_ratio": s.end_ratio,
4178 "curve": format!("{:?}", s.curve),
4179 "duration_days": s.duration().num_days(),
4180 }))),
4181 None => Json(ApiResponse::success(serde_json::json!(null))),
4182 }
4183}
4184
/// Request body for `set_continuum_schedule`.
#[derive(Deserialize)]
pub struct SetContinuumScheduleRequest {
    // RFC 3339 timestamp at which the transition starts.
    start_time: String,
    // RFC 3339 timestamp at which the transition ends.
    end_time: String,
    // Ratio at the start of the window; clamped to [0.0, 1.0].
    start_ratio: f64,
    // Ratio at the end of the window; clamped to [0.0, 1.0].
    end_ratio: f64,
    // Optional curve name: "linear", "exponential", or "sigmoid".
    curve: Option<String>,
}
4194
4195pub async fn set_continuum_schedule(
4196 State(state): State<AdminState>,
4197 Json(request): Json<SetContinuumScheduleRequest>,
4198) -> Json<ApiResponse<serde_json::Value>> {
4199 let start_time = chrono::DateTime::parse_from_rfc3339(&request.start_time)
4200 .map_err(|e| format!("Invalid start_time: {}", e))
4201 .map(|dt| dt.with_timezone(&chrono::Utc));
4202
4203 let end_time = chrono::DateTime::parse_from_rfc3339(&request.end_time)
4204 .map_err(|e| format!("Invalid end_time: {}", e))
4205 .map(|dt| dt.with_timezone(&chrono::Utc));
4206
4207 match (start_time, end_time) {
4208 (Ok(start), Ok(end)) => {
4209 let curve = request
4210 .curve
4211 .as_deref()
4212 .map(|c| match c {
4213 "linear" => mockforge_core::TransitionCurve::Linear,
4214 "exponential" => mockforge_core::TransitionCurve::Exponential,
4215 "sigmoid" => mockforge_core::TransitionCurve::Sigmoid,
4216 _ => mockforge_core::TransitionCurve::Linear,
4217 })
4218 .unwrap_or(mockforge_core::TransitionCurve::Linear);
4219
4220 let schedule = mockforge_core::TimeSchedule::with_curve(
4221 start,
4222 end,
4223 request.start_ratio.clamp(0.0, 1.0),
4224 request.end_ratio.clamp(0.0, 1.0),
4225 curve,
4226 );
4227
4228 let engine = state.continuum_engine.read().await;
4229 engine.set_time_schedule(schedule.clone()).await;
4230
4231 Json(ApiResponse::success(serde_json::json!({
4232 "start_time": schedule.start_time.to_rfc3339(),
4233 "end_time": schedule.end_time.to_rfc3339(),
4234 "start_ratio": schedule.start_ratio,
4235 "end_ratio": schedule.end_ratio,
4236 "curve": format!("{:?}", schedule.curve),
4237 })))
4238 }
4239 (Err(e), _) | (_, Err(e)) => Json(ApiResponse::error(e)),
4240 }
4241}
4242
/// Request body for `advance_continuum_ratio`.
#[derive(Deserialize)]
pub struct AdvanceContinuumRatioRequest {
    // Amount to advance the default ratio by; the handler defaults to 0.1.
    increment: Option<f64>,
}
4248
4249pub async fn advance_continuum_ratio(
4250 State(state): State<AdminState>,
4251 Json(request): Json<AdvanceContinuumRatioRequest>,
4252) -> Json<ApiResponse<serde_json::Value>> {
4253 let increment = request.increment.unwrap_or(0.1);
4254 let engine = state.continuum_engine.read().await;
4255 engine.advance_ratio(increment).await;
4256 let config = engine.get_config().await;
4257
4258 Json(ApiResponse::success(serde_json::json!({
4259 "default_ratio": config.default_ratio,
4260 "increment": increment,
4261 })))
4262}
4263
/// Request body for `set_continuum_enabled`.
#[derive(Deserialize)]
pub struct SetContinuumEnabledRequest {
    // Whether the continuum engine should be active.
    enabled: bool,
}
4269
4270pub async fn set_continuum_enabled(
4271 State(state): State<AdminState>,
4272 Json(request): Json<SetContinuumEnabledRequest>,
4273) -> Json<ApiResponse<serde_json::Value>> {
4274 let engine = state.continuum_engine.read().await;
4275 engine.set_enabled(request.enabled).await;
4276
4277 Json(ApiResponse::success(serde_json::json!({
4278 "enabled": request.enabled,
4279 })))
4280}
4281
4282pub async fn get_continuum_overrides(
4284 State(state): State<AdminState>,
4285) -> Json<ApiResponse<serde_json::Value>> {
4286 let engine = state.continuum_engine.read().await;
4287 let overrides = engine.get_manual_overrides().await;
4288
4289 Json(ApiResponse::success(serde_json::json!(overrides)))
4290}
4291
4292pub async fn clear_continuum_overrides(
4294 State(state): State<AdminState>,
4295) -> Json<ApiResponse<serde_json::Value>> {
4296 let engine = state.continuum_engine.read().await;
4297 engine.clear_manual_overrides().await;
4298
4299 Json(ApiResponse::success(serde_json::json!({
4300 "message": "All manual overrides cleared",
4301 })))
4302}
4303
4304pub async fn get_workspace(
4305 State(_state): State<AdminState>,
4306 axum::extract::Path(workspace_id): axum::extract::Path<String>,
4307) -> Json<ApiResponse<serde_json::Value>> {
4308 Json(ApiResponse::success(serde_json::json!({
4309 "workspace": {
4310 "summary": {
4311 "id": workspace_id,
4312 "name": "Mock Workspace",
4313 "description": "A mock workspace"
4314 },
4315 "folders": [],
4316 "requests": []
4317 }
4318 })))
4319}
4320
4321pub async fn delete_workspace(
4322 State(_state): State<AdminState>,
4323 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4324) -> Json<ApiResponse<String>> {
4325 Json(ApiResponse::success("Workspace deleted".to_string()))
4326}
4327
4328pub async fn set_active_workspace(
4329 State(_state): State<AdminState>,
4330 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4331) -> Json<ApiResponse<String>> {
4332 Json(ApiResponse::success("Workspace activated".to_string()))
4333}
4334
4335pub async fn create_folder(
4336 State(_state): State<AdminState>,
4337 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4338 Json(_request): Json<serde_json::Value>,
4339) -> Json<ApiResponse<String>> {
4340 Json(ApiResponse::success("Folder created".to_string()))
4341}
4342
4343pub async fn create_request(
4344 State(_state): State<AdminState>,
4345 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4346 Json(_request): Json<serde_json::Value>,
4347) -> Json<ApiResponse<String>> {
4348 Json(ApiResponse::success("Request created".to_string()))
4349}
4350
4351pub async fn execute_workspace_request(
4352 State(_state): State<AdminState>,
4353 axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
4354 Json(_request): Json<serde_json::Value>,
4355) -> Json<ApiResponse<serde_json::Value>> {
4356 Json(ApiResponse::success(serde_json::json!({
4357 "status": "executed",
4358 "response": {}
4359 })))
4360}
4361
4362pub async fn get_request_history(
4363 State(_state): State<AdminState>,
4364 axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
4365) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4366 Json(ApiResponse::success(vec![]))
4367}
4368
4369pub async fn get_folder(
4370 State(_state): State<AdminState>,
4371 axum::extract::Path((_workspace_id, folder_id)): axum::extract::Path<(String, String)>,
4372) -> Json<ApiResponse<serde_json::Value>> {
4373 Json(ApiResponse::success(serde_json::json!({
4374 "folder": {
4375 "summary": {
4376 "id": folder_id,
4377 "name": "Mock Folder",
4378 "description": "A mock folder"
4379 },
4380 "requests": []
4381 }
4382 })))
4383}
4384
4385pub async fn import_to_workspace(
4386 State(_state): State<AdminState>,
4387 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4388 Json(_request): Json<serde_json::Value>,
4389) -> Json<ApiResponse<String>> {
4390 Json(ApiResponse::success("Import to workspace completed".to_string()))
4391}
4392
4393pub async fn export_workspaces(
4394 State(_state): State<AdminState>,
4395 Json(_request): Json<serde_json::Value>,
4396) -> Json<ApiResponse<String>> {
4397 Json(ApiResponse::success("Workspaces exported".to_string()))
4398}
4399
4400pub async fn get_environments(
4402 State(_state): State<AdminState>,
4403 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4404) -> Json<ApiResponse<serde_json::Value>> {
4405 let environments = vec![serde_json::json!({
4407 "id": "global",
4408 "name": "Global",
4409 "description": "Global environment variables",
4410 "variable_count": 0,
4411 "is_global": true,
4412 "active": true,
4413 "order": 0
4414 })];
4415
4416 Json(ApiResponse::success(serde_json::json!({
4417 "environments": environments,
4418 "total": 1
4419 })))
4420}
4421
4422pub async fn create_environment(
4423 State(_state): State<AdminState>,
4424 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4425 Json(_request): Json<serde_json::Value>,
4426) -> Json<ApiResponse<String>> {
4427 Json(ApiResponse::success("Environment created".to_string()))
4428}
4429
4430pub async fn update_environment(
4431 State(_state): State<AdminState>,
4432 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4433 Json(_request): Json<serde_json::Value>,
4434) -> Json<ApiResponse<String>> {
4435 Json(ApiResponse::success("Environment updated".to_string()))
4436}
4437
4438pub async fn delete_environment(
4439 State(_state): State<AdminState>,
4440 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4441) -> Json<ApiResponse<String>> {
4442 Json(ApiResponse::success("Environment deleted".to_string()))
4443}
4444
4445pub async fn set_active_environment(
4446 State(_state): State<AdminState>,
4447 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4448) -> Json<ApiResponse<String>> {
4449 Json(ApiResponse::success("Environment activated".to_string()))
4450}
4451
4452pub async fn update_environments_order(
4453 State(_state): State<AdminState>,
4454 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4455 Json(_request): Json<serde_json::Value>,
4456) -> Json<ApiResponse<String>> {
4457 Json(ApiResponse::success("Environment order updated".to_string()))
4458}
4459
4460pub async fn get_environment_variables(
4461 State(_state): State<AdminState>,
4462 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4463) -> Json<ApiResponse<serde_json::Value>> {
4464 Json(ApiResponse::success(serde_json::json!({
4465 "variables": []
4466 })))
4467}
4468
4469pub async fn set_environment_variable(
4470 State(_state): State<AdminState>,
4471 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4472 Json(_request): Json<serde_json::Value>,
4473) -> Json<ApiResponse<String>> {
4474 Json(ApiResponse::success("Environment variable set".to_string()))
4475}
4476
4477pub async fn remove_environment_variable(
4478 State(_state): State<AdminState>,
4479 axum::extract::Path((_workspace_id, _environment_id, _variable_name)): axum::extract::Path<(
4480 String,
4481 String,
4482 String,
4483 )>,
4484) -> Json<ApiResponse<String>> {
4485 Json(ApiResponse::success("Environment variable removed".to_string()))
4486}
4487
4488pub async fn get_autocomplete_suggestions(
4490 State(_state): State<AdminState>,
4491 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4492 Json(_request): Json<serde_json::Value>,
4493) -> Json<ApiResponse<serde_json::Value>> {
4494 Json(ApiResponse::success(serde_json::json!({
4495 "suggestions": [],
4496 "start_position": 0,
4497 "end_position": 0
4498 })))
4499}
4500
4501pub async fn get_sync_status(
4503 State(_state): State<AdminState>,
4504 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4505) -> Json<ApiResponse<serde_json::Value>> {
4506 Json(ApiResponse::success(serde_json::json!({
4507 "status": "disabled"
4508 })))
4509}
4510
4511pub async fn configure_sync(
4512 State(_state): State<AdminState>,
4513 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4514 Json(_request): Json<serde_json::Value>,
4515) -> Json<ApiResponse<String>> {
4516 Json(ApiResponse::success("Sync configured".to_string()))
4517}
4518
4519pub async fn disable_sync(
4520 State(_state): State<AdminState>,
4521 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4522) -> Json<ApiResponse<String>> {
4523 Json(ApiResponse::success("Sync disabled".to_string()))
4524}
4525
4526pub async fn trigger_sync(
4527 State(_state): State<AdminState>,
4528 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4529) -> Json<ApiResponse<String>> {
4530 Json(ApiResponse::success("Sync triggered".to_string()))
4531}
4532
4533pub async fn get_sync_changes(
4534 State(_state): State<AdminState>,
4535 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4536) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4537 Json(ApiResponse::success(vec![]))
4538}
4539
4540pub async fn confirm_sync_changes(
4541 State(_state): State<AdminState>,
4542 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4543 Json(_request): Json<serde_json::Value>,
4544) -> Json<ApiResponse<String>> {
4545 Json(ApiResponse::success("Sync changes confirmed".to_string()))
4546}
4547
4548pub async fn validate_plugin(
4550 State(_state): State<AdminState>,
4551 Json(_request): Json<serde_json::Value>,
4552) -> Json<ApiResponse<String>> {
4553 Json(ApiResponse::success("Plugin validated".to_string()))
4554}
4555
4556pub async fn clear_import_history(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
4558 let mut history = state.import_history.write().await;
4559 history.clear();
4560 Json(ApiResponse::success("Import history cleared".to_string()))
4561}
4562
#[cfg(test)]
mod tests {
    use super::*;

    /// A fully-populated `RequestMetrics` reads back its fields.
    #[test]
    fn test_request_metrics_creation() {
        use std::collections::HashMap;

        let m = RequestMetrics {
            total_requests: 100,
            active_connections: 5,
            requests_by_endpoint: HashMap::new(),
            response_times: vec![10, 20, 30],
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint: HashMap::new(),
            last_request_by_endpoint: HashMap::new(),
        };

        assert_eq!(m.total_requests, 100);
        assert_eq!(m.active_connections, 5);
        assert_eq!(m.response_times.len(), 3);
    }

    /// `SystemMetrics` holds the values it was built with.
    #[test]
    fn test_system_metrics_creation() {
        let m = SystemMetrics {
            cpu_usage_percent: 45.5,
            memory_usage_mb: 100,
            active_threads: 10,
        };

        assert_eq!(m.active_threads, 10);
        assert!(m.cpu_usage_percent > 0.0);
        assert_eq!(m.memory_usage_mb, 100);
    }

    /// A `TimeSeriesPoint` keeps the value it was given.
    #[test]
    fn test_time_series_point() {
        let p = TimeSeriesPoint {
            timestamp: chrono::Utc::now(),
            value: 42.5,
        };

        assert_eq!(p.value, 42.5);
    }

    /// `RestartStatus` round-trips its in-progress flag and reason.
    #[test]
    fn test_restart_status() {
        let s = RestartStatus {
            in_progress: true,
            initiated_at: Some(chrono::Utc::now()),
            reason: Some("Manual restart".to_string()),
            success: None,
        };

        assert!(s.in_progress);
        assert!(s.reason.is_some());
    }

    /// A default-ish `ConfigurationState` is constructible and readable.
    /// (`LatencyProfile` and friends are in scope via `use super::*`.)
    #[test]
    fn test_configuration_state() {
        use std::collections::HashMap;

        let cfg = ConfigurationState {
            latency_profile: LatencyProfile {
                name: "default".to_string(),
                base_ms: 100,
                jitter_ms: 10,
                tag_overrides: HashMap::new(),
            },
            fault_config: FaultConfig {
                enabled: false,
                failure_rate: 0.0,
                status_codes: vec![],
                active_failures: 0,
            },
            proxy_config: ProxyConfig {
                enabled: false,
                upstream_url: None,
                timeout_seconds: 30,
                requests_proxied: 0,
            },
            validation_settings: ValidationSettings {
                mode: "off".to_string(),
                aggregate_errors: false,
                validate_responses: false,
                overrides: HashMap::new(),
            },
        };

        assert_eq!(cfg.latency_profile.name, "default");
        assert!(!cfg.fault_config.enabled);
        assert!(!cfg.proxy_config.enabled);
    }

    /// `AdminState::new` records the HTTP address, API flag, and port.
    #[test]
    fn test_admin_state_new() {
        let http_addr: std::net::SocketAddr = "127.0.0.1:3000".parse().unwrap();
        let state = AdminState::new(
            Some(http_addr),
            None,
            None,
            None,
            true,
            8080,
            None,
            None,
            None,
            None,
            None,
        );

        assert_eq!(state.http_server_addr, Some(http_addr));
        assert!(state.api_enabled);
        assert_eq!(state.admin_port, 8080);
    }
}