1use axum::{
13 extract::{Query, State},
14 http::{self, StatusCode},
15 response::{
16 sse::{Event, Sse},
17 Html, IntoResponse, Json,
18 },
19};
20use chrono::Utc;
21use futures_util::stream::{self, Stream};
22use mockforge_core::{Error, Result};
23use mockforge_plugin_loader::PluginRegistry;
24use serde::{Deserialize, Serialize};
25use serde_json::json;
26use std::collections::HashMap;
27use std::convert::Infallible;
28use std::process::Command;
29use std::process::Stdio;
30use std::sync::Arc;
31use std::time::Duration;
32use sysinfo::System;
33use tokio::sync::RwLock;
34
35use crate::models::{
37 ApiResponse, ConfigUpdate, DashboardData, DashboardSystemInfo, FaultConfig, HealthCheck,
38 LatencyProfile, LogFilter, MetricsData, ProxyConfig, RequestLog, RouteInfo, ServerInfo,
39 ServerStatus, SimpleMetricsData, SystemInfo, ValidationSettings, ValidationUpdate,
40};
41
42use mockforge_core::workspace_import::{ImportResponse, ImportRoute};
44
45pub mod admin;
47pub mod analytics;
48pub mod analytics_stream;
49pub mod analytics_v2;
50pub mod assets;
51pub mod chains;
52pub mod contract_diff;
53pub mod graph;
54pub mod health;
55pub mod migration;
56pub mod playground;
57pub mod plugin;
58pub mod verification;
59pub mod voice;
60
61pub use assets::*;
63pub use chains::*;
64pub use graph::*;
65pub use migration::*;
66pub use plugin::*;
67
68use mockforge_core::workspace_import::WorkspaceImportConfig;
70use mockforge_core::workspace_persistence::WorkspacePersistence;
71
/// Aggregated request counters and rolling latency samples shown in the admin UI.
#[derive(Debug, Clone, Default)]
pub struct RequestMetrics {
    /// Total number of requests recorded since startup.
    pub total_requests: u64,
    /// Currently active connections (0 when derived from the centralized logger).
    pub active_connections: u64,
    /// Request count keyed by "METHOD path" endpoint string.
    pub requests_by_endpoint: HashMap<String, u64>,
    /// Rolling window of recent response times in milliseconds (capped at 100 samples).
    pub response_times: Vec<u64>,
    /// Per-endpoint rolling response-time windows in milliseconds (capped at 50 samples each).
    pub response_times_by_endpoint: HashMap<String, Vec<u64>>,
    /// Count of responses with status >= 400, keyed by endpoint.
    pub errors_by_endpoint: HashMap<String, u64>,
    /// Timestamp of the most recent request seen per endpoint.
    pub last_request_by_endpoint: HashMap<String, chrono::DateTime<chrono::Utc>>,
}
90
/// Point-in-time system resource usage sampled by the monitoring task.
#[derive(Debug, Clone)]
pub struct SystemMetrics {
    /// Used memory in megabytes.
    pub memory_usage_mb: u64,
    /// Global CPU usage percentage.
    pub cpu_usage_percent: f64,
    /// Logical CPU count, used as a stand-in for active threads (see `start_system_monitoring`).
    pub active_threads: u32,
}
101
/// One timestamped sample in a metrics time series.
#[derive(Debug, Clone)]
pub struct TimeSeriesPoint {
    /// When the sample was taken (UTC).
    pub timestamp: chrono::DateTime<chrono::Utc>,
    /// Sampled value (unit depends on the series it belongs to).
    pub value: f64,
}
110
/// Bounded time-series buffers for dashboard charts (each capped at 100 points).
#[derive(Debug, Clone, Default)]
pub struct TimeSeriesData {
    /// Memory usage samples, in MB.
    pub memory_usage: Vec<TimeSeriesPoint>,
    /// CPU usage samples, in percent.
    pub cpu_usage: Vec<TimeSeriesPoint>,
    /// Cumulative request count samples.
    pub request_count: Vec<TimeSeriesPoint>,
    /// Response-time samples, in milliseconds.
    pub response_time: Vec<TimeSeriesPoint>,
}
123
/// Tracks the lifecycle of a server restart requested through the admin API.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestartStatus {
    /// True while a restart is underway; guards against concurrent restarts.
    pub in_progress: bool,
    /// When the current/last restart was initiated.
    pub initiated_at: Option<chrono::DateTime<chrono::Utc>>,
    /// Human-readable reason supplied by the caller.
    pub reason: Option<String>,
    /// Outcome of the last restart; `None` while one is still in progress.
    pub success: Option<bool>,
}
136
/// Metadata describing a recorded fixture on disk.
/// NOTE(review): not referenced in this chunk — field semantics inferred from
/// names; confirm against the fixture-handling code elsewhere in the crate.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureInfo {
    /// Unique fixture identifier.
    pub id: String,
    /// Protocol the fixture was captured from (e.g. HTTP/WS/gRPC — confirm).
    pub protocol: String,
    /// Request method associated with the fixture.
    pub method: String,
    /// Request path associated with the fixture.
    pub path: String,
    /// When the fixture was saved.
    pub saved_at: chrono::DateTime<chrono::Utc>,
    /// Size of the fixture file in bytes.
    pub file_size: u64,
    /// Path of the fixture file on disk.
    pub file_path: String,
    /// Deduplication/identity fingerprint for the fixture content.
    pub fingerprint: String,
    /// Free-form metadata payload.
    pub metadata: serde_json::Value,
}
159
/// Result of a single smoke test run, stored in `AdminState::smoke_test_results`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SmokeTestResult {
    /// Stable identifier used to upsert results (see `update_smoke_test_result`).
    pub id: String,
    /// Display name of the test.
    pub name: String,
    /// HTTP method exercised by the test.
    pub method: String,
    /// Path exercised by the test.
    pub path: String,
    /// Human-readable description.
    pub description: String,
    /// When the test last ran; `None` if it never ran.
    pub last_run: Option<chrono::DateTime<chrono::Utc>>,
    /// Status string (e.g. pass/fail — values defined by the test runner).
    pub status: String,
    /// Observed response time in milliseconds, if the test completed.
    pub response_time_ms: Option<u64>,
    /// Failure detail, if any.
    pub error_message: Option<String>,
    /// HTTP status code returned, if the request completed.
    pub status_code: Option<u16>,
    /// Wall-clock duration of the test run in seconds.
    pub duration_seconds: Option<f64>,
}
186
/// Execution settings for a smoke test run.
/// NOTE(review): not referenced in this chunk — confirm usage in the runner.
#[derive(Debug, Clone)]
pub struct SmokeTestContext {
    /// Base URL the tests are issued against.
    pub base_url: String,
    /// Per-request timeout in seconds.
    pub timeout_seconds: u64,
    /// Whether tests may run concurrently.
    pub parallel: bool,
}
197
/// Mutable runtime configuration exposed through the admin API
/// (updated by the `update_*_config` methods on `AdminState`).
#[derive(Debug, Clone, Serialize)]
pub struct ConfigurationState {
    /// Injected latency settings (base + jitter + per-tag overrides).
    pub latency_profile: LatencyProfile,
    /// Fault-injection settings.
    pub fault_config: FaultConfig,
    /// Upstream proxying settings.
    pub proxy_config: ProxyConfig,
    /// Request/response validation settings.
    pub validation_settings: ValidationSettings,
}
210
/// One entry in the workspace import history (`AdminState::import_history`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportHistoryEntry {
    /// Unique identifier for this import.
    pub id: String,
    /// Source format of the import (e.g. collection type — confirm values at call sites).
    pub format: String,
    /// When the import was performed.
    pub timestamp: chrono::DateTime<chrono::Utc>,
    /// Number of routes produced by the import.
    pub routes_count: usize,
    /// Number of variables produced by the import.
    pub variables_count: usize,
    /// Number of warnings emitted during the import.
    pub warnings_count: usize,
    /// Whether the import succeeded overall.
    pub success: bool,
    /// Original filename, if known.
    pub filename: Option<String>,
    /// Environment the import targeted, if any.
    pub environment: Option<String>,
    /// Base URL associated with the import, if any.
    pub base_url: Option<String>,
    /// Failure detail when `success` is false.
    pub error_message: Option<String>,
}
237
/// Shared state for the admin UI/API. Cheap to clone: all mutable members are
/// behind `Arc<RwLock<_>>`, so clones observe the same underlying data.
#[derive(Clone)]
pub struct AdminState {
    /// Bound address of the HTTP mock server, if running.
    pub http_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the WebSocket server, if running.
    pub ws_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the gRPC server, if running.
    pub grpc_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the GraphQL server, if running.
    pub graphql_server_addr: Option<std::net::SocketAddr>,
    /// Whether the admin API is enabled.
    pub api_enabled: bool,
    /// Port the admin UI listens on.
    pub admin_port: u16,
    /// Process start time, used for uptime calculations.
    pub start_time: chrono::DateTime<chrono::Utc>,
    /// Request counters and latency windows (see `record_request`).
    pub metrics: Arc<RwLock<RequestMetrics>>,
    /// Latest CPU/memory sample (fed by `start_system_monitoring`).
    pub system_metrics: Arc<RwLock<SystemMetrics>>,
    /// Runtime-tunable configuration.
    pub config: Arc<RwLock<ConfigurationState>>,
    /// In-memory request log, capped at 1000 entries.
    pub logs: Arc<RwLock<Vec<RequestLog>>>,
    /// Bounded time series for dashboard charts.
    pub time_series: Arc<RwLock<TimeSeriesData>>,
    /// State of any in-flight restart.
    pub restart_status: Arc<RwLock<RestartStatus>>,
    /// Smoke test results, capped at 100 entries.
    pub smoke_test_results: Arc<RwLock<Vec<SmokeTestResult>>>,
    /// Workspace import history.
    pub import_history: Arc<RwLock<Vec<ImportHistoryEntry>>>,
    /// Persistence layer for workspaces (rooted at "./workspaces" by default).
    pub workspace_persistence: Arc<WorkspacePersistence>,
    /// Loaded plugin registry.
    pub plugin_registry: Arc<RwLock<PluginRegistry>>,
    /// Reality engine instance.
    pub reality_engine: Arc<RwLock<mockforge_core::RealityEngine>>,
    /// Reality continuum engine (optionally driven by a virtual clock — see `new`).
    pub continuum_engine: Arc<RwLock<mockforge_core::RealityContinuumEngine>>,
    /// Chaos API state, when the chaos subsystem is enabled.
    pub chaos_api_state: Option<std::sync::Arc<mockforge_chaos::api::ChaosApiState>>,
    /// Shared latency injector, when latency injection is enabled.
    pub latency_injector:
        Option<std::sync::Arc<tokio::sync::RwLock<mockforge_core::latency::LatencyInjector>>>,
    /// Intelligent-behavior engine, when enabled.
    pub mockai:
        Option<std::sync::Arc<tokio::sync::RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
}
291
impl AdminState {
    /// Spawn a detached background task that samples CPU and memory via
    /// `sysinfo` every 10 seconds and feeds the results into
    /// `update_system_metrics`. The loop never exits; the task lives for the
    /// remainder of the process.
    pub async fn start_system_monitoring(&self) {
        let state_clone = self.clone();
        tokio::spawn(async move {
            let mut sys = System::new_all();
            let mut refresh_count = 0u64;

            tracing::info!("Starting system monitoring background task");

            loop {
                sys.refresh_all();

                let cpu_usage = sys.global_cpu_usage();

                let _total_memory = sys.total_memory() as f64;
                let used_memory = sys.used_memory() as f64;
                // NOTE(review): assumes `used_memory()` returns bytes (sysinfo >= 0.26);
                // older sysinfo versions returned KiB — confirm against Cargo.toml.
                let memory_usage_mb = used_memory / 1024.0 / 1024.0;

                // Logical CPU count is used as a proxy for "active threads".
                let active_threads = sys.cpus().len() as u32;

                let memory_mb_u64 = memory_usage_mb as u64;

                // Only log every 10th sample to keep debug output quiet.
                if refresh_count.is_multiple_of(10) {
                    tracing::debug!(
                        "System metrics updated: CPU={:.1}%, Mem={}MB, Threads={}",
                        cpu_usage,
                        memory_mb_u64,
                        active_threads
                    );
                }

                state_clone
                    .update_system_metrics(memory_mb_u64, cpu_usage as f64, active_threads)
                    .await;

                refresh_count += 1;

                tokio::time::sleep(Duration::from_secs(10)).await;
            }
        });
    }
341
    /// Build a fresh `AdminState` with default configuration and empty
    /// metric/log/history stores.
    ///
    /// `continuum_config` falls back to `Default` when `None`; when a
    /// `virtual_clock` is supplied the continuum engine is driven by it.
    /// Workspace persistence is rooted at "./workspaces".
    pub fn new(
        http_server_addr: Option<std::net::SocketAddr>,
        ws_server_addr: Option<std::net::SocketAddr>,
        grpc_server_addr: Option<std::net::SocketAddr>,
        graphql_server_addr: Option<std::net::SocketAddr>,
        api_enabled: bool,
        admin_port: u16,
        chaos_api_state: Option<std::sync::Arc<mockforge_chaos::api::ChaosApiState>>,
        latency_injector: Option<
            std::sync::Arc<tokio::sync::RwLock<mockforge_core::latency::LatencyInjector>>,
        >,
        mockai: Option<
            std::sync::Arc<tokio::sync::RwLock<mockforge_core::intelligent_behavior::MockAI>>,
        >,
        continuum_config: Option<mockforge_core::ContinuumConfig>,
        virtual_clock: Option<std::sync::Arc<mockforge_core::VirtualClock>>,
    ) -> Self {
        let start_time = chrono::Utc::now();

        Self {
            http_server_addr,
            ws_server_addr,
            grpc_server_addr,
            graphql_server_addr,
            api_enabled,
            admin_port,
            start_time,
            metrics: Arc::new(RwLock::new(RequestMetrics::default())),
            system_metrics: Arc::new(RwLock::new(SystemMetrics {
                memory_usage_mb: 0,
                cpu_usage_percent: 0.0,
                active_threads: 0,
            })),
            // Default runtime configuration: mild latency, faults/proxy off,
            // strict ("enforce") validation.
            config: Arc::new(RwLock::new(ConfigurationState {
                latency_profile: LatencyProfile {
                    name: "default".to_string(),
                    base_ms: 50,
                    jitter_ms: 20,
                    tag_overrides: HashMap::new(),
                },
                fault_config: FaultConfig {
                    enabled: false,
                    failure_rate: 0.0,
                    status_codes: vec![500, 502, 503],
                    active_failures: 0,
                },
                proxy_config: ProxyConfig {
                    enabled: false,
                    upstream_url: None,
                    timeout_seconds: 30,
                    requests_proxied: 0,
                },
                validation_settings: ValidationSettings {
                    mode: "enforce".to_string(),
                    aggregate_errors: true,
                    validate_responses: false,
                    overrides: HashMap::new(),
                },
            })),
            logs: Arc::new(RwLock::new(Vec::new())),
            time_series: Arc::new(RwLock::new(TimeSeriesData::default())),
            restart_status: Arc::new(RwLock::new(RestartStatus {
                in_progress: false,
                initiated_at: None,
                reason: None,
                success: None,
            })),
            smoke_test_results: Arc::new(RwLock::new(Vec::new())),
            import_history: Arc::new(RwLock::new(Vec::new())),
            workspace_persistence: Arc::new(WorkspacePersistence::new("./workspaces")),
            plugin_registry: Arc::new(RwLock::new(PluginRegistry::new())),
            reality_engine: Arc::new(RwLock::new(mockforge_core::RealityEngine::new())),
            // The continuum engine optionally runs against a virtual clock.
            continuum_engine: Arc::new(RwLock::new({
                let config = continuum_config.unwrap_or_default();
                if let Some(clock) = virtual_clock {
                    mockforge_core::RealityContinuumEngine::with_virtual_clock(config, clock)
                } else {
                    mockforge_core::RealityContinuumEngine::new(config)
                }
            })),
            chaos_api_state,
            latency_injector,
            mockai,
        }
    }
441
442 pub async fn record_request(
444 &self,
445 method: &str,
446 path: &str,
447 status_code: u16,
448 response_time_ms: u64,
449 error: Option<String>,
450 ) {
451 let mut metrics = self.metrics.write().await;
452
453 metrics.total_requests += 1;
454 let endpoint = format!("{} {}", method, path);
455 *metrics.requests_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
456
457 if status_code >= 400 {
458 *metrics.errors_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
459 }
460
461 metrics.response_times.push(response_time_ms);
463 if metrics.response_times.len() > 100 {
464 metrics.response_times.remove(0);
465 }
466
467 let endpoint_times = metrics
469 .response_times_by_endpoint
470 .entry(endpoint.clone())
471 .or_insert_with(Vec::new);
472 endpoint_times.push(response_time_ms);
473 if endpoint_times.len() > 50 {
474 endpoint_times.remove(0);
475 }
476
477 metrics.last_request_by_endpoint.insert(endpoint, chrono::Utc::now());
479
480 let total_requests = metrics.total_requests;
482
483 drop(metrics);
485
486 self.update_time_series_on_request(response_time_ms, total_requests).await;
488
489 let mut logs = self.logs.write().await;
491 let log_entry = RequestLog {
492 id: format!("req_{}", total_requests),
493 timestamp: Utc::now(),
494 method: method.to_string(),
495 path: path.to_string(),
496 status_code,
497 response_time_ms,
498 client_ip: None,
499 user_agent: None,
500 headers: HashMap::new(),
501 response_size_bytes: 0,
502 error_message: error,
503 };
504
505 logs.push(log_entry);
506
507 if logs.len() > 1000 {
509 logs.remove(0);
510 }
511 }
512
513 pub async fn get_metrics(&self) -> RequestMetrics {
515 self.metrics.read().await.clone()
516 }
517
518 pub async fn update_system_metrics(&self, memory_mb: u64, cpu_percent: f64, threads: u32) {
520 let mut system_metrics = self.system_metrics.write().await;
521 system_metrics.memory_usage_mb = memory_mb;
522 system_metrics.cpu_usage_percent = cpu_percent;
523 system_metrics.active_threads = threads;
524
525 self.update_time_series_data(memory_mb as f64, cpu_percent).await;
527 }
528
529 async fn update_time_series_data(&self, memory_mb: f64, cpu_percent: f64) {
531 let now = chrono::Utc::now();
532 let mut time_series = self.time_series.write().await;
533
534 time_series.memory_usage.push(TimeSeriesPoint {
536 timestamp: now,
537 value: memory_mb,
538 });
539
540 time_series.cpu_usage.push(TimeSeriesPoint {
542 timestamp: now,
543 value: cpu_percent,
544 });
545
546 let metrics = self.metrics.read().await;
548 time_series.request_count.push(TimeSeriesPoint {
549 timestamp: now,
550 value: metrics.total_requests as f64,
551 });
552
553 let avg_response_time = if !metrics.response_times.is_empty() {
555 metrics.response_times.iter().sum::<u64>() as f64 / metrics.response_times.len() as f64
556 } else {
557 0.0
558 };
559 time_series.response_time.push(TimeSeriesPoint {
560 timestamp: now,
561 value: avg_response_time,
562 });
563
564 const MAX_POINTS: usize = 100;
566 if time_series.memory_usage.len() > MAX_POINTS {
567 time_series.memory_usage.remove(0);
568 }
569 if time_series.cpu_usage.len() > MAX_POINTS {
570 time_series.cpu_usage.remove(0);
571 }
572 if time_series.request_count.len() > MAX_POINTS {
573 time_series.request_count.remove(0);
574 }
575 if time_series.response_time.len() > MAX_POINTS {
576 time_series.response_time.remove(0);
577 }
578 }
579
580 pub async fn get_system_metrics(&self) -> SystemMetrics {
582 self.system_metrics.read().await.clone()
583 }
584
585 pub async fn get_time_series_data(&self) -> TimeSeriesData {
587 self.time_series.read().await.clone()
588 }
589
590 pub async fn get_restart_status(&self) -> RestartStatus {
592 self.restart_status.read().await.clone()
593 }
594
595 pub async fn initiate_restart(&self, reason: String) -> Result<()> {
597 let mut status = self.restart_status.write().await;
598
599 if status.in_progress {
600 return Err(Error::generic("Restart already in progress".to_string()));
601 }
602
603 status.in_progress = true;
604 status.initiated_at = Some(chrono::Utc::now());
605 status.reason = Some(reason);
606 status.success = None;
607
608 Ok(())
609 }
610
611 pub async fn complete_restart(&self, success: bool) {
613 let mut status = self.restart_status.write().await;
614 status.in_progress = false;
615 status.success = Some(success);
616 }
617
618 pub async fn get_smoke_test_results(&self) -> Vec<SmokeTestResult> {
620 self.smoke_test_results.read().await.clone()
621 }
622
623 pub async fn update_smoke_test_result(&self, result: SmokeTestResult) {
625 let mut results = self.smoke_test_results.write().await;
626
627 if let Some(existing) = results.iter_mut().find(|r| r.id == result.id) {
629 *existing = result;
630 } else {
631 results.push(result);
632 }
633
634 if results.len() > 100 {
636 results.remove(0);
637 }
638 }
639
640 pub async fn clear_smoke_test_results(&self) {
642 let mut results = self.smoke_test_results.write().await;
643 results.clear();
644 }
645
646 async fn update_time_series_on_request(&self, response_time_ms: u64, total_requests: u64) {
648 let now = chrono::Utc::now();
649 let mut time_series = self.time_series.write().await;
650
651 time_series.request_count.push(TimeSeriesPoint {
653 timestamp: now,
654 value: total_requests as f64,
655 });
656
657 time_series.response_time.push(TimeSeriesPoint {
659 timestamp: now,
660 value: response_time_ms as f64,
661 });
662
663 const MAX_POINTS: usize = 100;
665 if time_series.request_count.len() > MAX_POINTS {
666 time_series.request_count.remove(0);
667 }
668 if time_series.response_time.len() > MAX_POINTS {
669 time_series.response_time.remove(0);
670 }
671 }
672
673 pub async fn get_config(&self) -> ConfigurationState {
675 self.config.read().await.clone()
676 }
677
678 pub async fn update_latency_config(
680 &self,
681 base_ms: u64,
682 jitter_ms: u64,
683 tag_overrides: HashMap<String, u64>,
684 ) {
685 let mut config = self.config.write().await;
686 config.latency_profile.base_ms = base_ms;
687 config.latency_profile.jitter_ms = jitter_ms;
688 config.latency_profile.tag_overrides = tag_overrides;
689 }
690
691 pub async fn update_fault_config(
693 &self,
694 enabled: bool,
695 failure_rate: f64,
696 status_codes: Vec<u16>,
697 ) {
698 let mut config = self.config.write().await;
699 config.fault_config.enabled = enabled;
700 config.fault_config.failure_rate = failure_rate;
701 config.fault_config.status_codes = status_codes;
702 }
703
704 pub async fn update_proxy_config(
706 &self,
707 enabled: bool,
708 upstream_url: Option<String>,
709 timeout_seconds: u64,
710 ) {
711 let mut config = self.config.write().await;
712 config.proxy_config.enabled = enabled;
713 config.proxy_config.upstream_url = upstream_url;
714 config.proxy_config.timeout_seconds = timeout_seconds;
715 }
716
717 pub async fn update_validation_config(
719 &self,
720 mode: String,
721 aggregate_errors: bool,
722 validate_responses: bool,
723 overrides: HashMap<String, String>,
724 ) {
725 let mut config = self.config.write().await;
726 config.validation_settings.mode = mode;
727 config.validation_settings.aggregate_errors = aggregate_errors;
728 config.validation_settings.validate_responses = validate_responses;
729 config.validation_settings.overrides = overrides;
730 }
731
732 pub async fn get_logs_filtered(&self, filter: &LogFilter) -> Vec<RequestLog> {
734 let logs = self.logs.read().await;
735
736 logs.iter()
737 .rev() .filter(|log| {
739 if let Some(ref method) = filter.method {
740 if log.method != *method {
741 return false;
742 }
743 }
744 if let Some(ref path_pattern) = filter.path_pattern {
745 if !log.path.contains(path_pattern) {
746 return false;
747 }
748 }
749 if let Some(status) = filter.status_code {
750 if log.status_code != status {
751 return false;
752 }
753 }
754 true
755 })
756 .take(filter.limit.unwrap_or(100))
757 .cloned()
758 .collect()
759 }
760
761 pub async fn clear_logs(&self) {
763 let mut logs = self.logs.write().await;
764 logs.clear();
765 }
766}
767
768pub async fn serve_admin_html() -> Html<&'static str> {
770 Html(crate::get_admin_html())
771}
772
773pub async fn serve_admin_css() -> ([(http::HeaderName, &'static str); 1], &'static str) {
775 ([(http::header::CONTENT_TYPE, "text/css")], crate::get_admin_css())
776}
777
778pub async fn serve_admin_js() -> ([(http::HeaderName, &'static str); 1], &'static str) {
780 ([(http::header::CONTENT_TYPE, "application/javascript")], crate::get_admin_js())
781}
782
/// Build the full dashboard payload: server statuses, aggregate metrics,
/// per-route stats, recent logs, and system info.
///
/// Metrics are recomputed from the centralized logger when it is available;
/// otherwise the locally accumulated `AdminState` metrics are used.
pub async fn get_dashboard(State(state): State<AdminState>) -> Json<ApiResponse<DashboardData>> {
    let uptime = Utc::now().signed_duration_since(state.start_time).num_seconds() as u64;

    let system_metrics = state.get_system_metrics().await;
    let _config = state.get_config().await;

    let (recent_logs, calculated_metrics): (Vec<RequestLog>, RequestMetrics) =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            // Recompute aggregates from all retained logs; show the 20 newest.
            let all_logs = global_logger.get_recent_logs(None).await;
            let recent_logs_subset = global_logger.get_recent_logs(Some(20)).await;

            let total_requests = all_logs.len() as u64;
            let mut requests_by_endpoint = HashMap::new();
            let mut errors_by_endpoint = HashMap::new();
            let mut response_times = Vec::new();
            let mut last_request_by_endpoint = HashMap::new();

            for log in &all_logs {
                let endpoint_key = format!("{} {}", log.method, log.path);
                *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;

                if log.status_code >= 400 {
                    *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
                }

                response_times.push(log.response_time_ms);
                last_request_by_endpoint.insert(endpoint_key, log.timestamp);
            }

            let calculated_metrics = RequestMetrics {
                total_requests,
                // Connection counts are not derivable from logs.
                active_connections: 0,
                requests_by_endpoint,
                response_times,
                response_times_by_endpoint: HashMap::new(),
                errors_by_endpoint,
                last_request_by_endpoint,
            };

            // Convert the logger's entries into the admin `RequestLog` shape.
            let recent_logs = recent_logs_subset
                .into_iter()
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            (recent_logs, calculated_metrics)
        } else {
            // Fallback: locally accumulated state (10 newest logs).
            let logs = state.logs.read().await;
            let recent_logs = logs.iter().rev().take(10).cloned().collect();
            let metrics = state.get_metrics().await;
            (recent_logs, metrics)
        };

    let metrics = calculated_metrics;

    let system_info = SystemInfo {
        version: env!("CARGO_PKG_VERSION").to_string(),
        uptime_seconds: uptime,
        memory_usage_mb: system_metrics.memory_usage_mb,
        cpu_usage_percent: system_metrics.cpu_usage_percent,
        active_threads: system_metrics.active_threads as usize,
        total_routes: metrics.requests_by_endpoint.len(),
        total_fixtures: count_fixtures().unwrap_or(0),
    };

    // NOTE(review): the /2 and /3 divisors below look like placeholder
    // heuristics for splitting one shared connection count across server
    // types — confirm intended semantics.
    let servers = vec![
        ServerStatus {
            server_type: "HTTP".to_string(),
            address: state.http_server_addr.map(|addr| addr.to_string()),
            running: state.http_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections,
            total_requests: count_requests_by_server_type(&metrics, "HTTP"),
        },
        ServerStatus {
            server_type: "WebSocket".to_string(),
            address: state.ws_server_addr.map(|addr| addr.to_string()),
            running: state.ws_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 2,
            total_requests: count_requests_by_server_type(&metrics, "WebSocket"),
        },
        ServerStatus {
            server_type: "gRPC".to_string(),
            address: state.grpc_server_addr.map(|addr| addr.to_string()),
            running: state.grpc_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 3,
            total_requests: count_requests_by_server_type(&metrics, "gRPC"),
        },
    ];

    // Derive per-route rows from the "METHOD path" endpoint keys.
    let mut routes = Vec::new();
    for (endpoint, count) in &metrics.requests_by_endpoint {
        let parts: Vec<&str> = endpoint.splitn(2, ' ').collect();
        if parts.len() == 2 {
            let method = parts[0].to_string();
            let path = parts[1].to_string();
            let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);

            routes.push(RouteInfo {
                method: Some(method.clone()),
                path: path.clone(),
                priority: 0,
                has_fixtures: route_has_fixtures(&method, &path),
                latency_ms: calculate_endpoint_latency(&metrics, endpoint),
                request_count: *count,
                last_request: get_endpoint_last_request(&metrics, endpoint),
                error_count,
            });
        }
    }

    let dashboard = DashboardData {
        server_info: ServerInfo {
            version: env!("CARGO_PKG_VERSION").to_string(),
            build_time: option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("unknown").to_string(),
            git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("unknown").to_string(),
            http_server: state.http_server_addr.map(|addr| addr.to_string()),
            ws_server: state.ws_server_addr.map(|addr| addr.to_string()),
            grpc_server: state.grpc_server_addr.map(|addr| addr.to_string()),
            graphql_server: state.graphql_server_addr.map(|addr| addr.to_string()),
            api_enabled: state.api_enabled,
            admin_port: state.admin_port,
        },
        system_info: DashboardSystemInfo {
            os: std::env::consts::OS.to_string(),
            arch: std::env::consts::ARCH.to_string(),
            uptime,
            // Converted from MB to bytes for this field.
            memory_usage: system_metrics.memory_usage_mb * 1024 * 1024,
        },
        metrics: SimpleMetricsData {
            total_requests: metrics.requests_by_endpoint.values().sum(),
            active_requests: metrics.active_connections,
            average_response_time: if metrics.response_times.is_empty() {
                0.0
            } else {
                metrics.response_times.iter().sum::<u64>() as f64
                    / metrics.response_times.len() as f64
            },
            error_rate: {
                let total_requests = metrics.requests_by_endpoint.values().sum::<u64>();
                let total_errors = metrics.errors_by_endpoint.values().sum::<u64>();
                if total_requests == 0 {
                    0.0
                } else {
                    total_errors as f64 / total_requests as f64
                }
            },
        },
        servers,
        recent_logs,
        system: system_info,
    };

    Json(ApiResponse::success(dashboard))
}
962
963pub async fn get_routes(State(state): State<AdminState>) -> impl IntoResponse {
965 if let Some(http_addr) = state.http_server_addr {
966 let url = format!("http://{}/__mockforge/routes", http_addr);
968 if let Ok(response) = reqwest::get(&url).await {
969 if response.status().is_success() {
970 if let Ok(body) = response.text().await {
971 return (StatusCode::OK, [("content-type", "application/json")], body);
972 }
973 }
974 }
975 }
976
977 (
979 StatusCode::OK,
980 [("content-type", "application/json")],
981 r#"{"routes":[]}"#.to_string(),
982 )
983}
984
985pub async fn get_server_info(State(state): State<AdminState>) -> Json<serde_json::Value> {
987 Json(json!({
988 "http_server": state.http_server_addr.map(|addr| addr.to_string()),
989 "ws_server": state.ws_server_addr.map(|addr| addr.to_string()),
990 "grpc_server": state.grpc_server_addr.map(|addr| addr.to_string()),
991 "admin_port": state.admin_port
992 }))
993}
994
995pub async fn get_health() -> Json<HealthCheck> {
997 Json(
998 HealthCheck::healthy()
999 .with_service("http".to_string(), "healthy".to_string())
1000 .with_service("websocket".to_string(), "healthy".to_string())
1001 .with_service("grpc".to_string(), "healthy".to_string()),
1002 )
1003}
1004
1005pub async fn get_logs(
1007 State(state): State<AdminState>,
1008 Query(params): Query<HashMap<String, String>>,
1009) -> Json<ApiResponse<Vec<RequestLog>>> {
1010 let mut filter = LogFilter::default();
1011
1012 if let Some(method) = params.get("method") {
1013 filter.method = Some(method.clone());
1014 }
1015 if let Some(path) = params.get("path") {
1016 filter.path_pattern = Some(path.clone());
1017 }
1018 if let Some(status) = params.get("status").and_then(|s| s.parse().ok()) {
1019 filter.status_code = Some(status);
1020 }
1021 if let Some(limit) = params.get("limit").and_then(|s| s.parse().ok()) {
1022 filter.limit = Some(limit);
1023 }
1024
1025 let logs = if let Some(global_logger) = mockforge_core::get_global_logger() {
1027 let centralized_logs = global_logger.get_recent_logs(filter.limit).await;
1029
1030 centralized_logs
1032 .into_iter()
1033 .filter(|log| {
1034 if let Some(ref method) = filter.method {
1035 if log.method != *method {
1036 return false;
1037 }
1038 }
1039 if let Some(ref path_pattern) = filter.path_pattern {
1040 if !log.path.contains(path_pattern) {
1041 return false;
1042 }
1043 }
1044 if let Some(status) = filter.status_code {
1045 if log.status_code != status {
1046 return false;
1047 }
1048 }
1049 true
1050 })
1051 .map(|log| RequestLog {
1052 id: log.id,
1053 timestamp: log.timestamp,
1054 method: log.method,
1055 path: log.path,
1056 status_code: log.status_code,
1057 response_time_ms: log.response_time_ms,
1058 client_ip: log.client_ip,
1059 user_agent: log.user_agent,
1060 headers: log.headers,
1061 response_size_bytes: log.response_size_bytes,
1062 error_message: log.error_message,
1063 })
1064 .collect()
1065 } else {
1066 state.get_logs_filtered(&filter).await
1068 };
1069
1070 Json(ApiResponse::success(logs))
1071}
1072
/// Maximum number of recent logs fetched per SSE poll cycle.
const RECENT_LOGS_LIMIT: usize = 20;
/// Only logs newer than this many minutes are streamed over SSE.
const RECENT_LOGS_TTL_MINUTES: i64 = 5;
1076
1077pub async fn logs_sse(
1079 State(_state): State<AdminState>,
1080) -> Sse<impl Stream<Item = std::result::Result<Event, Infallible>>> {
1081 tracing::info!("SSE endpoint /logs/sse accessed - starting real-time log streaming for recent requests only");
1082
1083 let stream = stream::unfold(std::collections::HashSet::new(), |mut seen_ids| async move {
1084 tokio::time::sleep(Duration::from_millis(500)).await;
1085
1086 if let Some(global_logger) = mockforge_core::get_global_logger() {
1088 let centralized_logs = global_logger.get_recent_logs(Some(RECENT_LOGS_LIMIT)).await;
1089
1090 tracing::debug!(
1091 "SSE: Checking logs - total logs: {}, seen logs: {}",
1092 centralized_logs.len(),
1093 seen_ids.len()
1094 );
1095
1096 let now = chrono::Utc::now();
1098 let ttl_cutoff = now - chrono::Duration::minutes(RECENT_LOGS_TTL_MINUTES);
1099
1100 let new_logs: Vec<RequestLog> = centralized_logs
1102 .into_iter()
1103 .filter(|log| {
1104 log.timestamp > ttl_cutoff && !seen_ids.contains(&log.id)
1106 })
1107 .map(|log| RequestLog {
1108 id: log.id,
1109 timestamp: log.timestamp,
1110 method: log.method,
1111 path: log.path,
1112 status_code: log.status_code,
1113 response_time_ms: log.response_time_ms,
1114 client_ip: log.client_ip,
1115 user_agent: log.user_agent,
1116 headers: log.headers,
1117 response_size_bytes: log.response_size_bytes,
1118 error_message: log.error_message,
1119 })
1120 .collect();
1121
1122 for log in &new_logs {
1124 seen_ids.insert(log.id.clone());
1125 }
1126
1127 if !new_logs.is_empty() {
1129 tracing::info!("SSE: Sending {} new logs to client", new_logs.len());
1130
1131 let event_data = serde_json::to_string(&new_logs).unwrap_or_default();
1132 let event = Ok(Event::default().event("new_logs").data(event_data));
1133
1134 return Some((event, seen_ids));
1135 }
1136 }
1137
1138 let event = Ok(Event::default().event("keep_alive").data(""));
1140 Some((event, seen_ids))
1141 });
1142
1143 Sse::new(stream).keep_alive(
1144 axum::response::sse::KeepAlive::new()
1145 .interval(Duration::from_secs(15))
1146 .text("keep-alive-text"),
1147 )
1148}
1149
1150pub async fn get_metrics(State(state): State<AdminState>) -> Json<ApiResponse<MetricsData>> {
1152 let metrics = if let Some(global_logger) = mockforge_core::get_global_logger() {
1154 let all_logs = global_logger.get_recent_logs(None).await;
1155
1156 let total_requests = all_logs.len() as u64;
1157 let mut requests_by_endpoint = HashMap::new();
1158 let mut errors_by_endpoint = HashMap::new();
1159 let mut response_times = Vec::new();
1160 let mut last_request_by_endpoint = HashMap::new();
1161
1162 for log in &all_logs {
1163 let endpoint_key = format!("{} {}", log.method, log.path);
1164 *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1165
1166 if log.status_code >= 400 {
1167 *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1168 }
1169
1170 response_times.push(log.response_time_ms);
1171 last_request_by_endpoint.insert(endpoint_key, log.timestamp);
1172 }
1173
1174 RequestMetrics {
1175 total_requests,
1176 active_connections: 0,
1177 requests_by_endpoint,
1178 response_times,
1179 response_times_by_endpoint: HashMap::new(),
1180 errors_by_endpoint,
1181 last_request_by_endpoint,
1182 }
1183 } else {
1184 state.get_metrics().await
1185 };
1186
1187 let system_metrics = state.get_system_metrics().await;
1188 let time_series = state.get_time_series_data().await;
1189
1190 let mut response_times = metrics.response_times.clone();
1192 response_times.sort();
1193
1194 let p50 = if !response_times.is_empty() {
1195 response_times[response_times.len() / 2] as u64
1196 } else {
1197 0
1198 };
1199
1200 let p95 = if !response_times.is_empty() {
1201 let idx = (response_times.len() as f64 * 0.95) as usize;
1202 response_times[response_times.len().min(idx)] as u64
1203 } else {
1204 0
1205 };
1206
1207 let p99 = if !response_times.is_empty() {
1208 let idx = (response_times.len() as f64 * 0.99) as usize;
1209 response_times[response_times.len().min(idx)] as u64
1210 } else {
1211 0
1212 };
1213
1214 let mut error_rate_by_endpoint = HashMap::new();
1216 for (endpoint, total_count) in &metrics.requests_by_endpoint {
1217 let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
1218 let error_rate = if *total_count > 0 {
1219 error_count as f64 / *total_count as f64
1220 } else {
1221 0.0
1222 };
1223 error_rate_by_endpoint.insert(endpoint.clone(), error_rate);
1224 }
1225
1226 let memory_usage_over_time = if time_series.memory_usage.is_empty() {
1229 vec![(Utc::now(), system_metrics.memory_usage_mb)]
1230 } else {
1231 time_series
1232 .memory_usage
1233 .iter()
1234 .map(|point| (point.timestamp, point.value as u64))
1235 .collect()
1236 };
1237
1238 let cpu_usage_over_time = if time_series.cpu_usage.is_empty() {
1239 vec![(Utc::now(), system_metrics.cpu_usage_percent)]
1240 } else {
1241 time_series
1242 .cpu_usage
1243 .iter()
1244 .map(|point| (point.timestamp, point.value))
1245 .collect()
1246 };
1247
1248 let metrics_data = MetricsData {
1249 requests_by_endpoint: metrics.requests_by_endpoint,
1250 response_time_percentiles: HashMap::from([
1251 ("p50".to_string(), p50),
1252 ("p95".to_string(), p95),
1253 ("p99".to_string(), p99),
1254 ]),
1255 error_rate_by_endpoint,
1256 memory_usage_over_time,
1257 cpu_usage_over_time,
1258 };
1259
1260 Json(ApiResponse::success(metrics_data))
1261}
1262
1263pub async fn update_latency(
1265 State(state): State<AdminState>,
1266 Json(update): Json<ConfigUpdate>,
1267) -> Json<ApiResponse<String>> {
1268 if update.config_type != "latency" {
1269 return Json(ApiResponse::error("Invalid config type".to_string()));
1270 }
1271
1272 let base_ms = update.data.get("base_ms").and_then(|v| v.as_u64()).unwrap_or(50);
1274
1275 let jitter_ms = update.data.get("jitter_ms").and_then(|v| v.as_u64()).unwrap_or(20);
1276
1277 let tag_overrides = update
1278 .data
1279 .get("tag_overrides")
1280 .and_then(|v| v.as_object())
1281 .map(|obj| obj.iter().filter_map(|(k, v)| v.as_u64().map(|val| (k.clone(), val))).collect())
1282 .unwrap_or_default();
1283
1284 state.update_latency_config(base_ms, jitter_ms, tag_overrides).await;
1286
1287 tracing::info!("Updated latency profile: base_ms={}, jitter_ms={}", base_ms, jitter_ms);
1288
1289 Json(ApiResponse::success("Latency profile updated".to_string()))
1290}
1291
1292pub async fn update_faults(
1294 State(state): State<AdminState>,
1295 Json(update): Json<ConfigUpdate>,
1296) -> Json<ApiResponse<String>> {
1297 if update.config_type != "faults" {
1298 return Json(ApiResponse::error("Invalid config type".to_string()));
1299 }
1300
1301 let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1303
1304 let failure_rate = update.data.get("failure_rate").and_then(|v| v.as_f64()).unwrap_or(0.0);
1305
1306 let status_codes = update
1307 .data
1308 .get("status_codes")
1309 .and_then(|v| v.as_array())
1310 .map(|arr| arr.iter().filter_map(|v| v.as_u64().map(|n| n as u16)).collect())
1311 .unwrap_or_else(|| vec![500, 502, 503]);
1312
1313 state.update_fault_config(enabled, failure_rate, status_codes).await;
1315
1316 tracing::info!(
1317 "Updated fault configuration: enabled={}, failure_rate={}",
1318 enabled,
1319 failure_rate
1320 );
1321
1322 Json(ApiResponse::success("Fault configuration updated".to_string()))
1323}
1324
1325pub async fn update_proxy(
1327 State(state): State<AdminState>,
1328 Json(update): Json<ConfigUpdate>,
1329) -> Json<ApiResponse<String>> {
1330 if update.config_type != "proxy" {
1331 return Json(ApiResponse::error("Invalid config type".to_string()));
1332 }
1333
1334 let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1336
1337 let upstream_url =
1338 update.data.get("upstream_url").and_then(|v| v.as_str()).map(|s| s.to_string());
1339
1340 let timeout_seconds = update.data.get("timeout_seconds").and_then(|v| v.as_u64()).unwrap_or(30);
1341
1342 state.update_proxy_config(enabled, upstream_url.clone(), timeout_seconds).await;
1344
1345 tracing::info!(
1346 "Updated proxy configuration: enabled={}, upstream_url={:?}",
1347 enabled,
1348 upstream_url
1349 );
1350
1351 Json(ApiResponse::success("Proxy configuration updated".to_string()))
1352}
1353
1354pub async fn clear_logs(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
1356 state.clear_logs().await;
1358 tracing::info!("Cleared all request logs");
1359
1360 Json(ApiResponse::success("Logs cleared".to_string()))
1361}
1362
1363pub async fn restart_servers(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
1365 let current_status = state.get_restart_status().await;
1367 if current_status.in_progress {
1368 return Json(ApiResponse::error("Server restart already in progress".to_string()));
1369 }
1370
1371 if let Err(e) = state
1373 .initiate_restart("Manual restart requested via admin UI".to_string())
1374 .await
1375 {
1376 return Json(ApiResponse::error(format!("Failed to initiate restart: {}", e)));
1377 }
1378
1379 let state_clone = state.clone();
1381 tokio::spawn(async move {
1382 if let Err(e) = perform_server_restart(&state_clone).await {
1383 tracing::error!("Server restart failed: {}", e);
1384 state_clone.complete_restart(false).await;
1385 } else {
1386 tracing::info!("Server restart completed successfully");
1387 state_clone.complete_restart(true).await;
1388 }
1389 });
1390
1391 tracing::info!("Server restart initiated via admin UI");
1392 Json(ApiResponse::success(
1393 "Server restart initiated. Please wait for completion.".to_string(),
1394 ))
1395}
1396
1397async fn perform_server_restart(_state: &AdminState) -> Result<()> {
1399 let current_pid = std::process::id();
1401 tracing::info!("Initiating restart for process PID: {}", current_pid);
1402
1403 let parent_pid = get_parent_process_id(current_pid).await?;
1405 tracing::info!("Found parent process PID: {}", parent_pid);
1406
1407 if let Ok(()) = restart_via_parent_signal(parent_pid).await {
1409 tracing::info!("Restart initiated via parent process signal");
1410 return Ok(());
1411 }
1412
1413 if let Ok(()) = restart_via_process_replacement().await {
1415 tracing::info!("Restart initiated via process replacement");
1416 return Ok(());
1417 }
1418
1419 restart_via_script().await
1421}
1422
1423async fn get_parent_process_id(pid: u32) -> Result<u32> {
1425 #[cfg(target_os = "linux")]
1427 {
1428 let stat_path = format!("/proc/{}/stat", pid);
1430 if let Ok(ppid) = tokio::task::spawn_blocking(move || -> Result<u32> {
1431 let content = std::fs::read_to_string(&stat_path)
1432 .map_err(|e| Error::generic(format!("Failed to read {}: {}", stat_path, e)))?;
1433
1434 let fields: Vec<&str> = content.split_whitespace().collect();
1435 if fields.len() > 3 {
1436 fields[3]
1437 .parse::<u32>()
1438 .map_err(|e| Error::generic(format!("Failed to parse PPID: {}", e)))
1439 } else {
1440 Err(Error::generic("Insufficient fields in /proc/pid/stat".to_string()))
1441 }
1442 })
1443 .await
1444 {
1445 return ppid;
1446 }
1447 }
1448
1449 Ok(1) }
1452
1453async fn restart_via_parent_signal(parent_pid: u32) -> Result<()> {
1455 #[cfg(unix)]
1456 {
1457 use std::process::Command;
1458
1459 let output = Command::new("kill")
1461 .args(["-TERM", &parent_pid.to_string()])
1462 .output()
1463 .map_err(|e| Error::generic(format!("Failed to send signal: {}", e)))?;
1464
1465 if !output.status.success() {
1466 return Err(Error::generic(
1467 "Failed to send restart signal to parent process".to_string(),
1468 ));
1469 }
1470
1471 tokio::time::sleep(Duration::from_millis(100)).await;
1473 Ok(())
1474 }
1475
1476 #[cfg(not(unix))]
1477 {
1478 Err(Error::generic(
1479 "Signal-based restart not supported on this platform".to_string(),
1480 ))
1481 }
1482}
1483
1484async fn restart_via_process_replacement() -> Result<()> {
1486 let current_exe = std::env::current_exe()
1488 .map_err(|e| Error::generic(format!("Failed to get current executable: {}", e)))?;
1489
1490 let args: Vec<String> = std::env::args().collect();
1492
1493 tracing::info!("Restarting with command: {:?}", args);
1494
1495 let mut child = Command::new(¤t_exe)
1497 .args(&args[1..]) .stdout(Stdio::inherit())
1499 .stderr(Stdio::inherit())
1500 .spawn()
1501 .map_err(|e| Error::generic(format!("Failed to start new process: {}", e)))?;
1502
1503 tokio::time::sleep(Duration::from_millis(500)).await;
1505
1506 match child.try_wait() {
1508 Ok(Some(status)) => {
1509 if status.success() {
1510 tracing::info!("New process started successfully");
1511 Ok(())
1512 } else {
1513 Err(Error::generic("New process exited with error".to_string()))
1514 }
1515 }
1516 Ok(None) => {
1517 tracing::info!("New process is running, exiting current process");
1518 std::process::exit(0);
1520 }
1521 Err(e) => Err(Error::generic(format!("Failed to check new process status: {}", e))),
1522 }
1523}
1524
1525async fn restart_via_script() -> Result<()> {
1527 let script_paths = ["./scripts/restart.sh", "./restart.sh", "restart.sh"];
1529
1530 for script_path in &script_paths {
1531 if std::path::Path::new(script_path).exists() {
1532 tracing::info!("Using restart script: {}", script_path);
1533
1534 let output = Command::new("bash")
1535 .arg(script_path)
1536 .output()
1537 .map_err(|e| Error::generic(format!("Failed to execute restart script: {}", e)))?;
1538
1539 if output.status.success() {
1540 return Ok(());
1541 } else {
1542 tracing::warn!(
1543 "Restart script failed: {}",
1544 String::from_utf8_lossy(&output.stderr)
1545 );
1546 }
1547 }
1548 }
1549
1550 let clear_script = "./scripts/clear-ports.sh";
1552 if std::path::Path::new(clear_script).exists() {
1553 tracing::info!("Using clear-ports script as fallback");
1554
1555 let _ = Command::new("bash").arg(clear_script).output();
1556 }
1557
1558 Err(Error::generic(
1559 "No restart mechanism available. Please restart manually.".to_string(),
1560 ))
1561}
1562
1563pub async fn get_restart_status(
1565 State(state): State<AdminState>,
1566) -> Json<ApiResponse<RestartStatus>> {
1567 let status = state.get_restart_status().await;
1568 Json(ApiResponse::success(status))
1569}
1570
1571pub async fn get_config(State(state): State<AdminState>) -> Json<ApiResponse<serde_json::Value>> {
1573 let config_state = state.get_config().await;
1574
1575 let config = json!({
1576 "latency": {
1577 "enabled": true,
1578 "base_ms": config_state.latency_profile.base_ms,
1579 "jitter_ms": config_state.latency_profile.jitter_ms,
1580 "tag_overrides": config_state.latency_profile.tag_overrides
1581 },
1582 "faults": {
1583 "enabled": config_state.fault_config.enabled,
1584 "failure_rate": config_state.fault_config.failure_rate,
1585 "status_codes": config_state.fault_config.status_codes
1586 },
1587 "proxy": {
1588 "enabled": config_state.proxy_config.enabled,
1589 "upstream_url": config_state.proxy_config.upstream_url,
1590 "timeout_seconds": config_state.proxy_config.timeout_seconds
1591 },
1592 "validation": {
1593 "mode": config_state.validation_settings.mode,
1594 "aggregate_errors": config_state.validation_settings.aggregate_errors,
1595 "validate_responses": config_state.validation_settings.validate_responses,
1596 "overrides": config_state.validation_settings.overrides
1597 }
1598 });
1599
1600 Json(ApiResponse::success(config))
1601}
1602
1603pub fn count_fixtures() -> Result<usize> {
1605 let fixtures_dir =
1607 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1608 let fixtures_path = std::path::Path::new(&fixtures_dir);
1609
1610 if !fixtures_path.exists() {
1611 return Ok(0);
1612 }
1613
1614 let mut total_count = 0;
1615
1616 let http_fixtures_path = fixtures_path.join("http");
1618 if http_fixtures_path.exists() {
1619 total_count += count_fixtures_in_directory(&http_fixtures_path)?;
1620 }
1621
1622 let ws_fixtures_path = fixtures_path.join("websocket");
1624 if ws_fixtures_path.exists() {
1625 total_count += count_fixtures_in_directory(&ws_fixtures_path)?;
1626 }
1627
1628 let grpc_fixtures_path = fixtures_path.join("grpc");
1630 if grpc_fixtures_path.exists() {
1631 total_count += count_fixtures_in_directory(&grpc_fixtures_path)?;
1632 }
1633
1634 Ok(total_count)
1635}
1636
1637fn count_fixtures_in_directory(dir_path: &std::path::Path) -> Result<usize> {
1639 let mut count = 0;
1640
1641 if let Ok(entries) = std::fs::read_dir(dir_path) {
1642 for entry in entries {
1643 let entry = entry
1644 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
1645 let path = entry.path();
1646
1647 if path.is_dir() {
1648 count += count_fixtures_in_directory(&path)?;
1650 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
1651 count += 1;
1653 }
1654 }
1655 }
1656
1657 Ok(count)
1658}
1659
/// Whether any recorded fixture exists for the given route.
///
/// Looks under `<fixtures>/http/<method>/<path-hash>/` for HTTP methods and
/// additionally under `<fixtures>/websocket/<path-hash>/` when the method is
/// `WS`. The path hash replaces `/` and `:` with `_`.
pub fn route_has_fixtures(method: &str, path: &str) -> bool {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return false;
    }

    // True when `dir` directly contains at least one `.json` file.
    let dir_has_json = |dir: &std::path::Path| -> bool {
        std::fs::read_dir(dir)
            .map(|entries| {
                entries
                    .flatten()
                    .any(|e| e.path().extension().and_then(|s| s.to_str()) == Some("json"))
            })
            .unwrap_or(false)
    };

    let path_hash = path.replace(['/', ':'], "_");
    let http_dir = fixtures_path.join("http").join(method.to_lowercase()).join(&path_hash);
    if http_dir.exists() && dir_has_json(&http_dir) {
        return true;
    }

    // WebSocket fixtures live in their own tree keyed only by path.
    if method.to_uppercase() == "WS" {
        let ws_dir = fixtures_path.join("websocket").join(&path_hash);
        if ws_dir.exists() && dir_has_json(&ws_dir) {
            return true;
        }
    }

    false
}
1704
1705fn calculate_endpoint_latency(metrics: &RequestMetrics, endpoint: &str) -> Option<u64> {
1707 metrics.response_times_by_endpoint.get(endpoint).and_then(|times| {
1708 if times.is_empty() {
1709 None
1710 } else {
1711 let sum: u64 = times.iter().sum();
1712 Some(sum / times.len() as u64)
1713 }
1714 })
1715}
1716
1717fn get_endpoint_last_request(
1719 metrics: &RequestMetrics,
1720 endpoint: &str,
1721) -> Option<chrono::DateTime<chrono::Utc>> {
1722 metrics.last_request_by_endpoint.get(endpoint).copied()
1723}
1724
1725fn count_requests_by_server_type(metrics: &RequestMetrics, server_type: &str) -> u64 {
1727 match server_type {
1728 "HTTP" => {
1729 metrics
1731 .requests_by_endpoint
1732 .iter()
1733 .filter(|(endpoint, _)| {
1734 let method = endpoint.split(' ').next().unwrap_or("");
1735 matches!(
1736 method,
1737 "GET" | "POST" | "PUT" | "DELETE" | "PATCH" | "HEAD" | "OPTIONS"
1738 )
1739 })
1740 .map(|(_, count)| count)
1741 .sum()
1742 }
1743 "WebSocket" => {
1744 metrics
1746 .requests_by_endpoint
1747 .iter()
1748 .filter(|(endpoint, _)| {
1749 let method = endpoint.split(' ').next().unwrap_or("");
1750 method == "WS"
1751 })
1752 .map(|(_, count)| count)
1753 .sum()
1754 }
1755 "gRPC" => {
1756 metrics
1758 .requests_by_endpoint
1759 .iter()
1760 .filter(|(endpoint, _)| {
1761 let method = endpoint.split(' ').next().unwrap_or("");
1762 method == "gRPC"
1763 })
1764 .map(|(_, count)| count)
1765 .sum()
1766 }
1767 _ => 0,
1768 }
1769}
1770
1771pub async fn get_fixtures() -> Json<ApiResponse<Vec<FixtureInfo>>> {
1773 match scan_fixtures_directory() {
1774 Ok(fixtures) => Json(ApiResponse::success(fixtures)),
1775 Err(e) => {
1776 tracing::error!("Failed to scan fixtures directory: {}", e);
1777 Json(ApiResponse::error(format!("Failed to load fixtures: {}", e)))
1778 }
1779 }
1780}
1781
1782fn scan_fixtures_directory() -> Result<Vec<FixtureInfo>> {
1784 let fixtures_dir =
1785 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1786 let fixtures_path = std::path::Path::new(&fixtures_dir);
1787
1788 if !fixtures_path.exists() {
1789 tracing::warn!("Fixtures directory does not exist: {}", fixtures_dir);
1790 return Ok(Vec::new());
1791 }
1792
1793 let mut all_fixtures = Vec::new();
1794
1795 let http_fixtures = scan_protocol_fixtures(fixtures_path, "http")?;
1797 all_fixtures.extend(http_fixtures);
1798
1799 let ws_fixtures = scan_protocol_fixtures(fixtures_path, "websocket")?;
1801 all_fixtures.extend(ws_fixtures);
1802
1803 let grpc_fixtures = scan_protocol_fixtures(fixtures_path, "grpc")?;
1805 all_fixtures.extend(grpc_fixtures);
1806
1807 all_fixtures.sort_by(|a, b| b.saved_at.cmp(&a.saved_at));
1809
1810 tracing::info!("Found {} fixtures in directory: {}", all_fixtures.len(), fixtures_dir);
1811 Ok(all_fixtures)
1812}
1813
1814fn scan_protocol_fixtures(
1816 fixtures_path: &std::path::Path,
1817 protocol: &str,
1818) -> Result<Vec<FixtureInfo>> {
1819 let protocol_path = fixtures_path.join(protocol);
1820 let mut fixtures = Vec::new();
1821
1822 if !protocol_path.exists() {
1823 return Ok(fixtures);
1824 }
1825
1826 if let Ok(entries) = std::fs::read_dir(&protocol_path) {
1828 for entry in entries {
1829 let entry = entry
1830 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
1831 let path = entry.path();
1832
1833 if path.is_dir() {
1834 let sub_fixtures = scan_directory_recursive(&path, protocol)?;
1836 fixtures.extend(sub_fixtures);
1837 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
1838 if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
1840 fixtures.push(fixture);
1841 }
1842 }
1843 }
1844 }
1845
1846 Ok(fixtures)
1847}
1848
1849fn scan_directory_recursive(
1851 dir_path: &std::path::Path,
1852 protocol: &str,
1853) -> Result<Vec<FixtureInfo>> {
1854 let mut fixtures = Vec::new();
1855
1856 if let Ok(entries) = std::fs::read_dir(dir_path) {
1857 for entry in entries {
1858 let entry = entry
1859 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
1860 let path = entry.path();
1861
1862 if path.is_dir() {
1863 let sub_fixtures = scan_directory_recursive(&path, protocol)?;
1865 fixtures.extend(sub_fixtures);
1866 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
1867 if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
1869 fixtures.push(fixture);
1870 }
1871 }
1872 }
1873 }
1874
1875 Ok(fixtures)
1876}
1877
1878fn parse_fixture_file_sync(file_path: &std::path::Path, protocol: &str) -> Result<FixtureInfo> {
1880 let metadata = std::fs::metadata(file_path)
1882 .map_err(|e| Error::generic(format!("Failed to read file metadata: {}", e)))?;
1883
1884 let file_size = metadata.len();
1885 let modified_time = metadata
1886 .modified()
1887 .map_err(|e| Error::generic(format!("Failed to get file modification time: {}", e)))?;
1888
1889 let saved_at = chrono::DateTime::from(modified_time);
1890
1891 let content = std::fs::read_to_string(file_path)
1893 .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
1894
1895 let fixture_data: serde_json::Value = serde_json::from_str(&content)
1896 .map_err(|e| Error::generic(format!("Failed to parse fixture JSON: {}", e)))?;
1897
1898 let (method, path) = extract_method_and_path(&fixture_data, protocol)?;
1900
1901 let id = generate_fixture_id(file_path, &content);
1903
1904 let fingerprint = extract_fingerprint(file_path, &fixture_data)?;
1906
1907 let fixtures_dir =
1909 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1910 let fixtures_path = std::path::Path::new(&fixtures_dir);
1911 let file_path_str = file_path
1912 .strip_prefix(fixtures_path)
1913 .unwrap_or(file_path)
1914 .to_string_lossy()
1915 .to_string();
1916
1917 Ok(FixtureInfo {
1918 id,
1919 protocol: protocol.to_string(),
1920 method,
1921 path,
1922 saved_at,
1923 file_size,
1924 file_path: file_path_str,
1925 fingerprint,
1926 metadata: fixture_data,
1927 })
1928}
1929
1930fn extract_method_and_path(
1932 fixture_data: &serde_json::Value,
1933 protocol: &str,
1934) -> Result<(String, String)> {
1935 match protocol {
1936 "http" => {
1937 let method = fixture_data
1939 .get("request")
1940 .and_then(|req| req.get("method"))
1941 .and_then(|m| m.as_str())
1942 .unwrap_or("UNKNOWN")
1943 .to_uppercase();
1944
1945 let path = fixture_data
1946 .get("request")
1947 .and_then(|req| req.get("path"))
1948 .and_then(|p| p.as_str())
1949 .unwrap_or("/unknown")
1950 .to_string();
1951
1952 Ok((method, path))
1953 }
1954 "websocket" => {
1955 let path = fixture_data
1957 .get("path")
1958 .and_then(|p| p.as_str())
1959 .or_else(|| {
1960 fixture_data
1961 .get("request")
1962 .and_then(|req| req.get("path"))
1963 .and_then(|p| p.as_str())
1964 })
1965 .unwrap_or("/ws")
1966 .to_string();
1967
1968 Ok(("WS".to_string(), path))
1969 }
1970 "grpc" => {
1971 let service =
1973 fixture_data.get("service").and_then(|s| s.as_str()).unwrap_or("UnknownService");
1974
1975 let method =
1976 fixture_data.get("method").and_then(|m| m.as_str()).unwrap_or("UnknownMethod");
1977
1978 let path = format!("/{}/{}", service, method);
1979 Ok(("gRPC".to_string(), path))
1980 }
1981 _ => {
1982 let path = fixture_data
1983 .get("path")
1984 .and_then(|p| p.as_str())
1985 .unwrap_or("/unknown")
1986 .to_string();
1987 Ok((protocol.to_uppercase(), path))
1988 }
1989 }
1990}
1991
/// Stable identifier for a fixture, derived from its path and contents.
///
/// `DefaultHasher::new()` uses fixed keys, so the id is deterministic for a
/// given (path, content) pair.
fn generate_fixture_id(file_path: &std::path::Path, content: &str) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut state = DefaultHasher::new();
    // Hash order (path, then content) must stay stable: ids are persisted
    // implicitly via lookups from the admin UI.
    file_path.hash(&mut state);
    content.hash(&mut state);
    format!("fixture_{:x}", state.finish())
}
2002
2003fn extract_fingerprint(
2005 file_path: &std::path::Path,
2006 fixture_data: &serde_json::Value,
2007) -> Result<String> {
2008 if let Some(fingerprint) = fixture_data.get("fingerprint").and_then(|f| f.as_str()) {
2010 return Ok(fingerprint.to_string());
2011 }
2012
2013 if let Some(file_name) = file_path.file_stem().and_then(|s| s.to_str()) {
2015 if let Some(hash) = file_name.split('_').next_back() {
2017 if hash.len() >= 8 && hash.chars().all(|c| c.is_alphanumeric()) {
2018 return Ok(hash.to_string());
2019 }
2020 }
2021 }
2022
2023 use std::collections::hash_map::DefaultHasher;
2025 use std::hash::{Hash, Hasher};
2026
2027 let mut hasher = DefaultHasher::new();
2028 file_path.hash(&mut hasher);
2029 Ok(format!("{:x}", hasher.finish()))
2030}
2031
2032pub async fn delete_fixture(
2034 Json(payload): Json<FixtureDeleteRequest>,
2035) -> Json<ApiResponse<String>> {
2036 match delete_fixture_by_id(&payload.fixture_id).await {
2037 Ok(_) => {
2038 tracing::info!("Successfully deleted fixture: {}", payload.fixture_id);
2039 Json(ApiResponse::success("Fixture deleted successfully".to_string()))
2040 }
2041 Err(e) => {
2042 tracing::error!("Failed to delete fixture {}: {}", payload.fixture_id, e);
2043 Json(ApiResponse::error(format!("Failed to delete fixture: {}", e)))
2044 }
2045 }
2046}
2047
2048pub async fn delete_fixtures_bulk(
2050 Json(payload): Json<FixtureBulkDeleteRequest>,
2051) -> Json<ApiResponse<FixtureBulkDeleteResult>> {
2052 let mut deleted_count = 0;
2053 let mut errors = Vec::new();
2054
2055 for fixture_id in &payload.fixture_ids {
2056 match delete_fixture_by_id(fixture_id).await {
2057 Ok(_) => {
2058 deleted_count += 1;
2059 tracing::info!("Successfully deleted fixture: {}", fixture_id);
2060 }
2061 Err(e) => {
2062 errors.push(format!("Failed to delete {}: {}", fixture_id, e));
2063 tracing::error!("Failed to delete fixture {}: {}", fixture_id, e);
2064 }
2065 }
2066 }
2067
2068 let result = FixtureBulkDeleteResult {
2069 deleted_count,
2070 total_requested: payload.fixture_ids.len(),
2071 errors: errors.clone(),
2072 };
2073
2074 if errors.is_empty() {
2075 Json(ApiResponse::success(result))
2076 } else {
2077 Json(ApiResponse::error(format!(
2078 "Partial success: {} deleted, {} errors",
2079 deleted_count,
2080 errors.len()
2081 )))
2082 }
2083}
2084
2085async fn delete_fixture_by_id(fixture_id: &str) -> Result<()> {
2087 let fixtures_dir =
2090 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2091 let fixtures_path = std::path::Path::new(&fixtures_dir);
2092
2093 if !fixtures_path.exists() {
2094 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2095 }
2096
2097 let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2099
2100 let file_path_clone = file_path.clone();
2102 tokio::task::spawn_blocking(move || {
2103 if file_path_clone.exists() {
2104 std::fs::remove_file(&file_path_clone).map_err(|e| {
2105 Error::generic(format!(
2106 "Failed to delete fixture file {}: {}",
2107 file_path_clone.display(),
2108 e
2109 ))
2110 })
2111 } else {
2112 Err(Error::generic(format!("Fixture file not found: {}", file_path_clone.display())))
2113 }
2114 })
2115 .await
2116 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2117
2118 tracing::info!("Deleted fixture file: {}", file_path.display());
2119
2120 cleanup_empty_directories(&file_path).await;
2122
2123 Ok(())
2124}
2125
2126fn find_fixture_file_by_id(
2128 fixtures_path: &std::path::Path,
2129 fixture_id: &str,
2130) -> Result<std::path::PathBuf> {
2131 let protocols = ["http", "websocket", "grpc"];
2133
2134 for protocol in &protocols {
2135 let protocol_path = fixtures_path.join(protocol);
2136 if let Ok(found_path) = search_fixture_in_directory(&protocol_path, fixture_id) {
2137 return Ok(found_path);
2138 }
2139 }
2140
2141 Err(Error::generic(format!(
2142 "Fixture with ID '{}' not found in any protocol directory",
2143 fixture_id
2144 )))
2145}
2146
2147fn search_fixture_in_directory(
2149 dir_path: &std::path::Path,
2150 fixture_id: &str,
2151) -> Result<std::path::PathBuf> {
2152 if !dir_path.exists() {
2153 return Err(Error::generic(format!("Directory does not exist: {}", dir_path.display())));
2154 }
2155
2156 if let Ok(entries) = std::fs::read_dir(dir_path) {
2157 for entry in entries {
2158 let entry = entry
2159 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2160 let path = entry.path();
2161
2162 if path.is_dir() {
2163 if let Ok(found_path) = search_fixture_in_directory(&path, fixture_id) {
2165 return Ok(found_path);
2166 }
2167 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2168 if let Ok(fixture_info) = parse_fixture_file_sync(&path, "unknown") {
2170 if fixture_info.id == fixture_id {
2171 return Ok(path);
2172 }
2173 }
2174 }
2175 }
2176 }
2177
2178 Err(Error::generic(format!(
2179 "Fixture not found in directory: {}",
2180 dir_path.display()
2181 )))
2182}
2183
/// Best-effort removal of directories left empty after a fixture deletion.
///
/// Walks upward from the deleted file's parent toward the fixtures root
/// (from `MOCKFORGE_FIXTURES_DIR`, default "fixtures"), removing each empty
/// directory and stopping at the first non-empty ancestor. Every failure is
/// logged at debug level and otherwise ignored; nothing is returned.
async fn cleanup_empty_directories(file_path: &std::path::Path) {
    let file_path = file_path.to_path_buf();

    // Directory traversal is blocking I/O; run it off the async executor.
    // The JoinHandle result is discarded — cleanup is purely best-effort.
    let _ = tokio::task::spawn_blocking(move || {
        if let Some(parent) = file_path.parent() {
            let mut current = parent;
            let fixtures_dir =
                std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
            let fixtures_path = std::path::Path::new(&fixtures_dir);

            // Never remove the fixtures root itself, and stop at the
            // filesystem root (no parent).
            while current != fixtures_path && current.parent().is_some() {
                if let Ok(entries) = std::fs::read_dir(current) {
                    // `count() == 0` consumes the iterator: directory is empty.
                    if entries.count() == 0 {
                        if let Err(e) = std::fs::remove_dir(current) {
                            tracing::debug!(
                                "Failed to remove empty directory {}: {}",
                                current.display(),
                                e
                            );
                            break;
                        } else {
                            tracing::debug!("Removed empty directory: {}", current.display());
                        }
                    } else {
                        // First non-empty ancestor: nothing more to prune.
                        break;
                    }
                } else {
                    // Unreadable directory: give up quietly.
                    break;
                }

                // Move one level up for the next iteration.
                if let Some(next_parent) = current.parent() {
                    current = next_parent;
                } else {
                    break;
                }
            }
        }
    })
    .await;
}
2227
/// HTTP handler: download a fixture file as a JSON attachment.
///
/// Expects an `id` query parameter. Responds 400 when the parameter is
/// missing and 404 when the fixture cannot be found or read; on success the
/// raw file contents are returned with a Content-Disposition header so
/// browsers save rather than render the file.
pub async fn download_fixture(Query(params): Query<HashMap<String, String>>) -> impl IntoResponse {
    let fixture_id = match params.get("id") {
        Some(id) => id,
        None => {
            return (
                http::StatusCode::BAD_REQUEST,
                [(http::header::CONTENT_TYPE, "application/json")],
                r#"{"error": "Missing fixture ID parameter"}"#,
            )
                .into_response();
        }
    };

    match download_fixture_by_id(fixture_id).await {
        Ok((content, file_name)) => (
            http::StatusCode::OK,
            [
                (http::header::CONTENT_TYPE, "application/json".to_string()),
                (
                    http::header::CONTENT_DISPOSITION,
                    format!("attachment; filename=\"{}\"", file_name),
                ),
            ],
            content,
        )
            .into_response(),
        Err(e) => {
            tracing::error!("Failed to download fixture {}: {}", fixture_id, e);
            // Error bodies are hand-built JSON so this path needs no serializer.
            let error_response = format!(r#"{{"error": "Failed to download fixture: {}"}}"#, e);
            (
                http::StatusCode::NOT_FOUND,
                [(http::header::CONTENT_TYPE, "application/json".to_string())],
                error_response,
            )
                .into_response()
        }
    }
}
2269
2270async fn download_fixture_by_id(fixture_id: &str) -> Result<(String, String)> {
2272 let fixtures_dir =
2274 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2275 let fixtures_path = std::path::Path::new(&fixtures_dir);
2276
2277 if !fixtures_path.exists() {
2278 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2279 }
2280
2281 let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2282
2283 let file_path_clone = file_path.clone();
2285 let (content, file_name) = tokio::task::spawn_blocking(move || {
2286 let content = std::fs::read_to_string(&file_path_clone)
2287 .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
2288
2289 let file_name = file_path_clone
2290 .file_name()
2291 .and_then(|name| name.to_str())
2292 .unwrap_or("fixture.json")
2293 .to_string();
2294
2295 Ok::<_, Error>((content, file_name))
2296 })
2297 .await
2298 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2299
2300 tracing::info!("Downloaded fixture file: {} ({} bytes)", file_path.display(), content.len());
2301 Ok((content, file_name))
2302}
2303
2304pub async fn rename_fixture(
2306 axum::extract::Path(fixture_id): axum::extract::Path<String>,
2307 Json(payload): Json<FixtureRenameRequest>,
2308) -> Json<ApiResponse<String>> {
2309 match rename_fixture_by_id(&fixture_id, &payload.new_name).await {
2310 Ok(new_path) => {
2311 tracing::info!("Successfully renamed fixture: {} -> {}", fixture_id, payload.new_name);
2312 Json(ApiResponse::success(format!("Fixture renamed successfully to: {}", new_path)))
2313 }
2314 Err(e) => {
2315 tracing::error!("Failed to rename fixture {}: {}", fixture_id, e);
2316 Json(ApiResponse::error(format!("Failed to rename fixture: {}", e)))
2317 }
2318 }
2319}
2320
2321async fn rename_fixture_by_id(fixture_id: &str, new_name: &str) -> Result<String> {
2323 if new_name.is_empty() {
2325 return Err(Error::generic("New name cannot be empty".to_string()));
2326 }
2327
2328 let new_name = if new_name.ends_with(".json") {
2330 new_name.to_string()
2331 } else {
2332 format!("{}.json", new_name)
2333 };
2334
2335 let fixtures_dir =
2337 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2338 let fixtures_path = std::path::Path::new(&fixtures_dir);
2339
2340 if !fixtures_path.exists() {
2341 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2342 }
2343
2344 let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2345
2346 let parent = old_path
2348 .parent()
2349 .ok_or_else(|| Error::generic("Could not determine parent directory".to_string()))?;
2350
2351 let new_path = parent.join(&new_name);
2352
2353 if new_path.exists() {
2355 return Err(Error::generic(format!(
2356 "A fixture with name '{}' already exists in the same directory",
2357 new_name
2358 )));
2359 }
2360
2361 let old_path_clone = old_path.clone();
2363 let new_path_clone = new_path.clone();
2364 tokio::task::spawn_blocking(move || {
2365 std::fs::rename(&old_path_clone, &new_path_clone)
2366 .map_err(|e| Error::generic(format!("Failed to rename fixture file: {}", e)))
2367 })
2368 .await
2369 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2370
2371 tracing::info!("Renamed fixture file: {} -> {}", old_path.display(), new_path.display());
2372
2373 Ok(new_path
2375 .strip_prefix(fixtures_path)
2376 .unwrap_or(&new_path)
2377 .to_string_lossy()
2378 .to_string())
2379}
2380
2381pub async fn move_fixture(
2383 axum::extract::Path(fixture_id): axum::extract::Path<String>,
2384 Json(payload): Json<FixtureMoveRequest>,
2385) -> Json<ApiResponse<String>> {
2386 match move_fixture_by_id(&fixture_id, &payload.new_path).await {
2387 Ok(new_location) => {
2388 tracing::info!("Successfully moved fixture: {} -> {}", fixture_id, payload.new_path);
2389 Json(ApiResponse::success(format!("Fixture moved successfully to: {}", new_location)))
2390 }
2391 Err(e) => {
2392 tracing::error!("Failed to move fixture {}: {}", fixture_id, e);
2393 Json(ApiResponse::error(format!("Failed to move fixture: {}", e)))
2394 }
2395 }
2396}
2397
2398async fn move_fixture_by_id(fixture_id: &str, new_path: &str) -> Result<String> {
2400 if new_path.is_empty() {
2402 return Err(Error::generic("New path cannot be empty".to_string()));
2403 }
2404
2405 let fixtures_dir =
2407 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2408 let fixtures_path = std::path::Path::new(&fixtures_dir);
2409
2410 if !fixtures_path.exists() {
2411 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2412 }
2413
2414 let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2415
2416 let new_full_path = if new_path.starts_with('/') {
2418 fixtures_path.join(new_path.trim_start_matches('/'))
2420 } else {
2421 fixtures_path.join(new_path)
2423 };
2424
2425 let new_full_path = if new_full_path.extension().and_then(|s| s.to_str()) == Some("json") {
2427 new_full_path
2428 } else {
2429 if new_full_path.is_dir() || !new_path.contains('.') {
2431 let file_name = old_path.file_name().ok_or_else(|| {
2432 Error::generic("Could not determine original file name".to_string())
2433 })?;
2434 new_full_path.join(file_name)
2435 } else {
2436 new_full_path.with_extension("json")
2437 }
2438 };
2439
2440 if new_full_path.exists() {
2442 return Err(Error::generic(format!(
2443 "A fixture already exists at path: {}",
2444 new_full_path.display()
2445 )));
2446 }
2447
2448 let old_path_clone = old_path.clone();
2450 let new_full_path_clone = new_full_path.clone();
2451 tokio::task::spawn_blocking(move || {
2452 if let Some(parent) = new_full_path_clone.parent() {
2454 std::fs::create_dir_all(parent)
2455 .map_err(|e| Error::generic(format!("Failed to create target directory: {}", e)))?;
2456 }
2457
2458 std::fs::rename(&old_path_clone, &new_full_path_clone)
2460 .map_err(|e| Error::generic(format!("Failed to move fixture file: {}", e)))
2461 })
2462 .await
2463 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2464
2465 tracing::info!("Moved fixture file: {} -> {}", old_path.display(), new_full_path.display());
2466
2467 cleanup_empty_directories(&old_path).await;
2469
2470 Ok(new_full_path
2472 .strip_prefix(fixtures_path)
2473 .unwrap_or(&new_full_path)
2474 .to_string_lossy()
2475 .to_string())
2476}
2477
2478pub async fn get_validation(
2480 State(state): State<AdminState>,
2481) -> Json<ApiResponse<ValidationSettings>> {
2482 let config_state = state.get_config().await;
2484
2485 Json(ApiResponse::success(config_state.validation_settings))
2486}
2487
2488pub async fn update_validation(
2490 State(state): State<AdminState>,
2491 Json(update): Json<ValidationUpdate>,
2492) -> Json<ApiResponse<String>> {
2493 match update.mode.as_str() {
2495 "enforce" | "warn" | "off" => {}
2496 _ => {
2497 return Json(ApiResponse::error(
2498 "Invalid validation mode. Must be 'enforce', 'warn', or 'off'".to_string(),
2499 ))
2500 }
2501 }
2502
2503 let mode = update.mode.clone();
2505 state
2506 .update_validation_config(
2507 update.mode,
2508 update.aggregate_errors,
2509 update.validate_responses,
2510 update.overrides.unwrap_or_default(),
2511 )
2512 .await;
2513
2514 tracing::info!(
2515 "Updated validation settings: mode={}, aggregate_errors={}",
2516 mode,
2517 update.aggregate_errors
2518 );
2519
2520 Json(ApiResponse::success("Validation settings updated".to_string()))
2521}
2522
2523pub async fn get_env_vars() -> Json<ApiResponse<HashMap<String, String>>> {
2525 let mut env_vars = HashMap::new();
2527
2528 let relevant_vars = [
2529 "MOCKFORGE_LATENCY_ENABLED",
2531 "MOCKFORGE_FAILURES_ENABLED",
2532 "MOCKFORGE_PROXY_ENABLED",
2533 "MOCKFORGE_RECORD_ENABLED",
2534 "MOCKFORGE_REPLAY_ENABLED",
2535 "MOCKFORGE_LOG_LEVEL",
2536 "MOCKFORGE_CONFIG_FILE",
2537 "RUST_LOG",
2538 "MOCKFORGE_HTTP_PORT",
2540 "MOCKFORGE_HTTP_HOST",
2541 "MOCKFORGE_HTTP_OPENAPI_SPEC",
2542 "MOCKFORGE_CORS_ENABLED",
2543 "MOCKFORGE_REQUEST_TIMEOUT_SECS",
2544 "MOCKFORGE_WS_PORT",
2546 "MOCKFORGE_WS_HOST",
2547 "MOCKFORGE_WS_REPLAY_FILE",
2548 "MOCKFORGE_WS_CONNECTION_TIMEOUT_SECS",
2549 "MOCKFORGE_GRPC_PORT",
2551 "MOCKFORGE_GRPC_HOST",
2552 "MOCKFORGE_ADMIN_ENABLED",
2554 "MOCKFORGE_ADMIN_PORT",
2555 "MOCKFORGE_ADMIN_HOST",
2556 "MOCKFORGE_ADMIN_MOUNT_PATH",
2557 "MOCKFORGE_ADMIN_API_ENABLED",
2558 "MOCKFORGE_RESPONSE_TEMPLATE_EXPAND",
2560 "MOCKFORGE_REQUEST_VALIDATION",
2561 "MOCKFORGE_AGGREGATE_ERRORS",
2562 "MOCKFORGE_RESPONSE_VALIDATION",
2563 "MOCKFORGE_VALIDATION_STATUS",
2564 "MOCKFORGE_RAG_ENABLED",
2566 "MOCKFORGE_FAKE_TOKENS",
2567 "MOCKFORGE_FIXTURES_DIR",
2569 ];
2570
2571 for var_name in &relevant_vars {
2572 if let Ok(value) = std::env::var(var_name) {
2573 env_vars.insert(var_name.to_string(), value);
2574 }
2575 }
2576
2577 Json(ApiResponse::success(env_vars))
2578}
2579
2580pub async fn update_env_var(Json(update): Json<EnvVarUpdate>) -> Json<ApiResponse<String>> {
2582 std::env::set_var(&update.key, &update.value);
2584
2585 tracing::info!("Updated environment variable: {}={}", update.key, update.value);
2586
2587 Json(ApiResponse::success(format!(
2590 "Environment variable {} updated to '{}'. Note: This change is not persisted and will be lost on restart.",
2591 update.key, update.value
2592 )))
2593}
2594
2595pub async fn get_file_content(
2597 Json(request): Json<FileContentRequest>,
2598) -> Json<ApiResponse<String>> {
2599 if let Err(e) = validate_file_path(&request.file_path) {
2601 return Json(ApiResponse::error(format!("Invalid file path: {}", e)));
2602 }
2603
2604 match tokio::fs::read_to_string(&request.file_path).await {
2606 Ok(content) => {
2607 if let Err(e) = validate_file_content(&content) {
2609 return Json(ApiResponse::error(format!("Invalid file content: {}", e)));
2610 }
2611 Json(ApiResponse::success(content))
2612 }
2613 Err(e) => Json(ApiResponse::error(format!("Failed to read file: {}", e))),
2614 }
2615}
2616
2617pub async fn save_file_content(Json(request): Json<FileSaveRequest>) -> Json<ApiResponse<String>> {
2619 match save_file_to_filesystem(&request.file_path, &request.content).await {
2620 Ok(_) => {
2621 tracing::info!("Successfully saved file: {}", request.file_path);
2622 Json(ApiResponse::success("File saved successfully".to_string()))
2623 }
2624 Err(e) => {
2625 tracing::error!("Failed to save file {}: {}", request.file_path, e);
2626 Json(ApiResponse::error(format!("Failed to save file: {}", e)))
2627 }
2628 }
2629}
2630
2631async fn save_file_to_filesystem(file_path: &str, content: &str) -> Result<()> {
2633 validate_file_path(file_path)?;
2635
2636 validate_file_content(content)?;
2638
2639 let path = std::path::PathBuf::from(file_path);
2641 let content = content.to_string();
2642
2643 let path_clone = path.clone();
2645 let content_clone = content.clone();
2646 tokio::task::spawn_blocking(move || {
2647 if let Some(parent) = path_clone.parent() {
2649 std::fs::create_dir_all(parent).map_err(|e| {
2650 Error::generic(format!("Failed to create directory {}: {}", parent.display(), e))
2651 })?;
2652 }
2653
2654 std::fs::write(&path_clone, &content_clone).map_err(|e| {
2656 Error::generic(format!("Failed to write file {}: {}", path_clone.display(), e))
2657 })?;
2658
2659 let written_content = std::fs::read_to_string(&path_clone).map_err(|e| {
2661 Error::generic(format!("Failed to verify written file {}: {}", path_clone.display(), e))
2662 })?;
2663
2664 if written_content != content_clone {
2665 return Err(Error::generic(format!(
2666 "File content verification failed for {}",
2667 path_clone.display()
2668 )));
2669 }
2670
2671 Ok::<_, Error>(())
2672 })
2673 .await
2674 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2675
2676 tracing::info!("File saved successfully: {} ({} bytes)", path.display(), content.len());
2677 Ok(())
2678}
2679
2680fn validate_file_path(file_path: &str) -> Result<()> {
2682 if file_path.contains("..") {
2684 return Err(Error::generic("Path traversal detected in file path".to_string()));
2685 }
2686
2687 let path = std::path::Path::new(file_path);
2689 if path.is_absolute() {
2690 let allowed_dirs = [
2692 std::env::current_dir().unwrap_or_default(),
2693 std::path::PathBuf::from("."),
2694 std::path::PathBuf::from("fixtures"),
2695 std::path::PathBuf::from("config"),
2696 ];
2697
2698 let mut is_allowed = false;
2699 for allowed_dir in &allowed_dirs {
2700 if path.starts_with(allowed_dir) {
2701 is_allowed = true;
2702 break;
2703 }
2704 }
2705
2706 if !is_allowed {
2707 return Err(Error::generic("File path is outside allowed directories".to_string()));
2708 }
2709 }
2710
2711 let dangerous_extensions = ["exe", "bat", "cmd", "sh", "ps1", "scr", "com"];
2713 if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
2714 if dangerous_extensions.contains(&extension.to_lowercase().as_str()) {
2715 return Err(Error::generic(format!(
2716 "Dangerous file extension not allowed: {}",
2717 extension
2718 )));
2719 }
2720 }
2721
2722 Ok(())
2723}
2724
2725fn validate_file_content(content: &str) -> Result<()> {
2727 if content.len() > 10 * 1024 * 1024 {
2729 return Err(Error::generic("File content too large (max 10MB)".to_string()));
2731 }
2732
2733 if content.contains('\0') {
2735 return Err(Error::generic("File content contains null bytes".to_string()));
2736 }
2737
2738 Ok(())
2739}
2740
/// Request body for deleting a single fixture by its identifier.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureDeleteRequest {
    // Identifier of the fixture to delete.
    pub fixture_id: String,
}
2746
/// Request body for setting a process environment variable.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvVarUpdate {
    // Environment variable name.
    pub key: String,
    // New value to assign.
    pub value: String,
}
2753
/// Request body for deleting multiple fixtures in one call.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteRequest {
    // Identifiers of the fixtures to delete.
    pub fixture_ids: Vec<String>,
}
2759
/// Outcome of a bulk fixture deletion.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteResult {
    // Number of fixtures actually deleted.
    pub deleted_count: usize,
    // Number of fixtures the caller asked to delete.
    pub total_requested: usize,
    // Per-fixture error messages for the deletions that failed.
    pub errors: Vec<String>,
}
2767
/// Request body for renaming a fixture file in place.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureRenameRequest {
    // New file name; a `.json` extension is appended if missing.
    pub new_name: String,
}
2773
/// Request body for moving a fixture to a new location.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureMoveRequest {
    // Target path, interpreted relative to the fixtures directory.
    pub new_path: String,
}
2779
/// Request body for reading a file's content.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContentRequest {
    // Path of the file to read.
    pub file_path: String,
    // Declared type of the file; not consulted by `get_file_content` —
    // presumably used by the UI. TODO confirm against callers.
    pub file_type: String,
}
2786
/// Request body for writing content to a file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileSaveRequest {
    // Path of the file to write.
    pub file_path: String,
    // Full content to write (validated: max 10 MB, no NUL bytes).
    pub content: String,
}
2793
2794pub async fn get_smoke_tests(
2796 State(state): State<AdminState>,
2797) -> Json<ApiResponse<Vec<SmokeTestResult>>> {
2798 let results = state.get_smoke_test_results().await;
2799 Json(ApiResponse::success(results))
2800}
2801
2802pub async fn run_smoke_tests_endpoint(
2804 State(state): State<AdminState>,
2805) -> Json<ApiResponse<String>> {
2806 tracing::info!("Starting smoke test execution");
2807
2808 let state_clone = state.clone();
2810 tokio::spawn(async move {
2811 if let Err(e) = execute_smoke_tests(&state_clone).await {
2812 tracing::error!("Smoke test execution failed: {}", e);
2813 } else {
2814 tracing::info!("Smoke test execution completed successfully");
2815 }
2816 });
2817
2818 Json(ApiResponse::success(
2819 "Smoke tests started. Check results in the smoke tests section.".to_string(),
2820 ))
2821}
2822
2823async fn execute_smoke_tests(state: &AdminState) -> Result<()> {
2825 let base_url =
2827 std::env::var("MOCKFORGE_BASE_URL").unwrap_or_else(|_| "http://localhost:3000".to_string());
2828
2829 let context = SmokeTestContext {
2830 base_url,
2831 timeout_seconds: 30,
2832 parallel: true,
2833 };
2834
2835 let fixtures = scan_fixtures_directory()?;
2837
2838 let http_fixtures: Vec<&FixtureInfo> =
2840 fixtures.iter().filter(|f| f.protocol == "http").collect();
2841
2842 if http_fixtures.is_empty() {
2843 tracing::warn!("No HTTP fixtures found for smoke testing");
2844 return Ok(());
2845 }
2846
2847 tracing::info!("Running smoke tests for {} HTTP fixtures", http_fixtures.len());
2848
2849 let mut test_results = Vec::new();
2851
2852 for fixture in http_fixtures {
2853 let test_result = create_smoke_test_from_fixture(fixture);
2854 test_results.push(test_result);
2855 }
2856
2857 let mut executed_results = Vec::new();
2859 for mut test_result in test_results {
2860 test_result.status = "running".to_string();
2862 state.update_smoke_test_result(test_result.clone()).await;
2863
2864 let start_time = std::time::Instant::now();
2866 match execute_single_smoke_test(&test_result, &context).await {
2867 Ok((status_code, response_time_ms)) => {
2868 test_result.status = "passed".to_string();
2869 test_result.status_code = Some(status_code);
2870 test_result.response_time_ms = Some(response_time_ms);
2871 test_result.error_message = None;
2872 }
2873 Err(e) => {
2874 test_result.status = "failed".to_string();
2875 test_result.error_message = Some(e.to_string());
2876 test_result.status_code = None;
2877 test_result.response_time_ms = None;
2878 }
2879 }
2880
2881 let duration = start_time.elapsed();
2882 test_result.duration_seconds = Some(duration.as_secs_f64());
2883 test_result.last_run = Some(chrono::Utc::now());
2884
2885 executed_results.push(test_result.clone());
2886 state.update_smoke_test_result(test_result).await;
2887 }
2888
2889 tracing::info!("Smoke test execution completed: {} tests run", executed_results.len());
2890 Ok(())
2891}
2892
2893fn create_smoke_test_from_fixture(fixture: &FixtureInfo) -> SmokeTestResult {
2895 let test_name = format!("{} {}", fixture.method, fixture.path);
2896 let description = format!("Smoke test for {} endpoint", fixture.path);
2897
2898 SmokeTestResult {
2899 id: format!("smoke_{}", fixture.id),
2900 name: test_name,
2901 method: fixture.method.clone(),
2902 path: fixture.path.clone(),
2903 description,
2904 last_run: None,
2905 status: "pending".to_string(),
2906 response_time_ms: None,
2907 error_message: None,
2908 status_code: None,
2909 duration_seconds: None,
2910 }
2911}
2912
2913async fn execute_single_smoke_test(
2915 test: &SmokeTestResult,
2916 context: &SmokeTestContext,
2917) -> Result<(u16, u64)> {
2918 let url = format!("{}{}", context.base_url, test.path);
2919 let client = reqwest::Client::builder()
2920 .timeout(std::time::Duration::from_secs(context.timeout_seconds))
2921 .build()
2922 .map_err(|e| Error::generic(format!("Failed to create HTTP client: {}", e)))?;
2923
2924 let start_time = std::time::Instant::now();
2925
2926 let response = match test.method.as_str() {
2927 "GET" => client.get(&url).send().await,
2928 "POST" => client.post(&url).send().await,
2929 "PUT" => client.put(&url).send().await,
2930 "DELETE" => client.delete(&url).send().await,
2931 "PATCH" => client.patch(&url).send().await,
2932 "HEAD" => client.head(&url).send().await,
2933 "OPTIONS" => client.request(reqwest::Method::OPTIONS, &url).send().await,
2934 _ => {
2935 return Err(Error::generic(format!("Unsupported HTTP method: {}", test.method)));
2936 }
2937 };
2938
2939 let response_time = start_time.elapsed();
2940 let response_time_ms = response_time.as_millis() as u64;
2941
2942 match response {
2943 Ok(resp) => {
2944 let status_code = resp.status().as_u16();
2945 if (200..400).contains(&status_code) {
2946 Ok((status_code, response_time_ms))
2947 } else {
2948 Err(Error::generic(format!(
2949 "HTTP error: {} {}",
2950 status_code,
2951 resp.status().canonical_reason().unwrap_or("Unknown")
2952 )))
2953 }
2954 }
2955 Err(e) => Err(Error::generic(format!("Request failed: {}", e))),
2956 }
2957}
2958
2959pub async fn install_plugin(Json(request): Json<serde_json::Value>) -> impl IntoResponse {
2961 let source = request.get("source").and_then(|s| s.as_str()).unwrap_or("");
2963
2964 if source.is_empty() {
2965 return Json(json!({
2966 "success": false,
2967 "error": "Plugin source is required"
2968 }));
2969 }
2970
2971 let plugin_path = if source.starts_with("http://") || source.starts_with("https://") {
2973 match download_plugin_from_url(source).await {
2975 Ok(temp_path) => temp_path,
2976 Err(e) => {
2977 return Json(json!({
2978 "success": false,
2979 "error": format!("Failed to download plugin: {}", e)
2980 }))
2981 }
2982 }
2983 } else {
2984 std::path::PathBuf::from(source)
2986 };
2987
2988 if !plugin_path.exists() {
2990 return Json(json!({
2991 "success": false,
2992 "error": format!("Plugin file not found: {}", source)
2993 }));
2994 }
2995
2996 Json(json!({
2998 "success": true,
2999 "message": format!("Plugin would be installed from: {}", source)
3000 }))
3001}
3002
3003async fn download_plugin_from_url(url: &str) -> Result<std::path::PathBuf> {
3005 let temp_file =
3007 std::env::temp_dir().join(format!("plugin_{}.tmp", chrono::Utc::now().timestamp()));
3008 let temp_path = temp_file.clone();
3009
3010 let response = reqwest::get(url)
3012 .await
3013 .map_err(|e| Error::generic(format!("Failed to download from URL: {}", e)))?;
3014
3015 if !response.status().is_success() {
3016 return Err(Error::generic(format!(
3017 "HTTP error {}: {}",
3018 response.status().as_u16(),
3019 response.status().canonical_reason().unwrap_or("Unknown")
3020 )));
3021 }
3022
3023 let bytes = response
3025 .bytes()
3026 .await
3027 .map_err(|e| Error::generic(format!("Failed to read response: {}", e)))?;
3028
3029 tokio::fs::write(&temp_file, &bytes)
3031 .await
3032 .map_err(|e| Error::generic(format!("Failed to write temporary file: {}", e)))?;
3033
3034 Ok(temp_path)
3035}
3036
/// Serve the application icon.
///
/// NOTE(review): responds with an `image/png` content type but an empty
/// body — presumably a placeholder until real asset bytes are embedded.
pub async fn serve_icon() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3041
/// Serve the 32px application icon.
///
/// NOTE(review): `image/png` content type with an empty body — placeholder.
pub async fn serve_icon_32() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3045
/// Serve the 48px application icon.
///
/// NOTE(review): `image/png` content type with an empty body — placeholder.
pub async fn serve_icon_48() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3049
/// Serve the application logo.
///
/// NOTE(review): `image/png` content type with an empty body — placeholder.
pub async fn serve_logo() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3053
/// Serve the 40px application logo.
///
/// NOTE(review): `image/png` content type with an empty body — placeholder.
pub async fn serve_logo_40() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3057
/// Serve the 80px application logo.
///
/// NOTE(review): `image/png` content type with an empty body — placeholder.
pub async fn serve_logo_80() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3061
/// HTTP handler: update traffic-shaping configuration.
///
/// NOTE(review): this is a stub — the request body is ignored, no state is
/// changed, and success is reported unconditionally.
pub async fn update_traffic_shaping(
    State(_state): State<AdminState>,
    Json(_config): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Traffic shaping updated".to_string()))
}
3069
/// HTTP handler: import a Postman collection into a new workspace.
///
/// Payload fields (all optional): `content` (collection JSON, defaults to
/// ""), `filename`, `environment`, `base_url`. Every attempt — parse
/// failure, workspace-creation failure, or success — is appended to the
/// import history; a post-import workspace save failure is reported to the
/// caller but not recorded in the history.
pub async fn import_postman(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use mockforge_core::workspace_import::{import_postman_to_workspace, WorkspaceImportConfig};
    use uuid::Uuid;

    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Parse the collection; on failure, record a failed history entry and
    // bail out.
    let import_result = match mockforge_core::import::import_postman_collection(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Postman import failed: {}", e)));
        }
    };

    // Workspace name: everything before the first '.' in the filename, or a
    // fixed default.
    let workspace_name = filename
        .and_then(|f| f.split('.').next())
        .unwrap_or("Imported Postman Collection");

    let config = WorkspaceImportConfig {
        create_folders: true,
        base_folder_name: None,
        preserve_hierarchy: true,
        max_depth: 5,
    };

    // Convert the parsed routes into the workspace-import representation.
    // (`import_result.routes` is moved here; `import_result.variables` is
    // still usable below because the move is field-wise.)
    let routes: Vec<ImportRoute> = import_result
        .routes
        .into_iter()
        .map(|route| ImportRoute {
            method: route.method,
            path: route.path,
            headers: route.headers,
            body: route.body,
            response: ImportResponse {
                status: route.response.status,
                headers: route.response.headers,
                body: route.response.body,
            },
        })
        .collect();

    match import_postman_to_workspace(routes, workspace_name.to_string(), config) {
        Ok(workspace_result) => {
            // Persist the new workspace; a save failure aborts before any
            // history entry is written.
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            // Record the successful import.
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: import_result.variables.len(),
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            // Workspace creation failed: record a failed history entry.
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}
3193
3194pub async fn import_insomnia(
3195 State(state): State<AdminState>,
3196 Json(request): Json<serde_json::Value>,
3197) -> Json<ApiResponse<String>> {
3198 use uuid::Uuid;
3199
3200 let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3201 let filename = request.get("filename").and_then(|v| v.as_str());
3202 let environment = request.get("environment").and_then(|v| v.as_str());
3203 let base_url = request.get("base_url").and_then(|v| v.as_str());
3204
3205 let import_result = match mockforge_core::import::import_insomnia_export(content, environment) {
3207 Ok(result) => result,
3208 Err(e) => {
3209 let entry = ImportHistoryEntry {
3211 id: Uuid::new_v4().to_string(),
3212 format: "insomnia".to_string(),
3213 timestamp: chrono::Utc::now(),
3214 routes_count: 0,
3215 variables_count: 0,
3216 warnings_count: 0,
3217 success: false,
3218 filename: filename.map(|s| s.to_string()),
3219 environment: environment.map(|s| s.to_string()),
3220 base_url: base_url.map(|s| s.to_string()),
3221 error_message: Some(e.clone()),
3222 };
3223 let mut history = state.import_history.write().await;
3224 history.push(entry);
3225
3226 return Json(ApiResponse::error(format!("Insomnia import failed: {}", e)));
3227 }
3228 };
3229
3230 let workspace_name = filename
3232 .and_then(|f| f.split('.').next())
3233 .unwrap_or("Imported Insomnia Collection");
3234
3235 let _config = WorkspaceImportConfig {
3236 create_folders: true,
3237 base_folder_name: None,
3238 preserve_hierarchy: true,
3239 max_depth: 5,
3240 };
3241
3242 let variables_count = import_result.variables.len();
3244
3245 match mockforge_core::workspace_import::create_workspace_from_insomnia(
3246 import_result,
3247 Some(workspace_name.to_string()),
3248 ) {
3249 Ok(workspace_result) => {
3250 if let Err(e) =
3252 state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3253 {
3254 tracing::error!("Failed to save workspace: {}", e);
3255 return Json(ApiResponse::error(format!(
3256 "Import succeeded but failed to save workspace: {}",
3257 e
3258 )));
3259 }
3260
3261 let entry = ImportHistoryEntry {
3263 id: Uuid::new_v4().to_string(),
3264 format: "insomnia".to_string(),
3265 timestamp: chrono::Utc::now(),
3266 routes_count: workspace_result.request_count,
3267 variables_count,
3268 warnings_count: workspace_result.warnings.len(),
3269 success: true,
3270 filename: filename.map(|s| s.to_string()),
3271 environment: environment.map(|s| s.to_string()),
3272 base_url: base_url.map(|s| s.to_string()),
3273 error_message: None,
3274 };
3275 let mut history = state.import_history.write().await;
3276 history.push(entry);
3277
3278 Json(ApiResponse::success(format!(
3279 "Successfully imported {} routes into workspace '{}'",
3280 workspace_result.request_count, workspace_name
3281 )))
3282 }
3283 Err(e) => {
3284 let entry = ImportHistoryEntry {
3286 id: Uuid::new_v4().to_string(),
3287 format: "insomnia".to_string(),
3288 timestamp: chrono::Utc::now(),
3289 routes_count: 0,
3290 variables_count: 0,
3291 warnings_count: 0,
3292 success: false,
3293 filename: filename.map(|s| s.to_string()),
3294 environment: environment.map(|s| s.to_string()),
3295 base_url: base_url.map(|s| s.to_string()),
3296 error_message: Some(e.to_string()),
3297 };
3298 let mut history = state.import_history.write().await;
3299 history.push(entry);
3300
3301 Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3302 }
3303 }
3304}
3305
/// HTTP handler: import an OpenAPI specification.
///
/// NOTE(review): this is a stub — the request body is ignored, nothing is
/// imported, and success is reported unconditionally.
pub async fn import_openapi(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("OpenAPI import completed".to_string()))
}
3312
/// HTTP handler: import a set of curl commands into a new workspace.
///
/// Payload fields (all optional): `content` (curl command text, defaults to
/// ""), `filename`, `base_url`. Every attempt — parse failure,
/// workspace-creation failure, or success — is appended to the import
/// history; a post-import workspace save failure is reported to the caller
/// but not recorded in the history. Curl imports carry no environment and
/// no variables.
pub async fn import_curl(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use uuid::Uuid;

    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Parse the curl commands; on failure, record a failed history entry
    // and bail out.
    let import_result = match mockforge_core::import::import_curl_commands(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Curl import failed: {}", e)));
        }
    };

    // Workspace name: everything before the first '.' in the filename, or a
    // fixed default.
    let workspace_name =
        filename.and_then(|f| f.split('.').next()).unwrap_or("Imported Curl Commands");

    match mockforge_core::workspace_import::create_workspace_from_curl(
        import_result,
        Some(workspace_name.to_string()),
    ) {
        Ok(workspace_result) => {
            // Persist the new workspace; a save failure aborts before any
            // history entry is written.
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            // Record the successful import (curl imports have no variables).
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: 0, warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            // Workspace creation failed: record a failed history entry.
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}
3412
/// HTTP handler: parse an import payload without persisting anything and
/// return the routes/variables/warnings that a real import would produce.
///
/// The source format is guessed purely from the `filename` payload field
/// (substring and extension heuristics); the `content` itself is not
/// sniffed. NOTE(review): a payload without a recognizable filename is
/// rejected as "Unsupported import format" even if the content is valid —
/// confirm this is the intended contract.
pub async fn preview_import(
    State(_state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<serde_json::Value>> {
    use mockforge_core::import::{
        import_curl_commands, import_insomnia_export, import_postman_collection,
    };

    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Filename-based format detection: "postman"/"insomnia"/"curl"
    // substrings or characteristic extensions.
    let format = if let Some(fname) = filename {
        if fname.to_lowercase().contains("postman")
            || fname.to_lowercase().ends_with(".postman_collection")
        {
            "postman"
        } else if fname.to_lowercase().contains("insomnia")
            || fname.to_lowercase().ends_with(".insomnia")
        {
            "insomnia"
        } else if fname.to_lowercase().contains("curl")
            || fname.to_lowercase().ends_with(".sh")
            || fname.to_lowercase().ends_with(".curl")
        {
            "curl"
        } else {
            "unknown"
        }
    } else {
        "unknown"
    };

    match format {
        "postman" => match import_postman_collection(content, base_url) {
            Ok(import_result) => {
                // Flatten each parsed route into a JSON preview object.
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                let response = serde_json::json!({
                    "routes": routes,
                    "variables": import_result.variables,
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Postman import failed: {}", e))),
        },
        "insomnia" => match import_insomnia_export(content, environment) {
            Ok(import_result) => {
                // Same preview shape as the Postman branch.
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                let response = serde_json::json!({
                    "routes": routes,
                    "variables": import_result.variables,
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Insomnia import failed: {}", e))),
        },
        "curl" => match import_curl_commands(content, base_url) {
            Ok(import_result) => {
                // Same preview shape; curl imports expose no variables.
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                let response = serde_json::json!({
                    "routes": routes,
                    "variables": serde_json::json!({}),
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Curl import failed: {}", e))),
        },
        _ => Json(ApiResponse::error("Unsupported import format".to_string())),
    }
}
3545
3546pub async fn get_import_history(
3547 State(state): State<AdminState>,
3548) -> Json<ApiResponse<serde_json::Value>> {
3549 let history = state.import_history.read().await;
3550 let total = history.len();
3551
3552 let imports: Vec<serde_json::Value> = history
3553 .iter()
3554 .rev()
3555 .take(50)
3556 .map(|entry| {
3557 serde_json::json!({
3558 "id": entry.id,
3559 "format": entry.format,
3560 "timestamp": entry.timestamp.to_rfc3339(),
3561 "routes_count": entry.routes_count,
3562 "variables_count": entry.variables_count,
3563 "warnings_count": entry.warnings_count,
3564 "success": entry.success,
3565 "filename": entry.filename,
3566 "environment": entry.environment,
3567 "base_url": entry.base_url,
3568 "error_message": entry.error_message
3569 })
3570 })
3571 .collect();
3572
3573 let response = serde_json::json!({
3574 "imports": imports,
3575 "total": total
3576 });
3577
3578 Json(ApiResponse::success(response))
3579}
3580
3581pub async fn get_admin_api_state(
3582 State(_state): State<AdminState>,
3583) -> Json<ApiResponse<serde_json::Value>> {
3584 Json(ApiResponse::success(serde_json::json!({
3585 "status": "active"
3586 })))
3587}
3588
3589pub async fn get_admin_api_replay(
3590 State(_state): State<AdminState>,
3591) -> Json<ApiResponse<serde_json::Value>> {
3592 Json(ApiResponse::success(serde_json::json!({
3593 "replay": []
3594 })))
3595}
3596
3597pub async fn get_sse_status(
3598 State(_state): State<AdminState>,
3599) -> Json<ApiResponse<serde_json::Value>> {
3600 Json(ApiResponse::success(serde_json::json!({
3601 "available": true,
3602 "endpoint": "/sse",
3603 "config": {
3604 "event_type": "status",
3605 "interval_ms": 1000,
3606 "data_template": "{}"
3607 }
3608 })))
3609}
3610
3611pub async fn get_sse_connections(
3612 State(_state): State<AdminState>,
3613) -> Json<ApiResponse<serde_json::Value>> {
3614 Json(ApiResponse::success(serde_json::json!({
3615 "active_connections": 0
3616 })))
3617}
3618
3619pub async fn get_workspaces(
3621 State(_state): State<AdminState>,
3622) -> Json<ApiResponse<Vec<serde_json::Value>>> {
3623 Json(ApiResponse::success(vec![]))
3624}
3625
3626pub async fn create_workspace(
3627 State(_state): State<AdminState>,
3628 Json(_request): Json<serde_json::Value>,
3629) -> Json<ApiResponse<String>> {
3630 Json(ApiResponse::success("Workspace created".to_string()))
3631}
3632
3633pub async fn open_workspace_from_directory(
3634 State(_state): State<AdminState>,
3635 Json(_request): Json<serde_json::Value>,
3636) -> Json<ApiResponse<String>> {
3637 Json(ApiResponse::success("Workspace opened from directory".to_string()))
3638}
3639
3640pub async fn get_reality_level(
3644 State(state): State<AdminState>,
3645) -> Json<ApiResponse<serde_json::Value>> {
3646 let engine = state.reality_engine.read().await;
3647 let level = engine.get_level().await;
3648 let config = engine.get_config().await;
3649
3650 Json(ApiResponse::success(serde_json::json!({
3651 "level": level.value(),
3652 "level_name": level.name(),
3653 "description": level.description(),
3654 "chaos": {
3655 "enabled": config.chaos.enabled,
3656 "error_rate": config.chaos.error_rate,
3657 "delay_rate": config.chaos.delay_rate,
3658 },
3659 "latency": {
3660 "base_ms": config.latency.base_ms,
3661 "jitter_ms": config.latency.jitter_ms,
3662 },
3663 "mockai": {
3664 "enabled": config.mockai.enabled,
3665 },
3666 })))
3667}
3668
/// Request body for [`set_reality_level`].
#[derive(Deserialize)]
pub struct SetRealityLevelRequest {
    /// Desired reality level; valid values are 1-5 (validated by the handler).
    level: u8,
}
3674
/// Set the active reality level (1-5) and hot-reload dependent subsystems.
///
/// After updating the reality engine, the derived configuration is pushed to
/// the chaos API state, the latency injector and MockAI when those subsystems
/// are present on `AdminState`. Subsystem update failures do not fail the
/// request; they are collected and surfaced as a `warnings` array in the
/// response.
pub async fn set_reality_level(
    State(state): State<AdminState>,
    Json(request): Json<SetRealityLevelRequest>,
) -> Json<ApiResponse<SetRealityLevelRequest>> {
    // Validate the requested level before touching any state.
    let level = match mockforge_core::RealityLevel::from_value(request.level) {
        Some(l) => l,
        None => {
            return Json(ApiResponse::error(format!(
                "Invalid reality level: {}. Must be between 1 and 5.",
                request.level
            )));
        }
    };

    // Apply the level and snapshot the derived config, then release the
    // engine write lock before fanning out to the other subsystems.
    let mut engine = state.reality_engine.write().await;
    engine.set_level(level).await;
    let config = engine.get_config().await;
    drop(engine);
    let mut update_errors = Vec::new();

    // 1) Mirror the reality config into the chaos API state, if present.
    if let Some(ref chaos_api_state) = state.chaos_api_state {
        use mockforge_chaos::config::ChaosConfig;
        let mut chaos_config = chaos_api_state.config.write().await;

        use mockforge_chaos::config::{FaultInjectionConfig, LatencyConfig};

        // Translate the reality latency settings into a chaos LatencyConfig
        // (only when a base delay is configured).
        let latency_config = if config.latency.base_ms > 0 {
            Some(LatencyConfig {
                enabled: true,
                fixed_delay_ms: Some(config.latency.base_ms),
                random_delay_range_ms: config
                    .latency
                    .max_ms
                    .map(|max| (config.latency.min_ms, max)),
                // Express jitter as a fraction of the base delay, capped at 1.0.
                jitter_percent: if config.latency.jitter_ms > 0 {
                    (config.latency.jitter_ms as f64 / config.latency.base_ms as f64).min(1.0)
                } else {
                    0.0
                },
                probability: 1.0,
            })
        } else {
            None
        };

        // Translate chaos settings into a FaultInjectionConfig
        // (only when chaos is enabled at this level).
        let fault_injection_config = if config.chaos.enabled {
            Some(FaultInjectionConfig {
                enabled: true,
                http_errors: config.chaos.status_codes.clone(),
                http_error_probability: config.chaos.error_rate,
                connection_errors: false,
                connection_error_probability: 0.0,
                timeout_errors: config.chaos.inject_timeouts,
                timeout_ms: config.chaos.timeout_ms,
                // Timeout injection reuses the configured error rate.
                timeout_probability: if config.chaos.inject_timeouts {
                    config.chaos.error_rate
                } else {
                    0.0
                },
                partial_responses: false,
                partial_response_probability: 0.0,
                payload_corruption: false,
                payload_corruption_probability: 0.0,
                corruption_type: mockforge_chaos::config::CorruptionType::None,
                error_pattern: Some(mockforge_chaos::config::ErrorPattern::Random {
                    probability: config.chaos.error_rate,
                }),
                mockai_enabled: false,
            })
        } else {
            None
        };

        chaos_config.enabled = config.chaos.enabled;
        chaos_config.latency = latency_config;
        chaos_config.fault_injection = fault_injection_config;

        drop(chaos_config);
        tracing::info!("✅ Updated chaos config for reality level {}", level.value());

    }

    // 2) Hot-reload the latency injector with the new profile, if present.
    if let Some(ref latency_injector) = state.latency_injector {
        match mockforge_core::latency::LatencyInjector::update_profile_async(
            latency_injector,
            config.latency.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated latency injector for reality level {}", level.value());
            }
            Err(e) => {
                // Non-fatal: record the failure and continue with other subsystems.
                let error_msg = format!("Failed to update latency injector: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // 3) Hot-reload MockAI with the new config, if present.
    if let Some(ref mockai) = state.mockai {
        match mockforge_core::intelligent_behavior::MockAI::update_config_async(
            mockai,
            config.mockai.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated MockAI config for reality level {}", level.value());
            }
            Err(e) => {
                // Non-fatal: record the failure and continue.
                let error_msg = format!("Failed to update MockAI: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // Echo the applied level/config back to the caller.
    let mut response = serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": {
            "enabled": config.chaos.enabled,
            "error_rate": config.chaos.error_rate,
            "delay_rate": config.chaos.delay_rate,
        },
        "latency": {
            "base_ms": config.latency.base_ms,
            "jitter_ms": config.latency.jitter_ms,
        },
        "mockai": {
            "enabled": config.mockai.enabled,
        },
    });

    // Surface partial-failure details without failing the overall request.
    if !update_errors.is_empty() {
        response["warnings"] = serde_json::json!(update_errors);
        tracing::warn!(
            "Reality level updated to {} but some subsystems failed to update: {:?}",
            level.value(),
            update_errors
        );
    } else {
        tracing::info!(
            "✅ Reality level successfully updated to {} (hot-reload applied)",
            level.value()
        );
    }

    Json(ApiResponse::success(response))
}
3842
3843pub async fn list_reality_presets(
3845 State(state): State<AdminState>,
3846) -> Json<ApiResponse<Vec<serde_json::Value>>> {
3847 let persistence = &state.workspace_persistence;
3848 match persistence.list_reality_presets().await {
3849 Ok(preset_paths) => {
3850 let presets: Vec<serde_json::Value> = preset_paths
3851 .iter()
3852 .map(|path| {
3853 serde_json::json!({
3854 "id": path.file_name().and_then(|n| n.to_str()).unwrap_or("unknown"),
3855 "path": path.to_string_lossy(),
3856 "name": path.file_stem().and_then(|n| n.to_str()).unwrap_or("unknown"),
3857 })
3858 })
3859 .collect();
3860 Json(ApiResponse::success(presets))
3861 }
3862 Err(e) => Json(ApiResponse::error(format!("Failed to list presets: {}", e))),
3863 }
3864}
3865
/// Request body for [`import_reality_preset`].
#[derive(Deserialize)]
pub struct ImportPresetRequest {
    /// Filesystem path of the preset file to import (used verbatim).
    path: String,
}
3871
/// Import a reality preset from a file path and apply it immediately.
///
/// On success the preset is applied to the reality engine and its summary is
/// returned; on failure an error response is produced.
///
/// NOTE(review): `request.path` is used verbatim, so the caller can point this
/// at any file readable by the server process — confirm this endpoint is only
/// reachable by trusted admin operators.
pub async fn import_reality_preset(
    State(state): State<AdminState>,
    Json(request): Json<ImportPresetRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let persistence = &state.workspace_persistence;
    let path = std::path::Path::new(&request.path);

    match persistence.import_reality_preset(path).await {
        Ok(preset) => {
            // Apply the imported preset to the live engine right away.
            let mut engine = state.reality_engine.write().await;
            engine.apply_preset(preset.clone()).await;

            Json(ApiResponse::success(serde_json::json!({
                "name": preset.name,
                "description": preset.description,
                "level": preset.config.level.value(),
                "level_name": preset.config.level.name(),
            })))
        }
        Err(e) => Json(ApiResponse::error(format!("Failed to import preset: {}", e))),
    }
}
3895
/// Request body for [`export_reality_preset`].
#[derive(Deserialize)]
pub struct ExportPresetRequest {
    /// Preset name; also used to derive the output file name.
    name: String,
    /// Optional human-readable description stored with the preset.
    description: Option<String>,
}
3902
3903pub async fn export_reality_preset(
3904 State(state): State<AdminState>,
3905 Json(request): Json<ExportPresetRequest>,
3906) -> Json<ApiResponse<serde_json::Value>> {
3907 let engine = state.reality_engine.read().await;
3908 let preset = engine.create_preset(request.name.clone(), request.description.clone()).await;
3909
3910 let persistence = &state.workspace_persistence;
3911 let presets_dir = persistence.presets_dir();
3912 let filename = format!("{}.json", request.name.replace(' ', "_").to_lowercase());
3913 let output_path = presets_dir.join(&filename);
3914
3915 match persistence.export_reality_preset(&preset, &output_path).await {
3916 Ok(_) => Json(ApiResponse::success(serde_json::json!({
3917 "name": preset.name,
3918 "description": preset.description,
3919 "path": output_path.to_string_lossy(),
3920 "level": preset.config.level.value(),
3921 }))),
3922 Err(e) => Json(ApiResponse::error(format!("Failed to export preset: {}", e))),
3923 }
3924}
3925
3926pub async fn get_continuum_ratio(
3930 State(state): State<AdminState>,
3931 Query(params): Query<std::collections::HashMap<String, String>>,
3932) -> Json<ApiResponse<serde_json::Value>> {
3933 let path = params.get("path").cloned().unwrap_or_else(|| "/".to_string());
3934 let engine = state.continuum_engine.read().await;
3935 let ratio = engine.get_blend_ratio(&path).await;
3936 let config = engine.get_config().await;
3937 let enabled = engine.is_enabled().await;
3938
3939 Json(ApiResponse::success(serde_json::json!({
3940 "path": path,
3941 "blend_ratio": ratio,
3942 "enabled": enabled,
3943 "transition_mode": format!("{:?}", config.transition_mode),
3944 "merge_strategy": format!("{:?}", config.merge_strategy),
3945 "default_ratio": config.default_ratio,
3946 })))
3947}
3948
/// Request body for [`set_continuum_ratio`].
#[derive(Deserialize)]
pub struct SetContinuumRatioRequest {
    /// Route path the ratio applies to.
    path: String,
    /// Blend ratio; the handler clamps it into [0.0, 1.0].
    ratio: f64,
}
3955
3956pub async fn set_continuum_ratio(
3957 State(state): State<AdminState>,
3958 Json(request): Json<SetContinuumRatioRequest>,
3959) -> Json<ApiResponse<serde_json::Value>> {
3960 let ratio = request.ratio.clamp(0.0, 1.0);
3961 let engine = state.continuum_engine.read().await;
3962 engine.set_blend_ratio(&request.path, ratio).await;
3963
3964 Json(ApiResponse::success(serde_json::json!({
3965 "path": request.path,
3966 "blend_ratio": ratio,
3967 })))
3968}
3969
3970pub async fn get_continuum_schedule(
3972 State(state): State<AdminState>,
3973) -> Json<ApiResponse<serde_json::Value>> {
3974 let engine = state.continuum_engine.read().await;
3975 let schedule = engine.get_time_schedule().await;
3976
3977 match schedule {
3978 Some(s) => Json(ApiResponse::success(serde_json::json!({
3979 "start_time": s.start_time.to_rfc3339(),
3980 "end_time": s.end_time.to_rfc3339(),
3981 "start_ratio": s.start_ratio,
3982 "end_ratio": s.end_ratio,
3983 "curve": format!("{:?}", s.curve),
3984 "duration_days": s.duration().num_days(),
3985 }))),
3986 None => Json(ApiResponse::success(serde_json::json!(null))),
3987 }
3988}
3989
/// Request body for [`set_continuum_schedule`].
#[derive(Deserialize)]
pub struct SetContinuumScheduleRequest {
    /// Schedule start, RFC 3339 timestamp.
    start_time: String,
    /// Schedule end, RFC 3339 timestamp.
    end_time: String,
    /// Ratio at the start of the window; clamped to [0.0, 1.0].
    start_ratio: f64,
    /// Ratio at the end of the window; clamped to [0.0, 1.0].
    end_ratio: f64,
    /// Transition curve: "linear", "exponential" or "sigmoid"
    /// (anything else, or omitted, falls back to linear).
    curve: Option<String>,
}
3999
4000pub async fn set_continuum_schedule(
4001 State(state): State<AdminState>,
4002 Json(request): Json<SetContinuumScheduleRequest>,
4003) -> Json<ApiResponse<serde_json::Value>> {
4004 let start_time = chrono::DateTime::parse_from_rfc3339(&request.start_time)
4005 .map_err(|e| format!("Invalid start_time: {}", e))
4006 .and_then(|dt| Ok(dt.with_timezone(&chrono::Utc)));
4007
4008 let end_time = chrono::DateTime::parse_from_rfc3339(&request.end_time)
4009 .map_err(|e| format!("Invalid end_time: {}", e))
4010 .and_then(|dt| Ok(dt.with_timezone(&chrono::Utc)));
4011
4012 match (start_time, end_time) {
4013 (Ok(start), Ok(end)) => {
4014 let curve = request
4015 .curve
4016 .as_deref()
4017 .map(|c| match c {
4018 "linear" => mockforge_core::TransitionCurve::Linear,
4019 "exponential" => mockforge_core::TransitionCurve::Exponential,
4020 "sigmoid" => mockforge_core::TransitionCurve::Sigmoid,
4021 _ => mockforge_core::TransitionCurve::Linear,
4022 })
4023 .unwrap_or(mockforge_core::TransitionCurve::Linear);
4024
4025 let schedule = mockforge_core::TimeSchedule::with_curve(
4026 start,
4027 end,
4028 request.start_ratio.clamp(0.0, 1.0),
4029 request.end_ratio.clamp(0.0, 1.0),
4030 curve,
4031 );
4032
4033 let engine = state.continuum_engine.read().await;
4034 engine.set_time_schedule(schedule.clone()).await;
4035
4036 Json(ApiResponse::success(serde_json::json!({
4037 "start_time": schedule.start_time.to_rfc3339(),
4038 "end_time": schedule.end_time.to_rfc3339(),
4039 "start_ratio": schedule.start_ratio,
4040 "end_ratio": schedule.end_ratio,
4041 "curve": format!("{:?}", schedule.curve),
4042 })))
4043 }
4044 (Err(e), _) | (_, Err(e)) => Json(ApiResponse::error(e)),
4045 }
4046}
4047
/// Request body for [`advance_continuum_ratio`].
#[derive(Deserialize)]
pub struct AdvanceContinuumRatioRequest {
    /// Amount to advance the ratio by; defaults to 0.1 when omitted.
    increment: Option<f64>,
}
4053
4054pub async fn advance_continuum_ratio(
4055 State(state): State<AdminState>,
4056 Json(request): Json<AdvanceContinuumRatioRequest>,
4057) -> Json<ApiResponse<serde_json::Value>> {
4058 let increment = request.increment.unwrap_or(0.1);
4059 let engine = state.continuum_engine.read().await;
4060 engine.advance_ratio(increment).await;
4061 let config = engine.get_config().await;
4062
4063 Json(ApiResponse::success(serde_json::json!({
4064 "default_ratio": config.default_ratio,
4065 "increment": increment,
4066 })))
4067}
4068
/// Request body for [`set_continuum_enabled`].
#[derive(Deserialize)]
pub struct SetContinuumEnabledRequest {
    /// Whether the continuum engine should be enabled.
    enabled: bool,
}
4074
4075pub async fn set_continuum_enabled(
4076 State(state): State<AdminState>,
4077 Json(request): Json<SetContinuumEnabledRequest>,
4078) -> Json<ApiResponse<serde_json::Value>> {
4079 let engine = state.continuum_engine.read().await;
4080 engine.set_enabled(request.enabled).await;
4081
4082 Json(ApiResponse::success(serde_json::json!({
4083 "enabled": request.enabled,
4084 })))
4085}
4086
4087pub async fn get_continuum_overrides(
4089 State(state): State<AdminState>,
4090) -> Json<ApiResponse<serde_json::Value>> {
4091 let engine = state.continuum_engine.read().await;
4092 let overrides = engine.get_manual_overrides().await;
4093
4094 Json(ApiResponse::success(serde_json::json!(overrides)))
4095}
4096
4097pub async fn clear_continuum_overrides(
4099 State(state): State<AdminState>,
4100) -> Json<ApiResponse<serde_json::Value>> {
4101 let engine = state.continuum_engine.read().await;
4102 engine.clear_manual_overrides().await;
4103
4104 Json(ApiResponse::success(serde_json::json!({
4105 "message": "All manual overrides cleared",
4106 })))
4107}
4108
4109pub async fn get_workspace(
4110 State(_state): State<AdminState>,
4111 axum::extract::Path(workspace_id): axum::extract::Path<String>,
4112) -> Json<ApiResponse<serde_json::Value>> {
4113 Json(ApiResponse::success(serde_json::json!({
4114 "workspace": {
4115 "summary": {
4116 "id": workspace_id,
4117 "name": "Mock Workspace",
4118 "description": "A mock workspace"
4119 },
4120 "folders": [],
4121 "requests": []
4122 }
4123 })))
4124}
4125
4126pub async fn delete_workspace(
4127 State(_state): State<AdminState>,
4128 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4129) -> Json<ApiResponse<String>> {
4130 Json(ApiResponse::success("Workspace deleted".to_string()))
4131}
4132
4133pub async fn set_active_workspace(
4134 State(_state): State<AdminState>,
4135 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4136) -> Json<ApiResponse<String>> {
4137 Json(ApiResponse::success("Workspace activated".to_string()))
4138}
4139
4140pub async fn create_folder(
4141 State(_state): State<AdminState>,
4142 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4143 Json(_request): Json<serde_json::Value>,
4144) -> Json<ApiResponse<String>> {
4145 Json(ApiResponse::success("Folder created".to_string()))
4146}
4147
4148pub async fn create_request(
4149 State(_state): State<AdminState>,
4150 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4151 Json(_request): Json<serde_json::Value>,
4152) -> Json<ApiResponse<String>> {
4153 Json(ApiResponse::success("Request created".to_string()))
4154}
4155
4156pub async fn execute_workspace_request(
4157 State(_state): State<AdminState>,
4158 axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
4159 Json(_request): Json<serde_json::Value>,
4160) -> Json<ApiResponse<serde_json::Value>> {
4161 Json(ApiResponse::success(serde_json::json!({
4162 "status": "executed",
4163 "response": {}
4164 })))
4165}
4166
4167pub async fn get_request_history(
4168 State(_state): State<AdminState>,
4169 axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
4170) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4171 Json(ApiResponse::success(vec![]))
4172}
4173
4174pub async fn get_folder(
4175 State(_state): State<AdminState>,
4176 axum::extract::Path((_workspace_id, folder_id)): axum::extract::Path<(String, String)>,
4177) -> Json<ApiResponse<serde_json::Value>> {
4178 Json(ApiResponse::success(serde_json::json!({
4179 "folder": {
4180 "summary": {
4181 "id": folder_id,
4182 "name": "Mock Folder",
4183 "description": "A mock folder"
4184 },
4185 "requests": []
4186 }
4187 })))
4188}
4189
4190pub async fn import_to_workspace(
4191 State(_state): State<AdminState>,
4192 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4193 Json(_request): Json<serde_json::Value>,
4194) -> Json<ApiResponse<String>> {
4195 Json(ApiResponse::success("Import to workspace completed".to_string()))
4196}
4197
4198pub async fn export_workspaces(
4199 State(_state): State<AdminState>,
4200 Json(_request): Json<serde_json::Value>,
4201) -> Json<ApiResponse<String>> {
4202 Json(ApiResponse::success("Workspaces exported".to_string()))
4203}
4204
4205pub async fn get_environments(
4207 State(_state): State<AdminState>,
4208 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4209) -> Json<ApiResponse<serde_json::Value>> {
4210 let environments = vec![serde_json::json!({
4212 "id": "global",
4213 "name": "Global",
4214 "description": "Global environment variables",
4215 "variable_count": 0,
4216 "is_global": true,
4217 "active": true,
4218 "order": 0
4219 })];
4220
4221 Json(ApiResponse::success(serde_json::json!({
4222 "environments": environments,
4223 "total": 1
4224 })))
4225}
4226
4227pub async fn create_environment(
4228 State(_state): State<AdminState>,
4229 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4230 Json(_request): Json<serde_json::Value>,
4231) -> Json<ApiResponse<String>> {
4232 Json(ApiResponse::success("Environment created".to_string()))
4233}
4234
4235pub async fn update_environment(
4236 State(_state): State<AdminState>,
4237 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4238 Json(_request): Json<serde_json::Value>,
4239) -> Json<ApiResponse<String>> {
4240 Json(ApiResponse::success("Environment updated".to_string()))
4241}
4242
4243pub async fn delete_environment(
4244 State(_state): State<AdminState>,
4245 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4246) -> Json<ApiResponse<String>> {
4247 Json(ApiResponse::success("Environment deleted".to_string()))
4248}
4249
4250pub async fn set_active_environment(
4251 State(_state): State<AdminState>,
4252 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4253) -> Json<ApiResponse<String>> {
4254 Json(ApiResponse::success("Environment activated".to_string()))
4255}
4256
4257pub async fn update_environments_order(
4258 State(_state): State<AdminState>,
4259 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4260 Json(_request): Json<serde_json::Value>,
4261) -> Json<ApiResponse<String>> {
4262 Json(ApiResponse::success("Environment order updated".to_string()))
4263}
4264
4265pub async fn get_environment_variables(
4266 State(_state): State<AdminState>,
4267 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4268) -> Json<ApiResponse<serde_json::Value>> {
4269 Json(ApiResponse::success(serde_json::json!({
4270 "variables": []
4271 })))
4272}
4273
4274pub async fn set_environment_variable(
4275 State(_state): State<AdminState>,
4276 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
4277 Json(_request): Json<serde_json::Value>,
4278) -> Json<ApiResponse<String>> {
4279 Json(ApiResponse::success("Environment variable set".to_string()))
4280}
4281
4282pub async fn remove_environment_variable(
4283 State(_state): State<AdminState>,
4284 axum::extract::Path((_workspace_id, _environment_id, _variable_name)): axum::extract::Path<(
4285 String,
4286 String,
4287 String,
4288 )>,
4289) -> Json<ApiResponse<String>> {
4290 Json(ApiResponse::success("Environment variable removed".to_string()))
4291}
4292
4293pub async fn get_autocomplete_suggestions(
4295 State(_state): State<AdminState>,
4296 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4297 Json(_request): Json<serde_json::Value>,
4298) -> Json<ApiResponse<serde_json::Value>> {
4299 Json(ApiResponse::success(serde_json::json!({
4300 "suggestions": [],
4301 "start_position": 0,
4302 "end_position": 0
4303 })))
4304}
4305
4306pub async fn get_sync_status(
4308 State(_state): State<AdminState>,
4309 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4310) -> Json<ApiResponse<serde_json::Value>> {
4311 Json(ApiResponse::success(serde_json::json!({
4312 "status": "disabled"
4313 })))
4314}
4315
4316pub async fn configure_sync(
4317 State(_state): State<AdminState>,
4318 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4319 Json(_request): Json<serde_json::Value>,
4320) -> Json<ApiResponse<String>> {
4321 Json(ApiResponse::success("Sync configured".to_string()))
4322}
4323
4324pub async fn disable_sync(
4325 State(_state): State<AdminState>,
4326 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4327) -> Json<ApiResponse<String>> {
4328 Json(ApiResponse::success("Sync disabled".to_string()))
4329}
4330
4331pub async fn trigger_sync(
4332 State(_state): State<AdminState>,
4333 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4334) -> Json<ApiResponse<String>> {
4335 Json(ApiResponse::success("Sync triggered".to_string()))
4336}
4337
4338pub async fn get_sync_changes(
4339 State(_state): State<AdminState>,
4340 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4341) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4342 Json(ApiResponse::success(vec![]))
4343}
4344
4345pub async fn confirm_sync_changes(
4346 State(_state): State<AdminState>,
4347 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
4348 Json(_request): Json<serde_json::Value>,
4349) -> Json<ApiResponse<String>> {
4350 Json(ApiResponse::success("Sync changes confirmed".to_string()))
4351}
4352
4353pub async fn validate_plugin(
4355 State(_state): State<AdminState>,
4356 Json(_request): Json<serde_json::Value>,
4357) -> Json<ApiResponse<String>> {
4358 Json(ApiResponse::success("Plugin validated".to_string()))
4359}
4360
4361pub async fn clear_import_history(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
4363 let mut history = state.import_history.write().await;
4364 history.clear();
4365 Json(ApiResponse::success("Import history cleared".to_string()))
4366}
4367
#[cfg(test)]
mod tests {
    use super::*;

    // Construct RequestMetrics by hand and check the plain-data fields
    // round-trip as assigned.
    #[test]
    fn test_request_metrics_creation() {
        use std::collections::HashMap;

        let metrics = RequestMetrics {
            total_requests: 100,
            active_connections: 5,
            requests_by_endpoint: HashMap::new(),
            response_times: vec![10, 20, 30],
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint: HashMap::new(),
            last_request_by_endpoint: HashMap::new(),
        };

        assert_eq!(metrics.total_requests, 100);
        assert_eq!(metrics.active_connections, 5);
        assert_eq!(metrics.response_times.len(), 3);
    }

    // Basic field-assignment sanity check for SystemMetrics.
    #[test]
    fn test_system_metrics_creation() {
        let metrics = SystemMetrics {
            cpu_usage_percent: 45.5,
            memory_usage_mb: 100,
            active_threads: 10,
        };

        assert_eq!(metrics.active_threads, 10);
        assert!(metrics.cpu_usage_percent > 0.0);
        assert_eq!(metrics.memory_usage_mb, 100);
    }

    // A TimeSeriesPoint stores whatever value it is given.
    #[test]
    fn test_time_series_point() {
        let point = TimeSeriesPoint {
            timestamp: chrono::Utc::now(),
            value: 42.5,
        };

        assert_eq!(point.value, 42.5);
    }

    // RestartStatus holds optional metadata about an in-flight restart.
    #[test]
    fn test_restart_status() {
        let status = RestartStatus {
            in_progress: true,
            initiated_at: Some(chrono::Utc::now()),
            reason: Some("Manual restart".to_string()),
            success: None,
        };

        assert!(status.in_progress);
        assert!(status.reason.is_some());
    }

    // ConfigurationState aggregates the latency, fault, proxy and
    // validation sub-configs; verify a fully spelled-out value sticks.
    #[test]
    fn test_configuration_state() {
        use std::collections::HashMap;

        let state = ConfigurationState {
            latency_profile: crate::models::LatencyProfile {
                name: "default".to_string(),
                base_ms: 100,
                jitter_ms: 10,
                tag_overrides: HashMap::new(),
            },
            fault_config: crate::models::FaultConfig {
                enabled: false,
                failure_rate: 0.0,
                status_codes: vec![],
                active_failures: 0,
            },
            proxy_config: crate::models::ProxyConfig {
                enabled: false,
                upstream_url: None,
                timeout_seconds: 30,
                requests_proxied: 0,
            },
            validation_settings: crate::models::ValidationSettings {
                mode: "off".to_string(),
                aggregate_errors: false,
                validate_responses: false,
                overrides: HashMap::new(),
            },
        };

        assert_eq!(state.latency_profile.name, "default");
        assert!(!state.fault_config.enabled);
        assert!(!state.proxy_config.enabled);
    }

    // AdminState::new should record the HTTP address, API flag and admin
    // port it was constructed with.
    #[test]
    fn test_admin_state_new() {
        let http_addr: std::net::SocketAddr = "127.0.0.1:3000".parse().unwrap();
        let state =
            AdminState::new(Some(http_addr), None, None, None, true, 8080, None, None, None);

        assert_eq!(state.http_server_addr, Some(http_addr));
        assert!(state.api_enabled);
        assert_eq!(state.admin_port, 8080);
    }
}
4473}