1use axum::{
13 extract::{Query, State},
14 http::{self, StatusCode},
15 response::{
16 sse::{Event, Sse},
17 Html, IntoResponse, Json,
18 },
19};
20use chrono::Utc;
21use futures_util::stream::{self, Stream};
22use mockforge_core::{Error, Result};
23use mockforge_plugin_loader::PluginRegistry;
24use serde::{Deserialize, Serialize};
25use serde_json::json;
26use std::collections::HashMap;
27use std::convert::Infallible;
28use std::process::Command;
29use std::process::Stdio;
30use std::sync::Arc;
31use std::time::Duration;
32use sysinfo::System;
33use tokio::sync::RwLock;
34
35use crate::models::{
37 ApiResponse, ConfigUpdate, DashboardData, DashboardSystemInfo, FaultConfig, HealthCheck,
38 LatencyProfile, LogFilter, MetricsData, ProxyConfig, RequestLog, RouteInfo, ServerInfo,
39 ServerStatus, SimpleMetricsData, SystemInfo, ValidationSettings, ValidationUpdate,
40};
41
42use mockforge_core::workspace_import::{ImportResponse, ImportRoute};
44
45pub mod admin;
47pub mod analytics;
48pub mod analytics_stream;
49pub mod analytics_v2;
50pub mod assets;
51pub mod chains;
52pub mod health;
53pub mod plugin;
54
55pub use assets::*;
57pub use chains::*;
58pub use plugin::*;
59
60use mockforge_core::workspace_import::WorkspaceImportConfig;
62use mockforge_core::workspace_persistence::WorkspacePersistence;
63
/// Rolling request statistics recorded by the admin layer.
///
/// Bounded windows are enforced by the writers in `AdminState::record_request`
/// (100 global samples, 50 per endpoint), not by this type itself.
#[derive(Debug, Clone, Default)]
pub struct RequestMetrics {
    /// Total number of requests observed since startup.
    pub total_requests: u64,
    /// Currently active connections (as tracked by callers).
    pub active_connections: u64,
    /// Request count keyed by "METHOD path" endpoint string.
    pub requests_by_endpoint: HashMap<String, u64>,
    /// Recent response-time samples in milliseconds (rolling window).
    pub response_times: Vec<u64>,
    /// Recent per-endpoint response-time samples in milliseconds.
    pub response_times_by_endpoint: HashMap<String, Vec<u64>>,
    /// Count of responses with status >= 400, keyed by endpoint.
    pub errors_by_endpoint: HashMap<String, u64>,
    /// Timestamp of the most recent request seen for each endpoint.
    pub last_request_by_endpoint: HashMap<String, chrono::DateTime<chrono::Utc>>,
}
82
/// Point-in-time host resource usage sampled by the monitoring task.
#[derive(Debug, Clone)]
pub struct SystemMetrics {
    /// Used memory in megabytes.
    pub memory_usage_mb: u64,
    /// Global CPU utilization as a percentage (0.0-100.0).
    pub cpu_usage_percent: f64,
    /// Logical CPU count, used as a proxy for active threads.
    pub active_threads: u32,
}
93
/// A single timestamped sample in one of the dashboard time series.
#[derive(Debug, Clone)]
pub struct TimeSeriesPoint {
    /// When the sample was taken (UTC).
    pub timestamp: chrono::DateTime<chrono::Utc>,
    /// Sample value; meaning depends on the series it belongs to.
    pub value: f64,
}
102
/// Bounded historical series backing the dashboard charts.
///
/// Each series is capped at 100 points by the writers in `AdminState`.
#[derive(Debug, Clone, Default)]
pub struct TimeSeriesData {
    /// Memory usage samples (megabytes).
    pub memory_usage: Vec<TimeSeriesPoint>,
    /// CPU usage samples (percent).
    pub cpu_usage: Vec<TimeSeriesPoint>,
    /// Cumulative request count samples.
    pub request_count: Vec<TimeSeriesPoint>,
    /// Response-time samples (milliseconds).
    pub response_time: Vec<TimeSeriesPoint>,
}
115
/// State of an in-flight or completed server restart request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestartStatus {
    /// True while a restart is being performed.
    pub in_progress: bool,
    /// When the restart was requested, if any.
    pub initiated_at: Option<chrono::DateTime<chrono::Utc>>,
    /// Free-text reason supplied by the initiator.
    pub reason: Option<String>,
    /// Outcome of the last restart: `Some(true/false)` once finished, `None` otherwise.
    pub success: Option<bool>,
}
128
/// Metadata describing a recorded fixture on disk.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureInfo {
    /// Unique fixture identifier.
    pub id: String,
    /// Protocol the fixture was captured from (e.g. HTTP, WS).
    pub protocol: String,
    /// Request method of the captured exchange.
    pub method: String,
    /// Request path of the captured exchange.
    pub path: String,
    /// When the fixture was saved (UTC).
    pub saved_at: chrono::DateTime<chrono::Utc>,
    /// Size of the fixture file in bytes.
    pub file_size: u64,
    /// Filesystem path of the fixture file.
    pub file_path: String,
    /// Content fingerprint used for matching/deduplication.
    pub fingerprint: String,
    /// Arbitrary additional metadata attached to the fixture.
    pub metadata: serde_json::Value,
}
151
/// Result of a single smoke-test case, kept in a bounded history.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SmokeTestResult {
    /// Unique test identifier (used for in-place updates).
    pub id: String,
    /// Human-readable test name.
    pub name: String,
    /// HTTP method exercised by the test.
    pub method: String,
    /// Path exercised by the test.
    pub path: String,
    /// Description of what the test verifies.
    pub description: String,
    /// When the test was last executed, if ever.
    pub last_run: Option<chrono::DateTime<chrono::Utc>>,
    /// Current status string (e.g. passed/failed/pending — format defined by callers).
    pub status: String,
    /// Observed response time in milliseconds, when available.
    pub response_time_ms: Option<u64>,
    /// Failure detail, when the test failed.
    pub error_message: Option<String>,
    /// HTTP status code returned, when available.
    pub status_code: Option<u16>,
    /// Total run duration in seconds, when available.
    pub duration_seconds: Option<f64>,
}
178
/// Execution settings for a smoke-test run.
#[derive(Debug, Clone)]
pub struct SmokeTestContext {
    /// Base URL the tests are issued against.
    pub base_url: String,
    /// Per-request timeout in seconds.
    pub timeout_seconds: u64,
    /// Whether tests may run concurrently.
    pub parallel: bool,
}
189
/// Live, mutable server configuration exposed through the admin API.
#[derive(Debug, Clone, Serialize)]
pub struct ConfigurationState {
    /// Artificial latency injection settings.
    pub latency_profile: LatencyProfile,
    /// Fault (error) injection settings.
    pub fault_config: FaultConfig,
    /// Upstream proxying settings.
    pub proxy_config: ProxyConfig,
    /// Request/response validation settings.
    pub validation_settings: ValidationSettings,
}
202
/// Record of one workspace import attempt (success or failure).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportHistoryEntry {
    /// Unique identifier for this import attempt.
    pub id: String,
    /// Source format (e.g. OpenAPI, Postman — as reported by the importer).
    pub format: String,
    /// When the import was performed (UTC).
    pub timestamp: chrono::DateTime<chrono::Utc>,
    /// Number of routes produced by the import.
    pub routes_count: usize,
    /// Number of variables produced by the import.
    pub variables_count: usize,
    /// Number of warnings emitted during the import.
    pub warnings_count: usize,
    /// Whether the import completed successfully.
    pub success: bool,
    /// Original filename, when known.
    pub filename: Option<String>,
    /// Target environment name, when specified.
    pub environment: Option<String>,
    /// Base URL applied to imported routes, when specified.
    pub base_url: Option<String>,
    /// Failure detail when `success` is false.
    pub error_message: Option<String>,
}
229
/// Shared state handle for the admin UI/API.
///
/// Cheap to clone: all mutable pieces sit behind `Arc<RwLock<...>>`, so clones
/// share the same underlying data (used by the background monitoring task).
#[derive(Clone)]
pub struct AdminState {
    /// Bound address of the mock HTTP server, if running.
    pub http_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the WebSocket server, if running.
    pub ws_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the gRPC server, if running.
    pub grpc_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the GraphQL server, if running.
    pub graphql_server_addr: Option<std::net::SocketAddr>,
    /// Whether the admin API endpoints are enabled.
    pub api_enabled: bool,
    /// Port the admin UI listens on.
    pub admin_port: u16,
    /// Process start time, used for uptime calculations.
    pub start_time: chrono::DateTime<chrono::Utc>,
    /// Rolling request statistics.
    pub metrics: Arc<RwLock<RequestMetrics>>,
    /// Latest sampled host resource usage.
    pub system_metrics: Arc<RwLock<SystemMetrics>>,
    /// Live server configuration (latency/faults/proxy/validation).
    pub config: Arc<RwLock<ConfigurationState>>,
    /// Bounded in-memory request log (newest at the end, max 1000).
    pub logs: Arc<RwLock<Vec<RequestLog>>>,
    /// Bounded historical series for dashboard charts.
    pub time_series: Arc<RwLock<TimeSeriesData>>,
    /// State of any in-flight restart request.
    pub restart_status: Arc<RwLock<RestartStatus>>,
    /// Bounded smoke-test result history (max 100).
    pub smoke_test_results: Arc<RwLock<Vec<SmokeTestResult>>>,
    /// History of workspace import attempts.
    pub import_history: Arc<RwLock<Vec<ImportHistoryEntry>>>,
    /// Workspace persistence backend (rooted at `./workspaces` by default).
    pub workspace_persistence: Arc<WorkspacePersistence>,
    /// Registry of loaded plugins.
    pub plugin_registry: Arc<RwLock<PluginRegistry>>,
}
268
impl AdminState {
    /// Spawns a detached background task that samples host CPU/memory every
    /// 10 seconds and folds the readings into `system_metrics` and the
    /// time-series buffers. The task loops forever; it holds a clone of this
    /// state and outlives the handle it was started from.
    pub async fn start_system_monitoring(&self) {
        let state_clone = self.clone();
        tokio::spawn(async move {
            let mut sys = System::new_all();
            let mut refresh_count = 0u64;

            tracing::info!("Starting system monitoring background task");

            loop {
                sys.refresh_all();

                let cpu_usage = sys.global_cpu_usage();

                // NOTE(review): assumes sysinfo reports memory in bytes here
                // (two /1024 steps -> MB); confirm against the sysinfo
                // version pinned in Cargo.toml.
                let _total_memory = sys.total_memory() as f64;
                let used_memory = sys.used_memory() as f64;
                let memory_usage_mb = used_memory / 1024.0 / 1024.0;

                // Logical CPU count is used as a proxy for "active threads".
                let active_threads = sys.cpus().len() as u32;

                let memory_mb_u64 = memory_usage_mb as u64;

                // Only log every 10th refresh (~100s) to limit log volume.
                if refresh_count.is_multiple_of(10) {
                    tracing::debug!(
                        "System metrics updated: CPU={:.1}%, Mem={}MB, Threads={}",
                        cpu_usage,
                        memory_mb_u64,
                        active_threads
                    );
                }

                state_clone
                    .update_system_metrics(memory_mb_u64, cpu_usage as f64, active_threads)
                    .await;

                refresh_count += 1;

                tokio::time::sleep(Duration::from_secs(10)).await;
            }
        });
    }

    /// Builds a fresh `AdminState` with empty metrics/logs/history and the
    /// default latency, fault, proxy, and validation configuration.
    /// Workspace persistence is rooted at `./workspaces`.
    pub fn new(
        http_server_addr: Option<std::net::SocketAddr>,
        ws_server_addr: Option<std::net::SocketAddr>,
        grpc_server_addr: Option<std::net::SocketAddr>,
        graphql_server_addr: Option<std::net::SocketAddr>,
        api_enabled: bool,
        admin_port: u16,
    ) -> Self {
        let start_time = chrono::Utc::now();

        Self {
            http_server_addr,
            ws_server_addr,
            grpc_server_addr,
            graphql_server_addr,
            api_enabled,
            admin_port,
            start_time,
            metrics: Arc::new(RwLock::new(RequestMetrics::default())),
            system_metrics: Arc::new(RwLock::new(SystemMetrics {
                memory_usage_mb: 0,
                cpu_usage_percent: 0.0,
                active_threads: 0,
            })),
            config: Arc::new(RwLock::new(ConfigurationState {
                latency_profile: LatencyProfile {
                    name: "default".to_string(),
                    base_ms: 50,
                    jitter_ms: 20,
                    tag_overrides: HashMap::new(),
                },
                fault_config: FaultConfig {
                    enabled: false,
                    failure_rate: 0.0,
                    status_codes: vec![500, 502, 503],
                    active_failures: 0,
                },
                proxy_config: ProxyConfig {
                    enabled: false,
                    upstream_url: None,
                    timeout_seconds: 30,
                    requests_proxied: 0,
                },
                validation_settings: ValidationSettings {
                    mode: "enforce".to_string(),
                    aggregate_errors: true,
                    validate_responses: false,
                    overrides: HashMap::new(),
                },
            })),
            logs: Arc::new(RwLock::new(Vec::new())),
            time_series: Arc::new(RwLock::new(TimeSeriesData::default())),
            restart_status: Arc::new(RwLock::new(RestartStatus {
                in_progress: false,
                initiated_at: None,
                reason: None,
                success: None,
            })),
            smoke_test_results: Arc::new(RwLock::new(Vec::new())),
            import_history: Arc::new(RwLock::new(Vec::new())),
            workspace_persistence: Arc::new(WorkspacePersistence::new("./workspaces")),
            plugin_registry: Arc::new(RwLock::new(PluginRegistry::new())),
        }
    }

    /// Records one completed request: bumps counters, updates rolling
    /// response-time windows and per-endpoint stats, appends a time-series
    /// sample, and pushes a bounded log entry.
    pub async fn record_request(
        &self,
        method: &str,
        path: &str,
        status_code: u16,
        response_time_ms: u64,
        error: Option<String>,
    ) {
        let mut metrics = self.metrics.write().await;

        metrics.total_requests += 1;
        let endpoint = format!("{} {}", method, path);
        *metrics.requests_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;

        // Any 4xx/5xx response counts as an error for the endpoint.
        if status_code >= 400 {
            *metrics.errors_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
        }

        // Keep only the most recent 100 global response-time samples.
        metrics.response_times.push(response_time_ms);
        if metrics.response_times.len() > 100 {
            metrics.response_times.remove(0);
        }

        // Keep only the most recent 50 samples per endpoint.
        let endpoint_times = metrics
            .response_times_by_endpoint
            .entry(endpoint.clone())
            .or_insert_with(Vec::new);
        endpoint_times.push(response_time_ms);
        if endpoint_times.len() > 50 {
            endpoint_times.remove(0);
        }

        metrics.last_request_by_endpoint.insert(endpoint, chrono::Utc::now());

        let total_requests = metrics.total_requests;

        // Release the metrics lock before taking the time-series/log locks,
        // so we never hold two write locks at once.
        drop(metrics);

        self.update_time_series_on_request(response_time_ms, total_requests).await;

        let mut logs = self.logs.write().await;
        let log_entry = RequestLog {
            id: format!("req_{}", total_requests),
            timestamp: Utc::now(),
            method: method.to_string(),
            path: path.to_string(),
            status_code,
            response_time_ms,
            client_ip: None,
            user_agent: None,
            headers: HashMap::new(),
            response_size_bytes: 0,
            error_message: error,
        };

        logs.push(log_entry);

        // Bound the in-memory log to the newest 1000 entries.
        if logs.len() > 1000 {
            logs.remove(0);
        }
    }

    /// Returns a snapshot of the current request metrics.
    pub async fn get_metrics(&self) -> RequestMetrics {
        self.metrics.read().await.clone()
    }

    /// Overwrites the latest system metrics sample and appends it to the
    /// time-series history.
    pub async fn update_system_metrics(&self, memory_mb: u64, cpu_percent: f64, threads: u32) {
        let mut system_metrics = self.system_metrics.write().await;
        system_metrics.memory_usage_mb = memory_mb;
        system_metrics.cpu_usage_percent = cpu_percent;
        system_metrics.active_threads = threads;

        self.update_time_series_data(memory_mb as f64, cpu_percent).await;
    }

    /// Appends one sample to every time series (memory, CPU, request count,
    /// average response time) and trims each series to 100 points.
    async fn update_time_series_data(&self, memory_mb: f64, cpu_percent: f64) {
        let now = chrono::Utc::now();
        let mut time_series = self.time_series.write().await;

        time_series.memory_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: memory_mb,
        });

        time_series.cpu_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: cpu_percent,
        });

        let metrics = self.metrics.read().await;
        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: metrics.total_requests as f64,
        });

        // Response-time point is the mean of the current rolling window.
        let avg_response_time = if !metrics.response_times.is_empty() {
            metrics.response_times.iter().sum::<u64>() as f64 / metrics.response_times.len() as f64
        } else {
            0.0
        };
        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: avg_response_time,
        });

        // Cap every series at the newest 100 points.
        const MAX_POINTS: usize = 100;
        if time_series.memory_usage.len() > MAX_POINTS {
            time_series.memory_usage.remove(0);
        }
        if time_series.cpu_usage.len() > MAX_POINTS {
            time_series.cpu_usage.remove(0);
        }
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    /// Returns a snapshot of the latest system metrics sample.
    pub async fn get_system_metrics(&self) -> SystemMetrics {
        self.system_metrics.read().await.clone()
    }

    /// Returns a snapshot of all time-series history.
    pub async fn get_time_series_data(&self) -> TimeSeriesData {
        self.time_series.read().await.clone()
    }

    /// Returns a snapshot of the restart status.
    pub async fn get_restart_status(&self) -> RestartStatus {
        self.restart_status.read().await.clone()
    }

    /// Marks a restart as in progress.
    ///
    /// # Errors
    /// Returns an error if a restart is already in progress.
    pub async fn initiate_restart(&self, reason: String) -> Result<()> {
        let mut status = self.restart_status.write().await;

        if status.in_progress {
            return Err(Error::generic("Restart already in progress".to_string()));
        }

        status.in_progress = true;
        status.initiated_at = Some(chrono::Utc::now());
        status.reason = Some(reason);
        status.success = None;

        Ok(())
    }

    /// Marks the current restart as finished with the given outcome.
    pub async fn complete_restart(&self, success: bool) {
        let mut status = self.restart_status.write().await;
        status.in_progress = false;
        status.success = Some(success);
    }

    /// Returns a snapshot of the smoke-test result history.
    pub async fn get_smoke_test_results(&self) -> Vec<SmokeTestResult> {
        self.smoke_test_results.read().await.clone()
    }

    /// Inserts or replaces (by `id`) one smoke-test result, keeping at most
    /// 100 entries (oldest dropped first).
    pub async fn update_smoke_test_result(&self, result: SmokeTestResult) {
        let mut results = self.smoke_test_results.write().await;

        if let Some(existing) = results.iter_mut().find(|r| r.id == result.id) {
            *existing = result;
        } else {
            results.push(result);
        }

        if results.len() > 100 {
            results.remove(0);
        }
    }

    /// Clears all stored smoke-test results.
    pub async fn clear_smoke_test_results(&self) {
        let mut results = self.smoke_test_results.write().await;
        results.clear();
    }

    /// Appends request-count and response-time samples after each recorded
    /// request (unlike `update_time_series_data`, this uses the single
    /// request's latency rather than a window average) and trims both series.
    async fn update_time_series_on_request(&self, response_time_ms: u64, total_requests: u64) {
        let now = chrono::Utc::now();
        let mut time_series = self.time_series.write().await;

        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: total_requests as f64,
        });

        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: response_time_ms as f64,
        });

        const MAX_POINTS: usize = 100;
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    /// Returns a snapshot of the live configuration.
    pub async fn get_config(&self) -> ConfigurationState {
        self.config.read().await.clone()
    }

    /// Replaces the latency profile's timings and tag overrides.
    pub async fn update_latency_config(
        &self,
        base_ms: u64,
        jitter_ms: u64,
        tag_overrides: HashMap<String, u64>,
    ) {
        let mut config = self.config.write().await;
        config.latency_profile.base_ms = base_ms;
        config.latency_profile.jitter_ms = jitter_ms;
        config.latency_profile.tag_overrides = tag_overrides;
    }

    /// Replaces the fault-injection configuration.
    pub async fn update_fault_config(
        &self,
        enabled: bool,
        failure_rate: f64,
        status_codes: Vec<u16>,
    ) {
        let mut config = self.config.write().await;
        config.fault_config.enabled = enabled;
        config.fault_config.failure_rate = failure_rate;
        config.fault_config.status_codes = status_codes;
    }

    /// Replaces the proxy configuration.
    pub async fn update_proxy_config(
        &self,
        enabled: bool,
        upstream_url: Option<String>,
        timeout_seconds: u64,
    ) {
        let mut config = self.config.write().await;
        config.proxy_config.enabled = enabled;
        config.proxy_config.upstream_url = upstream_url;
        config.proxy_config.timeout_seconds = timeout_seconds;
    }

    /// Replaces the validation settings.
    pub async fn update_validation_config(
        &self,
        mode: String,
        aggregate_errors: bool,
        validate_responses: bool,
        overrides: HashMap<String, String>,
    ) {
        let mut config = self.config.write().await;
        config.validation_settings.mode = mode;
        config.validation_settings.aggregate_errors = aggregate_errors;
        config.validation_settings.validate_responses = validate_responses;
        config.validation_settings.overrides = overrides;
    }

    /// Returns locally stored logs matching `filter`, newest first, capped at
    /// `filter.limit` (default 100). Path matching is substring `contains`.
    pub async fn get_logs_filtered(&self, filter: &LogFilter) -> Vec<RequestLog> {
        let logs = self.logs.read().await;

        logs.iter()
            .rev()
            .filter(|log| {
                if let Some(ref method) = filter.method {
                    if log.method != *method {
                        return false;
                    }
                }
                if let Some(ref path_pattern) = filter.path_pattern {
                    if !log.path.contains(path_pattern) {
                        return false;
                    }
                }
                if let Some(status) = filter.status_code {
                    if log.status_code != status {
                        return false;
                    }
                }
                true
            })
            .take(filter.limit.unwrap_or(100))
            .cloned()
            .collect()
    }

    /// Clears the local in-memory request log.
    pub async fn clear_logs(&self) {
        let mut logs = self.logs.write().await;
        logs.clear();
    }
}
710
/// Serves the embedded admin UI HTML page.
pub async fn serve_admin_html() -> Html<&'static str> {
    Html(crate::get_admin_html())
}
715
/// Serves the embedded admin UI stylesheet with a `text/css` content type.
pub async fn serve_admin_css() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "text/css")], crate::get_admin_css())
}
720
/// Serves the embedded admin UI script with an `application/javascript` content type.
pub async fn serve_admin_js() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "application/javascript")], crate::get_admin_js())
}
725
/// GET handler assembling the full dashboard payload: server info, system
/// info, aggregate metrics, per-server statuses, and recent logs.
///
/// Metrics are rebuilt from the global centralized logger when one is
/// installed; otherwise the admin-local counters/log buffer are used.
pub async fn get_dashboard(State(state): State<AdminState>) -> Json<ApiResponse<DashboardData>> {
    let uptime = Utc::now().signed_duration_since(state.start_time).num_seconds() as u64;

    let system_metrics = state.get_system_metrics().await;
    let _config = state.get_config().await;

    let (recent_logs, calculated_metrics): (Vec<RequestLog>, RequestMetrics) =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            // Aggregate over the full log history; keep the newest 20 for display.
            let all_logs = global_logger.get_recent_logs(None).await;
            let recent_logs_subset = global_logger.get_recent_logs(Some(20)).await;

            let total_requests = all_logs.len() as u64;
            let mut requests_by_endpoint = HashMap::new();
            let mut errors_by_endpoint = HashMap::new();
            let mut response_times = Vec::new();
            let mut last_request_by_endpoint = HashMap::new();

            for log in &all_logs {
                let endpoint_key = format!("{} {}", log.method, log.path);
                *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;

                if log.status_code >= 400 {
                    *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
                }

                response_times.push(log.response_time_ms);
                last_request_by_endpoint.insert(endpoint_key, log.timestamp);
            }

            let calculated_metrics = RequestMetrics {
                total_requests,
                // Connection count is not derivable from the logger.
                active_connections: 0,
                requests_by_endpoint,
                response_times,
                response_times_by_endpoint: HashMap::new(),
                errors_by_endpoint,
                last_request_by_endpoint,
            };

            // Convert the centralized log entry type into the local RequestLog model.
            let recent_logs = recent_logs_subset
                .into_iter()
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            (recent_logs, calculated_metrics)
        } else {
            // Fallback: local buffers only expose the newest 10 entries here.
            let logs = state.logs.read().await;
            let recent_logs = logs.iter().rev().take(10).cloned().collect();
            let metrics = state.get_metrics().await;
            (recent_logs, metrics)
        };

    let metrics = calculated_metrics;

    let system_info = SystemInfo {
        version: env!("CARGO_PKG_VERSION").to_string(),
        uptime_seconds: uptime,
        memory_usage_mb: system_metrics.memory_usage_mb,
        cpu_usage_percent: system_metrics.cpu_usage_percent,
        active_threads: system_metrics.active_threads as usize,
        total_routes: metrics.requests_by_endpoint.len(),
        // count_fixtures is defined elsewhere in this crate; errors degrade to 0.
        total_fixtures: count_fixtures().unwrap_or(0),
    };

    // NOTE(review): the /2 and /3 divisions below look like heuristic splits
    // of one shared connection count across protocols — confirm intent.
    let servers = vec![
        ServerStatus {
            server_type: "HTTP".to_string(),
            address: state.http_server_addr.map(|addr| addr.to_string()),
            running: state.http_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections,
            total_requests: count_requests_by_server_type(&metrics, "HTTP"),
        },
        ServerStatus {
            server_type: "WebSocket".to_string(),
            address: state.ws_server_addr.map(|addr| addr.to_string()),
            running: state.ws_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 2,
            total_requests: count_requests_by_server_type(&metrics, "WebSocket"),
        },
        ServerStatus {
            server_type: "gRPC".to_string(),
            address: state.grpc_server_addr.map(|addr| addr.to_string()),
            running: state.grpc_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 3,
            total_requests: count_requests_by_server_type(&metrics, "gRPC"),
        },
    ];

    // Rebuild route rows from the "METHOD path" endpoint keys.
    let mut routes = Vec::new();
    for (endpoint, count) in &metrics.requests_by_endpoint {
        let parts: Vec<&str> = endpoint.splitn(2, ' ').collect();
        if parts.len() == 2 {
            let method = parts[0].to_string();
            let path = parts[1].to_string();
            let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);

            routes.push(RouteInfo {
                method: Some(method.clone()),
                path: path.clone(),
                priority: 0,
                has_fixtures: route_has_fixtures(&method, &path),
                latency_ms: calculate_endpoint_latency(&metrics, endpoint),
                request_count: *count,
                last_request: get_endpoint_last_request(&metrics, endpoint),
                error_count,
            });
        }
    }

    let dashboard = DashboardData {
        server_info: ServerInfo {
            version: env!("CARGO_PKG_VERSION").to_string(),
            build_time: option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("unknown").to_string(),
            git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("unknown").to_string(),
            http_server: state.http_server_addr.map(|addr| addr.to_string()),
            ws_server: state.ws_server_addr.map(|addr| addr.to_string()),
            grpc_server: state.grpc_server_addr.map(|addr| addr.to_string()),
            graphql_server: state.graphql_server_addr.map(|addr| addr.to_string()),
            api_enabled: state.api_enabled,
            admin_port: state.admin_port,
        },
        system_info: DashboardSystemInfo {
            os: std::env::consts::OS.to_string(),
            arch: std::env::consts::ARCH.to_string(),
            uptime,
            // MB -> bytes for the dashboard field.
            memory_usage: system_metrics.memory_usage_mb * 1024 * 1024,
        },
        metrics: SimpleMetricsData {
            total_requests: metrics.requests_by_endpoint.values().sum(),
            active_requests: metrics.active_connections,
            average_response_time: if metrics.response_times.is_empty() {
                0.0
            } else {
                metrics.response_times.iter().sum::<u64>() as f64
                    / metrics.response_times.len() as f64
            },
            error_rate: {
                let total_requests = metrics.requests_by_endpoint.values().sum::<u64>();
                let total_errors = metrics.errors_by_endpoint.values().sum::<u64>();
                if total_requests == 0 {
                    0.0
                } else {
                    total_errors as f64 / total_requests as f64
                }
            },
        },
        servers,
        recent_logs,
        system: system_info,
    };

    Json(ApiResponse::success(dashboard))
}
905
906pub async fn get_routes(State(state): State<AdminState>) -> impl IntoResponse {
908 if let Some(http_addr) = state.http_server_addr {
909 let url = format!("http://{}/__mockforge/routes", http_addr);
911 if let Ok(response) = reqwest::get(&url).await {
912 if response.status().is_success() {
913 if let Ok(body) = response.text().await {
914 return (StatusCode::OK, [("content-type", "application/json")], body);
915 }
916 }
917 }
918 }
919
920 (
922 StatusCode::OK,
923 [("content-type", "application/json")],
924 r#"{"routes":[]}"#.to_string(),
925 )
926}
927
928pub async fn get_server_info(State(state): State<AdminState>) -> Json<serde_json::Value> {
930 Json(json!({
931 "http_server": state.http_server_addr.map(|addr| addr.to_string()),
932 "ws_server": state.ws_server_addr.map(|addr| addr.to_string()),
933 "grpc_server": state.grpc_server_addr.map(|addr| addr.to_string()),
934 "admin_port": state.admin_port
935 }))
936}
937
938pub async fn get_health() -> Json<HealthCheck> {
940 Json(
941 HealthCheck::healthy()
942 .with_service("http".to_string(), "healthy".to_string())
943 .with_service("websocket".to_string(), "healthy".to_string())
944 .with_service("grpc".to_string(), "healthy".to_string()),
945 )
946}
947
/// GET handler returning request logs, filtered by optional `method`, `path`
/// (substring), `status`, and `limit` query parameters.
///
/// Prefers the global centralized logger; falls back to the admin-local log
/// buffer. Note: with the centralized logger, `limit` is applied *before*
/// filtering, so fewer than `limit` matching entries may be returned.
pub async fn get_logs(
    State(state): State<AdminState>,
    Query(params): Query<HashMap<String, String>>,
) -> Json<ApiResponse<Vec<RequestLog>>> {
    let mut filter = LogFilter::default();

    if let Some(method) = params.get("method") {
        filter.method = Some(method.clone());
    }
    if let Some(path) = params.get("path") {
        filter.path_pattern = Some(path.clone());
    }
    if let Some(status) = params.get("status").and_then(|s| s.parse().ok()) {
        filter.status_code = Some(status);
    }
    if let Some(limit) = params.get("limit").and_then(|s| s.parse().ok()) {
        filter.limit = Some(limit);
    }

    let logs = if let Some(global_logger) = mockforge_core::get_global_logger() {
        let centralized_logs = global_logger.get_recent_logs(filter.limit).await;

        centralized_logs
            .into_iter()
            .filter(|log| {
                if let Some(ref method) = filter.method {
                    if log.method != *method {
                        return false;
                    }
                }
                if let Some(ref path_pattern) = filter.path_pattern {
                    if !log.path.contains(path_pattern) {
                        return false;
                    }
                }
                if let Some(status) = filter.status_code {
                    if log.status_code != status {
                        return false;
                    }
                }
                true
            })
            // Convert the centralized entry type into the local RequestLog model.
            .map(|log| RequestLog {
                id: log.id,
                timestamp: log.timestamp,
                method: log.method,
                path: log.path,
                status_code: log.status_code,
                response_time_ms: log.response_time_ms,
                client_ip: log.client_ip,
                user_agent: log.user_agent,
                headers: log.headers,
                response_size_bytes: log.response_size_bytes,
                error_message: log.error_message,
            })
            .collect()
    } else {
        state.get_logs_filtered(&filter).await
    };

    Json(ApiResponse::success(logs))
}
1015
/// Maximum number of recent centralized log entries inspected per SSE poll tick.
const RECENT_LOGS_LIMIT: usize = 20;
/// Only logs newer than this many minutes are ever streamed.
const RECENT_LOGS_TTL_MINUTES: i64 = 5;

/// SSE handler streaming newly observed request logs in near-real time.
///
/// Polls the global centralized logger every 500ms, deduplicates by log id,
/// drops entries older than `RECENT_LOGS_TTL_MINUTES`, and emits `new_logs`
/// events; ticks with nothing to send emit `keep_alive` events instead.
///
/// NOTE(review): the `seen_ids` set only ever grows for the lifetime of a
/// connection — for long-lived clients this is an unbounded-memory concern.
pub async fn logs_sse(
    State(_state): State<AdminState>,
) -> Sse<impl Stream<Item = std::result::Result<Event, Infallible>>> {
    tracing::info!("SSE endpoint /logs/sse accessed - starting real-time log streaming for recent requests only");

    // unfold carries the set of already-sent log ids as stream state.
    let stream = stream::unfold(std::collections::HashSet::new(), |mut seen_ids| async move {
        tokio::time::sleep(Duration::from_millis(500)).await;

        if let Some(global_logger) = mockforge_core::get_global_logger() {
            let centralized_logs = global_logger.get_recent_logs(Some(RECENT_LOGS_LIMIT)).await;

            tracing::debug!(
                "SSE: Checking logs - total logs: {}, seen logs: {}",
                centralized_logs.len(),
                seen_ids.len()
            );

            let now = chrono::Utc::now();
            let ttl_cutoff = now - chrono::Duration::minutes(RECENT_LOGS_TTL_MINUTES);

            // Keep only fresh, not-yet-sent entries, converted to RequestLog.
            let new_logs: Vec<RequestLog> = centralized_logs
                .into_iter()
                .filter(|log| {
                    log.timestamp > ttl_cutoff && !seen_ids.contains(&log.id)
                })
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            for log in &new_logs {
                seen_ids.insert(log.id.clone());
            }

            if !new_logs.is_empty() {
                tracing::info!("SSE: Sending {} new logs to client", new_logs.len());

                let event_data = serde_json::to_string(&new_logs).unwrap_or_default();
                let event = Ok(Event::default().event("new_logs").data(event_data));

                return Some((event, seen_ids));
            }
        }

        // Nothing new (or no global logger): emit an empty keep-alive event.
        let event = Ok(Event::default().event("keep_alive").data(""));
        Some((event, seen_ids))
    });

    Sse::new(stream).keep_alive(
        axum::response::sse::KeepAlive::new()
            .interval(Duration::from_secs(15))
            .text("keep-alive-text"),
    )
}
1092
1093pub async fn get_metrics(State(state): State<AdminState>) -> Json<ApiResponse<MetricsData>> {
1095 let metrics = if let Some(global_logger) = mockforge_core::get_global_logger() {
1097 let all_logs = global_logger.get_recent_logs(None).await;
1098
1099 let total_requests = all_logs.len() as u64;
1100 let mut requests_by_endpoint = HashMap::new();
1101 let mut errors_by_endpoint = HashMap::new();
1102 let mut response_times = Vec::new();
1103 let mut last_request_by_endpoint = HashMap::new();
1104
1105 for log in &all_logs {
1106 let endpoint_key = format!("{} {}", log.method, log.path);
1107 *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1108
1109 if log.status_code >= 400 {
1110 *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1111 }
1112
1113 response_times.push(log.response_time_ms);
1114 last_request_by_endpoint.insert(endpoint_key, log.timestamp);
1115 }
1116
1117 RequestMetrics {
1118 total_requests,
1119 active_connections: 0,
1120 requests_by_endpoint,
1121 response_times,
1122 response_times_by_endpoint: HashMap::new(),
1123 errors_by_endpoint,
1124 last_request_by_endpoint,
1125 }
1126 } else {
1127 state.get_metrics().await
1128 };
1129
1130 let system_metrics = state.get_system_metrics().await;
1131 let time_series = state.get_time_series_data().await;
1132
1133 let mut response_times = metrics.response_times.clone();
1135 response_times.sort();
1136
1137 let p50 = if !response_times.is_empty() {
1138 response_times[response_times.len() / 2] as u64
1139 } else {
1140 0
1141 };
1142
1143 let p95 = if !response_times.is_empty() {
1144 let idx = (response_times.len() as f64 * 0.95) as usize;
1145 response_times[response_times.len().min(idx)] as u64
1146 } else {
1147 0
1148 };
1149
1150 let p99 = if !response_times.is_empty() {
1151 let idx = (response_times.len() as f64 * 0.99) as usize;
1152 response_times[response_times.len().min(idx)] as u64
1153 } else {
1154 0
1155 };
1156
1157 let mut error_rate_by_endpoint = HashMap::new();
1159 for (endpoint, total_count) in &metrics.requests_by_endpoint {
1160 let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
1161 let error_rate = if *total_count > 0 {
1162 error_count as f64 / *total_count as f64
1163 } else {
1164 0.0
1165 };
1166 error_rate_by_endpoint.insert(endpoint.clone(), error_rate);
1167 }
1168
1169 let memory_usage_over_time = if time_series.memory_usage.is_empty() {
1172 vec![(Utc::now(), system_metrics.memory_usage_mb)]
1173 } else {
1174 time_series
1175 .memory_usage
1176 .iter()
1177 .map(|point| (point.timestamp, point.value as u64))
1178 .collect()
1179 };
1180
1181 let cpu_usage_over_time = if time_series.cpu_usage.is_empty() {
1182 vec![(Utc::now(), system_metrics.cpu_usage_percent)]
1183 } else {
1184 time_series
1185 .cpu_usage
1186 .iter()
1187 .map(|point| (point.timestamp, point.value))
1188 .collect()
1189 };
1190
1191 let metrics_data = MetricsData {
1192 requests_by_endpoint: metrics.requests_by_endpoint,
1193 response_time_percentiles: HashMap::from([
1194 ("p50".to_string(), p50),
1195 ("p95".to_string(), p95),
1196 ("p99".to_string(), p99),
1197 ]),
1198 error_rate_by_endpoint,
1199 memory_usage_over_time,
1200 cpu_usage_over_time,
1201 };
1202
1203 Json(ApiResponse::success(metrics_data))
1204}
1205
1206pub async fn update_latency(
1208 State(state): State<AdminState>,
1209 Json(update): Json<ConfigUpdate>,
1210) -> Json<ApiResponse<String>> {
1211 if update.config_type != "latency" {
1212 return Json(ApiResponse::error("Invalid config type".to_string()));
1213 }
1214
1215 let base_ms = update.data.get("base_ms").and_then(|v| v.as_u64()).unwrap_or(50);
1217
1218 let jitter_ms = update.data.get("jitter_ms").and_then(|v| v.as_u64()).unwrap_or(20);
1219
1220 let tag_overrides = update
1221 .data
1222 .get("tag_overrides")
1223 .and_then(|v| v.as_object())
1224 .map(|obj| obj.iter().filter_map(|(k, v)| v.as_u64().map(|val| (k.clone(), val))).collect())
1225 .unwrap_or_default();
1226
1227 state.update_latency_config(base_ms, jitter_ms, tag_overrides).await;
1229
1230 tracing::info!("Updated latency profile: base_ms={}, jitter_ms={}", base_ms, jitter_ms);
1231
1232 Json(ApiResponse::success("Latency profile updated".to_string()))
1233}
1234
1235pub async fn update_faults(
1237 State(state): State<AdminState>,
1238 Json(update): Json<ConfigUpdate>,
1239) -> Json<ApiResponse<String>> {
1240 if update.config_type != "faults" {
1241 return Json(ApiResponse::error("Invalid config type".to_string()));
1242 }
1243
1244 let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1246
1247 let failure_rate = update.data.get("failure_rate").and_then(|v| v.as_f64()).unwrap_or(0.0);
1248
1249 let status_codes = update
1250 .data
1251 .get("status_codes")
1252 .and_then(|v| v.as_array())
1253 .map(|arr| arr.iter().filter_map(|v| v.as_u64().map(|n| n as u16)).collect())
1254 .unwrap_or_else(|| vec![500, 502, 503]);
1255
1256 state.update_fault_config(enabled, failure_rate, status_codes).await;
1258
1259 tracing::info!(
1260 "Updated fault configuration: enabled={}, failure_rate={}",
1261 enabled,
1262 failure_rate
1263 );
1264
1265 Json(ApiResponse::success("Fault configuration updated".to_string()))
1266}
1267
1268pub async fn update_proxy(
1270 State(state): State<AdminState>,
1271 Json(update): Json<ConfigUpdate>,
1272) -> Json<ApiResponse<String>> {
1273 if update.config_type != "proxy" {
1274 return Json(ApiResponse::error("Invalid config type".to_string()));
1275 }
1276
1277 let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1279
1280 let upstream_url =
1281 update.data.get("upstream_url").and_then(|v| v.as_str()).map(|s| s.to_string());
1282
1283 let timeout_seconds = update.data.get("timeout_seconds").and_then(|v| v.as_u64()).unwrap_or(30);
1284
1285 state.update_proxy_config(enabled, upstream_url.clone(), timeout_seconds).await;
1287
1288 tracing::info!(
1289 "Updated proxy configuration: enabled={}, upstream_url={:?}",
1290 enabled,
1291 upstream_url
1292 );
1293
1294 Json(ApiResponse::success("Proxy configuration updated".to_string()))
1295}
1296
1297pub async fn clear_logs(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
1299 state.clear_logs().await;
1301 tracing::info!("Cleared all request logs");
1302
1303 Json(ApiResponse::success("Logs cleared".to_string()))
1304}
1305
1306pub async fn restart_servers(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
1308 let current_status = state.get_restart_status().await;
1310 if current_status.in_progress {
1311 return Json(ApiResponse::error("Server restart already in progress".to_string()));
1312 }
1313
1314 if let Err(e) = state
1316 .initiate_restart("Manual restart requested via admin UI".to_string())
1317 .await
1318 {
1319 return Json(ApiResponse::error(format!("Failed to initiate restart: {}", e)));
1320 }
1321
1322 let state_clone = state.clone();
1324 tokio::spawn(async move {
1325 if let Err(e) = perform_server_restart(&state_clone).await {
1326 tracing::error!("Server restart failed: {}", e);
1327 state_clone.complete_restart(false).await;
1328 } else {
1329 tracing::info!("Server restart completed successfully");
1330 state_clone.complete_restart(true).await;
1331 }
1332 });
1333
1334 tracing::info!("Server restart initiated via admin UI");
1335 Json(ApiResponse::success(
1336 "Server restart initiated. Please wait for completion.".to_string(),
1337 ))
1338}
1339
1340async fn perform_server_restart(_state: &AdminState) -> Result<()> {
1342 let current_pid = std::process::id();
1344 tracing::info!("Initiating restart for process PID: {}", current_pid);
1345
1346 let parent_pid = get_parent_process_id(current_pid).await?;
1348 tracing::info!("Found parent process PID: {}", parent_pid);
1349
1350 if let Ok(()) = restart_via_parent_signal(parent_pid).await {
1352 tracing::info!("Restart initiated via parent process signal");
1353 return Ok(());
1354 }
1355
1356 if let Ok(()) = restart_via_process_replacement().await {
1358 tracing::info!("Restart initiated via process replacement");
1359 return Ok(());
1360 }
1361
1362 restart_via_script().await
1364}
1365
1366async fn get_parent_process_id(pid: u32) -> Result<u32> {
1368 #[cfg(target_os = "linux")]
1370 {
1371 let stat_path = format!("/proc/{}/stat", pid);
1373 if let Ok(ppid) = tokio::task::spawn_blocking(move || -> Result<u32> {
1374 let content = std::fs::read_to_string(&stat_path)
1375 .map_err(|e| Error::generic(format!("Failed to read {}: {}", stat_path, e)))?;
1376
1377 let fields: Vec<&str> = content.split_whitespace().collect();
1378 if fields.len() > 3 {
1379 fields[3]
1380 .parse::<u32>()
1381 .map_err(|e| Error::generic(format!("Failed to parse PPID: {}", e)))
1382 } else {
1383 Err(Error::generic("Insufficient fields in /proc/pid/stat".to_string()))
1384 }
1385 })
1386 .await
1387 {
1388 return ppid;
1389 }
1390 }
1391
1392 Ok(1) }
1395
1396async fn restart_via_parent_signal(parent_pid: u32) -> Result<()> {
1398 #[cfg(unix)]
1399 {
1400 use std::process::Command;
1401
1402 let output = Command::new("kill")
1404 .args(["-TERM", &parent_pid.to_string()])
1405 .output()
1406 .map_err(|e| Error::generic(format!("Failed to send signal: {}", e)))?;
1407
1408 if !output.status.success() {
1409 return Err(Error::generic(
1410 "Failed to send restart signal to parent process".to_string(),
1411 ));
1412 }
1413
1414 tokio::time::sleep(Duration::from_millis(100)).await;
1416 Ok(())
1417 }
1418
1419 #[cfg(not(unix))]
1420 {
1421 Err(Error::generic(
1422 "Signal-based restart not supported on this platform".to_string(),
1423 ))
1424 }
1425}
1426
1427async fn restart_via_process_replacement() -> Result<()> {
1429 let current_exe = std::env::current_exe()
1431 .map_err(|e| Error::generic(format!("Failed to get current executable: {}", e)))?;
1432
1433 let args: Vec<String> = std::env::args().collect();
1435
1436 tracing::info!("Restarting with command: {:?}", args);
1437
1438 let mut child = Command::new(¤t_exe)
1440 .args(&args[1..]) .stdout(Stdio::inherit())
1442 .stderr(Stdio::inherit())
1443 .spawn()
1444 .map_err(|e| Error::generic(format!("Failed to start new process: {}", e)))?;
1445
1446 tokio::time::sleep(Duration::from_millis(500)).await;
1448
1449 match child.try_wait() {
1451 Ok(Some(status)) => {
1452 if status.success() {
1453 tracing::info!("New process started successfully");
1454 Ok(())
1455 } else {
1456 Err(Error::generic("New process exited with error".to_string()))
1457 }
1458 }
1459 Ok(None) => {
1460 tracing::info!("New process is running, exiting current process");
1461 std::process::exit(0);
1463 }
1464 Err(e) => Err(Error::generic(format!("Failed to check new process status: {}", e))),
1465 }
1466}
1467
1468async fn restart_via_script() -> Result<()> {
1470 let script_paths = ["./scripts/restart.sh", "./restart.sh", "restart.sh"];
1472
1473 for script_path in &script_paths {
1474 if std::path::Path::new(script_path).exists() {
1475 tracing::info!("Using restart script: {}", script_path);
1476
1477 let output = Command::new("bash")
1478 .arg(script_path)
1479 .output()
1480 .map_err(|e| Error::generic(format!("Failed to execute restart script: {}", e)))?;
1481
1482 if output.status.success() {
1483 return Ok(());
1484 } else {
1485 tracing::warn!(
1486 "Restart script failed: {}",
1487 String::from_utf8_lossy(&output.stderr)
1488 );
1489 }
1490 }
1491 }
1492
1493 let clear_script = "./scripts/clear-ports.sh";
1495 if std::path::Path::new(clear_script).exists() {
1496 tracing::info!("Using clear-ports script as fallback");
1497
1498 let _ = Command::new("bash").arg(clear_script).output();
1499 }
1500
1501 Err(Error::generic(
1502 "No restart mechanism available. Please restart manually.".to_string(),
1503 ))
1504}
1505
1506pub async fn get_restart_status(
1508 State(state): State<AdminState>,
1509) -> Json<ApiResponse<RestartStatus>> {
1510 let status = state.get_restart_status().await;
1511 Json(ApiResponse::success(status))
1512}
1513
1514pub async fn get_config(State(state): State<AdminState>) -> Json<ApiResponse<serde_json::Value>> {
1516 let config_state = state.get_config().await;
1517
1518 let config = json!({
1519 "latency": {
1520 "enabled": true,
1521 "base_ms": config_state.latency_profile.base_ms,
1522 "jitter_ms": config_state.latency_profile.jitter_ms,
1523 "tag_overrides": config_state.latency_profile.tag_overrides
1524 },
1525 "faults": {
1526 "enabled": config_state.fault_config.enabled,
1527 "failure_rate": config_state.fault_config.failure_rate,
1528 "status_codes": config_state.fault_config.status_codes
1529 },
1530 "proxy": {
1531 "enabled": config_state.proxy_config.enabled,
1532 "upstream_url": config_state.proxy_config.upstream_url,
1533 "timeout_seconds": config_state.proxy_config.timeout_seconds
1534 },
1535 "validation": {
1536 "mode": config_state.validation_settings.mode,
1537 "aggregate_errors": config_state.validation_settings.aggregate_errors,
1538 "validate_responses": config_state.validation_settings.validate_responses,
1539 "overrides": config_state.validation_settings.overrides
1540 }
1541 });
1542
1543 Json(ApiResponse::success(config))
1544}
1545
1546pub fn count_fixtures() -> Result<usize> {
1548 let fixtures_dir =
1550 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1551 let fixtures_path = std::path::Path::new(&fixtures_dir);
1552
1553 if !fixtures_path.exists() {
1554 return Ok(0);
1555 }
1556
1557 let mut total_count = 0;
1558
1559 let http_fixtures_path = fixtures_path.join("http");
1561 if http_fixtures_path.exists() {
1562 total_count += count_fixtures_in_directory(&http_fixtures_path)?;
1563 }
1564
1565 let ws_fixtures_path = fixtures_path.join("websocket");
1567 if ws_fixtures_path.exists() {
1568 total_count += count_fixtures_in_directory(&ws_fixtures_path)?;
1569 }
1570
1571 let grpc_fixtures_path = fixtures_path.join("grpc");
1573 if grpc_fixtures_path.exists() {
1574 total_count += count_fixtures_in_directory(&grpc_fixtures_path)?;
1575 }
1576
1577 Ok(total_count)
1578}
1579
1580fn count_fixtures_in_directory(dir_path: &std::path::Path) -> Result<usize> {
1582 let mut count = 0;
1583
1584 if let Ok(entries) = std::fs::read_dir(dir_path) {
1585 for entry in entries {
1586 let entry = entry
1587 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
1588 let path = entry.path();
1589
1590 if path.is_dir() {
1591 count += count_fixtures_in_directory(&path)?;
1593 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
1594 count += 1;
1596 }
1597 }
1598 }
1599
1600 Ok(count)
1601}
1602
/// Does at least one recorded fixture exist for this route?
///
/// HTTP fixtures live at `<root>/http/<method-lowercase>/<encoded-path>/*.json`
/// where the path encoding replaces '/' and ':' with '_'. For the pseudo
/// method "WS", `<root>/websocket/<encoded-path>/*.json` is also checked.
pub fn route_has_fixtures(method: &str, path: &str) -> bool {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let root = std::path::Path::new(&fixtures_dir);

    if !root.exists() {
        return false;
    }

    // Fixture directories encode the route path with '/' and ':' as '_'.
    let encoded_path = path.replace(['/', ':'], "_");

    // True when the directory contains at least one `.json` file.
    let has_json = |dir: &std::path::Path| -> bool {
        std::fs::read_dir(dir)
            .map(|entries| {
                entries
                    .flatten()
                    .any(|entry| entry.path().extension().and_then(|s| s.to_str()) == Some("json"))
            })
            .unwrap_or(false)
    };

    let http_dir = root.join("http").join(method.to_lowercase()).join(&encoded_path);
    if has_json(&http_dir) {
        return true;
    }

    if method.to_uppercase() == "WS" {
        let ws_dir = root.join("websocket").join(&encoded_path);
        if has_json(&ws_dir) {
            return true;
        }
    }

    false
}
1647
1648fn calculate_endpoint_latency(metrics: &RequestMetrics, endpoint: &str) -> Option<u64> {
1650 metrics.response_times_by_endpoint.get(endpoint).and_then(|times| {
1651 if times.is_empty() {
1652 None
1653 } else {
1654 let sum: u64 = times.iter().sum();
1655 Some(sum / times.len() as u64)
1656 }
1657 })
1658}
1659
1660fn get_endpoint_last_request(
1662 metrics: &RequestMetrics,
1663 endpoint: &str,
1664) -> Option<chrono::DateTime<chrono::Utc>> {
1665 metrics.last_request_by_endpoint.get(endpoint).copied()
1666}
1667
1668fn count_requests_by_server_type(metrics: &RequestMetrics, server_type: &str) -> u64 {
1670 match server_type {
1671 "HTTP" => {
1672 metrics
1674 .requests_by_endpoint
1675 .iter()
1676 .filter(|(endpoint, _)| {
1677 let method = endpoint.split(' ').next().unwrap_or("");
1678 matches!(
1679 method,
1680 "GET" | "POST" | "PUT" | "DELETE" | "PATCH" | "HEAD" | "OPTIONS"
1681 )
1682 })
1683 .map(|(_, count)| count)
1684 .sum()
1685 }
1686 "WebSocket" => {
1687 metrics
1689 .requests_by_endpoint
1690 .iter()
1691 .filter(|(endpoint, _)| {
1692 let method = endpoint.split(' ').next().unwrap_or("");
1693 method == "WS"
1694 })
1695 .map(|(_, count)| count)
1696 .sum()
1697 }
1698 "gRPC" => {
1699 metrics
1701 .requests_by_endpoint
1702 .iter()
1703 .filter(|(endpoint, _)| {
1704 let method = endpoint.split(' ').next().unwrap_or("");
1705 method == "gRPC"
1706 })
1707 .map(|(_, count)| count)
1708 .sum()
1709 }
1710 _ => 0,
1711 }
1712}
1713
1714pub async fn get_fixtures() -> Json<ApiResponse<Vec<FixtureInfo>>> {
1716 match scan_fixtures_directory() {
1717 Ok(fixtures) => Json(ApiResponse::success(fixtures)),
1718 Err(e) => {
1719 tracing::error!("Failed to scan fixtures directory: {}", e);
1720 Json(ApiResponse::error(format!("Failed to load fixtures: {}", e)))
1721 }
1722 }
1723}
1724
1725fn scan_fixtures_directory() -> Result<Vec<FixtureInfo>> {
1727 let fixtures_dir =
1728 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1729 let fixtures_path = std::path::Path::new(&fixtures_dir);
1730
1731 if !fixtures_path.exists() {
1732 tracing::warn!("Fixtures directory does not exist: {}", fixtures_dir);
1733 return Ok(Vec::new());
1734 }
1735
1736 let mut all_fixtures = Vec::new();
1737
1738 let http_fixtures = scan_protocol_fixtures(fixtures_path, "http")?;
1740 all_fixtures.extend(http_fixtures);
1741
1742 let ws_fixtures = scan_protocol_fixtures(fixtures_path, "websocket")?;
1744 all_fixtures.extend(ws_fixtures);
1745
1746 let grpc_fixtures = scan_protocol_fixtures(fixtures_path, "grpc")?;
1748 all_fixtures.extend(grpc_fixtures);
1749
1750 all_fixtures.sort_by(|a, b| b.saved_at.cmp(&a.saved_at));
1752
1753 tracing::info!("Found {} fixtures in directory: {}", all_fixtures.len(), fixtures_dir);
1754 Ok(all_fixtures)
1755}
1756
1757fn scan_protocol_fixtures(
1759 fixtures_path: &std::path::Path,
1760 protocol: &str,
1761) -> Result<Vec<FixtureInfo>> {
1762 let protocol_path = fixtures_path.join(protocol);
1763 let mut fixtures = Vec::new();
1764
1765 if !protocol_path.exists() {
1766 return Ok(fixtures);
1767 }
1768
1769 if let Ok(entries) = std::fs::read_dir(&protocol_path) {
1771 for entry in entries {
1772 let entry = entry
1773 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
1774 let path = entry.path();
1775
1776 if path.is_dir() {
1777 let sub_fixtures = scan_directory_recursive(&path, protocol)?;
1779 fixtures.extend(sub_fixtures);
1780 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
1781 if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
1783 fixtures.push(fixture);
1784 }
1785 }
1786 }
1787 }
1788
1789 Ok(fixtures)
1790}
1791
1792fn scan_directory_recursive(
1794 dir_path: &std::path::Path,
1795 protocol: &str,
1796) -> Result<Vec<FixtureInfo>> {
1797 let mut fixtures = Vec::new();
1798
1799 if let Ok(entries) = std::fs::read_dir(dir_path) {
1800 for entry in entries {
1801 let entry = entry
1802 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
1803 let path = entry.path();
1804
1805 if path.is_dir() {
1806 let sub_fixtures = scan_directory_recursive(&path, protocol)?;
1808 fixtures.extend(sub_fixtures);
1809 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
1810 if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
1812 fixtures.push(fixture);
1813 }
1814 }
1815 }
1816 }
1817
1818 Ok(fixtures)
1819}
1820
1821fn parse_fixture_file_sync(file_path: &std::path::Path, protocol: &str) -> Result<FixtureInfo> {
1823 let metadata = std::fs::metadata(file_path)
1825 .map_err(|e| Error::generic(format!("Failed to read file metadata: {}", e)))?;
1826
1827 let file_size = metadata.len();
1828 let modified_time = metadata
1829 .modified()
1830 .map_err(|e| Error::generic(format!("Failed to get file modification time: {}", e)))?;
1831
1832 let saved_at = chrono::DateTime::from(modified_time);
1833
1834 let content = std::fs::read_to_string(file_path)
1836 .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
1837
1838 let fixture_data: serde_json::Value = serde_json::from_str(&content)
1839 .map_err(|e| Error::generic(format!("Failed to parse fixture JSON: {}", e)))?;
1840
1841 let (method, path) = extract_method_and_path(&fixture_data, protocol)?;
1843
1844 let id = generate_fixture_id(file_path, &content);
1846
1847 let fingerprint = extract_fingerprint(file_path, &fixture_data)?;
1849
1850 let fixtures_dir =
1852 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1853 let fixtures_path = std::path::Path::new(&fixtures_dir);
1854 let file_path_str = file_path
1855 .strip_prefix(fixtures_path)
1856 .unwrap_or(file_path)
1857 .to_string_lossy()
1858 .to_string();
1859
1860 Ok(FixtureInfo {
1861 id,
1862 protocol: protocol.to_string(),
1863 method,
1864 path,
1865 saved_at,
1866 file_size,
1867 file_path: file_path_str,
1868 fingerprint,
1869 metadata: fixture_data,
1870 })
1871}
1872
1873fn extract_method_and_path(
1875 fixture_data: &serde_json::Value,
1876 protocol: &str,
1877) -> Result<(String, String)> {
1878 match protocol {
1879 "http" => {
1880 let method = fixture_data
1882 .get("request")
1883 .and_then(|req| req.get("method"))
1884 .and_then(|m| m.as_str())
1885 .unwrap_or("UNKNOWN")
1886 .to_uppercase();
1887
1888 let path = fixture_data
1889 .get("request")
1890 .and_then(|req| req.get("path"))
1891 .and_then(|p| p.as_str())
1892 .unwrap_or("/unknown")
1893 .to_string();
1894
1895 Ok((method, path))
1896 }
1897 "websocket" => {
1898 let path = fixture_data
1900 .get("path")
1901 .and_then(|p| p.as_str())
1902 .or_else(|| {
1903 fixture_data
1904 .get("request")
1905 .and_then(|req| req.get("path"))
1906 .and_then(|p| p.as_str())
1907 })
1908 .unwrap_or("/ws")
1909 .to_string();
1910
1911 Ok(("WS".to_string(), path))
1912 }
1913 "grpc" => {
1914 let service =
1916 fixture_data.get("service").and_then(|s| s.as_str()).unwrap_or("UnknownService");
1917
1918 let method =
1919 fixture_data.get("method").and_then(|m| m.as_str()).unwrap_or("UnknownMethod");
1920
1921 let path = format!("/{}/{}", service, method);
1922 Ok(("gRPC".to_string(), path))
1923 }
1924 _ => {
1925 let path = fixture_data
1926 .get("path")
1927 .and_then(|p| p.as_str())
1928 .unwrap_or("/unknown")
1929 .to_string();
1930 Ok((protocol.to_uppercase(), path))
1931 }
1932 }
1933}
1934
/// Derive a fixture identifier by hashing the file path together with the
/// file contents (path first, then content — identical to hashing them
/// sequentially). Not cryptographic; only used as a lookup key.
fn generate_fixture_id(file_path: &std::path::Path, content: &str) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut hasher = DefaultHasher::new();
    // Tuple hashing feeds each element into the hasher in order.
    (file_path, content).hash(&mut hasher);
    format!("fixture_{:x}", hasher.finish())
}
1945
1946fn extract_fingerprint(
1948 file_path: &std::path::Path,
1949 fixture_data: &serde_json::Value,
1950) -> Result<String> {
1951 if let Some(fingerprint) = fixture_data.get("fingerprint").and_then(|f| f.as_str()) {
1953 return Ok(fingerprint.to_string());
1954 }
1955
1956 if let Some(file_name) = file_path.file_stem().and_then(|s| s.to_str()) {
1958 if let Some(hash) = file_name.split('_').next_back() {
1960 if hash.len() >= 8 && hash.chars().all(|c| c.is_alphanumeric()) {
1961 return Ok(hash.to_string());
1962 }
1963 }
1964 }
1965
1966 use std::collections::hash_map::DefaultHasher;
1968 use std::hash::{Hash, Hasher};
1969
1970 let mut hasher = DefaultHasher::new();
1971 file_path.hash(&mut hasher);
1972 Ok(format!("{:x}", hasher.finish()))
1973}
1974
1975pub async fn delete_fixture(
1977 Json(payload): Json<FixtureDeleteRequest>,
1978) -> Json<ApiResponse<String>> {
1979 match delete_fixture_by_id(&payload.fixture_id).await {
1980 Ok(_) => {
1981 tracing::info!("Successfully deleted fixture: {}", payload.fixture_id);
1982 Json(ApiResponse::success("Fixture deleted successfully".to_string()))
1983 }
1984 Err(e) => {
1985 tracing::error!("Failed to delete fixture {}: {}", payload.fixture_id, e);
1986 Json(ApiResponse::error(format!("Failed to delete fixture: {}", e)))
1987 }
1988 }
1989}
1990
1991pub async fn delete_fixtures_bulk(
1993 Json(payload): Json<FixtureBulkDeleteRequest>,
1994) -> Json<ApiResponse<FixtureBulkDeleteResult>> {
1995 let mut deleted_count = 0;
1996 let mut errors = Vec::new();
1997
1998 for fixture_id in &payload.fixture_ids {
1999 match delete_fixture_by_id(fixture_id).await {
2000 Ok(_) => {
2001 deleted_count += 1;
2002 tracing::info!("Successfully deleted fixture: {}", fixture_id);
2003 }
2004 Err(e) => {
2005 errors.push(format!("Failed to delete {}: {}", fixture_id, e));
2006 tracing::error!("Failed to delete fixture {}: {}", fixture_id, e);
2007 }
2008 }
2009 }
2010
2011 let result = FixtureBulkDeleteResult {
2012 deleted_count,
2013 total_requested: payload.fixture_ids.len(),
2014 errors: errors.clone(),
2015 };
2016
2017 if errors.is_empty() {
2018 Json(ApiResponse::success(result))
2019 } else {
2020 Json(ApiResponse::error(format!(
2021 "Partial success: {} deleted, {} errors",
2022 deleted_count,
2023 errors.len()
2024 )))
2025 }
2026}
2027
2028async fn delete_fixture_by_id(fixture_id: &str) -> Result<()> {
2030 let fixtures_dir =
2033 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2034 let fixtures_path = std::path::Path::new(&fixtures_dir);
2035
2036 if !fixtures_path.exists() {
2037 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2038 }
2039
2040 let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2042
2043 let file_path_clone = file_path.clone();
2045 tokio::task::spawn_blocking(move || {
2046 if file_path_clone.exists() {
2047 std::fs::remove_file(&file_path_clone).map_err(|e| {
2048 Error::generic(format!(
2049 "Failed to delete fixture file {}: {}",
2050 file_path_clone.display(),
2051 e
2052 ))
2053 })
2054 } else {
2055 Err(Error::generic(format!("Fixture file not found: {}", file_path_clone.display())))
2056 }
2057 })
2058 .await
2059 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2060
2061 tracing::info!("Deleted fixture file: {}", file_path.display());
2062
2063 cleanup_empty_directories(&file_path).await;
2065
2066 Ok(())
2067}
2068
2069fn find_fixture_file_by_id(
2071 fixtures_path: &std::path::Path,
2072 fixture_id: &str,
2073) -> Result<std::path::PathBuf> {
2074 let protocols = ["http", "websocket", "grpc"];
2076
2077 for protocol in &protocols {
2078 let protocol_path = fixtures_path.join(protocol);
2079 if let Ok(found_path) = search_fixture_in_directory(&protocol_path, fixture_id) {
2080 return Ok(found_path);
2081 }
2082 }
2083
2084 Err(Error::generic(format!(
2085 "Fixture with ID '{}' not found in any protocol directory",
2086 fixture_id
2087 )))
2088}
2089
2090fn search_fixture_in_directory(
2092 dir_path: &std::path::Path,
2093 fixture_id: &str,
2094) -> Result<std::path::PathBuf> {
2095 if !dir_path.exists() {
2096 return Err(Error::generic(format!("Directory does not exist: {}", dir_path.display())));
2097 }
2098
2099 if let Ok(entries) = std::fs::read_dir(dir_path) {
2100 for entry in entries {
2101 let entry = entry
2102 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2103 let path = entry.path();
2104
2105 if path.is_dir() {
2106 if let Ok(found_path) = search_fixture_in_directory(&path, fixture_id) {
2108 return Ok(found_path);
2109 }
2110 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2111 if let Ok(fixture_info) = parse_fixture_file_sync(&path, "unknown") {
2113 if fixture_info.id == fixture_id {
2114 return Ok(path);
2115 }
2116 }
2117 }
2118 }
2119 }
2120
2121 Err(Error::generic(format!(
2122 "Fixture not found in directory: {}",
2123 dir_path.display()
2124 )))
2125}
2126
2127async fn cleanup_empty_directories(file_path: &std::path::Path) {
2129 let file_path = file_path.to_path_buf();
2130
2131 let _ = tokio::task::spawn_blocking(move || {
2133 if let Some(parent) = file_path.parent() {
2134 let mut current = parent;
2136 let fixtures_dir =
2137 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2138 let fixtures_path = std::path::Path::new(&fixtures_dir);
2139
2140 while current != fixtures_path && current.parent().is_some() {
2141 if let Ok(entries) = std::fs::read_dir(current) {
2142 if entries.count() == 0 {
2143 if let Err(e) = std::fs::remove_dir(current) {
2144 tracing::debug!(
2145 "Failed to remove empty directory {}: {}",
2146 current.display(),
2147 e
2148 );
2149 break;
2150 } else {
2151 tracing::debug!("Removed empty directory: {}", current.display());
2152 }
2153 } else {
2154 break;
2155 }
2156 } else {
2157 break;
2158 }
2159
2160 if let Some(next_parent) = current.parent() {
2161 current = next_parent;
2162 } else {
2163 break;
2164 }
2165 }
2166 }
2167 })
2168 .await;
2169}
2170
/// GET handler: download a fixture's raw JSON by id (`?id=...`).
///
/// Responds 400 when the `id` query parameter is missing and 404 when lookup
/// or reading fails; on success the body is served as an attachment via a
/// `Content-Disposition` header.
pub async fn download_fixture(Query(params): Query<HashMap<String, String>>) -> impl IntoResponse {
    let fixture_id = match params.get("id") {
        Some(id) => id,
        None => {
            // No id supplied — reject with a small JSON error body.
            return (
                http::StatusCode::BAD_REQUEST,
                [(http::header::CONTENT_TYPE, "application/json")],
                r#"{"error": "Missing fixture ID parameter"}"#,
            )
                .into_response();
        }
    };

    match download_fixture_by_id(fixture_id).await {
        // Success: serve the fixture content as a named attachment.
        Ok((content, file_name)) => (
            http::StatusCode::OK,
            [
                (http::header::CONTENT_TYPE, "application/json".to_string()),
                (
                    http::header::CONTENT_DISPOSITION,
                    format!("attachment; filename=\"{}\"", file_name),
                ),
            ],
            content,
        )
            .into_response(),
        Err(e) => {
            // Lookup/read failure is reported as 404 with the error embedded
            // in a JSON body (error text is formatted into the JSON string).
            tracing::error!("Failed to download fixture {}: {}", fixture_id, e);
            let error_response = format!(r#"{{"error": "Failed to download fixture: {}"}}"#, e);
            (
                http::StatusCode::NOT_FOUND,
                [(http::header::CONTENT_TYPE, "application/json".to_string())],
                error_response,
            )
                .into_response()
        }
    }
}
2212
2213async fn download_fixture_by_id(fixture_id: &str) -> Result<(String, String)> {
2215 let fixtures_dir =
2217 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2218 let fixtures_path = std::path::Path::new(&fixtures_dir);
2219
2220 if !fixtures_path.exists() {
2221 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2222 }
2223
2224 let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2225
2226 let file_path_clone = file_path.clone();
2228 let (content, file_name) = tokio::task::spawn_blocking(move || {
2229 let content = std::fs::read_to_string(&file_path_clone)
2230 .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
2231
2232 let file_name = file_path_clone
2233 .file_name()
2234 .and_then(|name| name.to_str())
2235 .unwrap_or("fixture.json")
2236 .to_string();
2237
2238 Ok::<_, Error>((content, file_name))
2239 })
2240 .await
2241 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2242
2243 tracing::info!("Downloaded fixture file: {} ({} bytes)", file_path.display(), content.len());
2244 Ok((content, file_name))
2245}
2246
2247pub async fn rename_fixture(
2249 axum::extract::Path(fixture_id): axum::extract::Path<String>,
2250 Json(payload): Json<FixtureRenameRequest>,
2251) -> Json<ApiResponse<String>> {
2252 match rename_fixture_by_id(&fixture_id, &payload.new_name).await {
2253 Ok(new_path) => {
2254 tracing::info!("Successfully renamed fixture: {} -> {}", fixture_id, payload.new_name);
2255 Json(ApiResponse::success(format!("Fixture renamed successfully to: {}", new_path)))
2256 }
2257 Err(e) => {
2258 tracing::error!("Failed to rename fixture {}: {}", fixture_id, e);
2259 Json(ApiResponse::error(format!("Failed to rename fixture: {}", e)))
2260 }
2261 }
2262}
2263
2264async fn rename_fixture_by_id(fixture_id: &str, new_name: &str) -> Result<String> {
2266 if new_name.is_empty() {
2268 return Err(Error::generic("New name cannot be empty".to_string()));
2269 }
2270
2271 let new_name = if new_name.ends_with(".json") {
2273 new_name.to_string()
2274 } else {
2275 format!("{}.json", new_name)
2276 };
2277
2278 let fixtures_dir =
2280 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2281 let fixtures_path = std::path::Path::new(&fixtures_dir);
2282
2283 if !fixtures_path.exists() {
2284 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2285 }
2286
2287 let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2288
2289 let parent = old_path
2291 .parent()
2292 .ok_or_else(|| Error::generic("Could not determine parent directory".to_string()))?;
2293
2294 let new_path = parent.join(&new_name);
2295
2296 if new_path.exists() {
2298 return Err(Error::generic(format!(
2299 "A fixture with name '{}' already exists in the same directory",
2300 new_name
2301 )));
2302 }
2303
2304 let old_path_clone = old_path.clone();
2306 let new_path_clone = new_path.clone();
2307 tokio::task::spawn_blocking(move || {
2308 std::fs::rename(&old_path_clone, &new_path_clone)
2309 .map_err(|e| Error::generic(format!("Failed to rename fixture file: {}", e)))
2310 })
2311 .await
2312 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2313
2314 tracing::info!("Renamed fixture file: {} -> {}", old_path.display(), new_path.display());
2315
2316 Ok(new_path
2318 .strip_prefix(fixtures_path)
2319 .unwrap_or(&new_path)
2320 .to_string_lossy()
2321 .to_string())
2322}
2323
2324pub async fn move_fixture(
2326 axum::extract::Path(fixture_id): axum::extract::Path<String>,
2327 Json(payload): Json<FixtureMoveRequest>,
2328) -> Json<ApiResponse<String>> {
2329 match move_fixture_by_id(&fixture_id, &payload.new_path).await {
2330 Ok(new_location) => {
2331 tracing::info!("Successfully moved fixture: {} -> {}", fixture_id, payload.new_path);
2332 Json(ApiResponse::success(format!("Fixture moved successfully to: {}", new_location)))
2333 }
2334 Err(e) => {
2335 tracing::error!("Failed to move fixture {}: {}", fixture_id, e);
2336 Json(ApiResponse::error(format!("Failed to move fixture: {}", e)))
2337 }
2338 }
2339}
2340
/// Move the fixture for `fixture_id` to `new_path`, interpreted relative to
/// the fixtures root (a leading '/' is stripped, so absolute-looking inputs
/// stay inside the root). Returns the final path relative to that root.
///
/// Destination resolution, in order:
/// - ends in `.json`: used as-is;
/// - is an existing directory, or contains no '.': treated as a directory and
///   the original file name is appended;
/// - otherwise: the extension is replaced with `.json`.
///
/// # Errors
/// Fails on an empty destination, a missing fixtures root, an unresolvable
/// id, a collision at the destination, or a failed create-dir/rename.
async fn move_fixture_by_id(fixture_id: &str, new_path: &str) -> Result<String> {
    if new_path.is_empty() {
        return Err(Error::generic("New path cannot be empty".to_string()));
    }

    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    // Anchor the destination under the fixtures root; a leading '/' means
    // "root-relative", not filesystem-absolute.
    let new_full_path = if new_path.starts_with('/') {
        fixtures_path.join(new_path.trim_start_matches('/'))
    } else {
        fixtures_path.join(new_path)
    };

    // Normalize the destination to a concrete .json file path (see doc above).
    // NOTE(review): `!new_path.contains('.')` checks the whole input string,
    // so a dotted directory name like "v1.2/" routes into the with_extension
    // branch — confirm this is intended.
    let new_full_path = if new_full_path.extension().and_then(|s| s.to_str()) == Some("json") {
        new_full_path
    } else {
        if new_full_path.is_dir() || !new_path.contains('.') {
            let file_name = old_path.file_name().ok_or_else(|| {
                Error::generic("Could not determine original file name".to_string())
            })?;
            new_full_path.join(file_name)
        } else {
            new_full_path.with_extension("json")
        }
    };

    // Refuse to clobber an existing fixture at the destination.
    if new_full_path.exists() {
        return Err(Error::generic(format!(
            "A fixture already exists at path: {}",
            new_full_path.display()
        )));
    }

    // Create the target directory and perform the move on the blocking pool.
    let old_path_clone = old_path.clone();
    let new_full_path_clone = new_full_path.clone();
    tokio::task::spawn_blocking(move || {
        if let Some(parent) = new_full_path_clone.parent() {
            std::fs::create_dir_all(parent)
                .map_err(|e| Error::generic(format!("Failed to create target directory: {}", e)))?;
        }

        std::fs::rename(&old_path_clone, &new_full_path_clone)
            .map_err(|e| Error::generic(format!("Failed to move fixture file: {}", e)))
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Moved fixture file: {} -> {}", old_path.display(), new_full_path.display());

    // Prune directories left empty at the old location.
    cleanup_empty_directories(&old_path).await;

    Ok(new_full_path
        .strip_prefix(fixtures_path)
        .unwrap_or(&new_full_path)
        .to_string_lossy()
        .to_string())
}
2420
2421pub async fn get_validation(
2423 State(state): State<AdminState>,
2424) -> Json<ApiResponse<ValidationSettings>> {
2425 let config_state = state.get_config().await;
2427
2428 Json(ApiResponse::success(config_state.validation_settings))
2429}
2430
2431pub async fn update_validation(
2433 State(state): State<AdminState>,
2434 Json(update): Json<ValidationUpdate>,
2435) -> Json<ApiResponse<String>> {
2436 match update.mode.as_str() {
2438 "enforce" | "warn" | "off" => {}
2439 _ => {
2440 return Json(ApiResponse::error(
2441 "Invalid validation mode. Must be 'enforce', 'warn', or 'off'".to_string(),
2442 ))
2443 }
2444 }
2445
2446 let mode = update.mode.clone();
2448 state
2449 .update_validation_config(
2450 update.mode,
2451 update.aggregate_errors,
2452 update.validate_responses,
2453 update.overrides.unwrap_or_default(),
2454 )
2455 .await;
2456
2457 tracing::info!(
2458 "Updated validation settings: mode={}, aggregate_errors={}",
2459 mode,
2460 update.aggregate_errors
2461 );
2462
2463 Json(ApiResponse::success("Validation settings updated".to_string()))
2464}
2465
2466pub async fn get_env_vars() -> Json<ApiResponse<HashMap<String, String>>> {
2468 let mut env_vars = HashMap::new();
2470
2471 let relevant_vars = [
2472 "MOCKFORGE_LATENCY_ENABLED",
2474 "MOCKFORGE_FAILURES_ENABLED",
2475 "MOCKFORGE_PROXY_ENABLED",
2476 "MOCKFORGE_RECORD_ENABLED",
2477 "MOCKFORGE_REPLAY_ENABLED",
2478 "MOCKFORGE_LOG_LEVEL",
2479 "MOCKFORGE_CONFIG_FILE",
2480 "RUST_LOG",
2481 "MOCKFORGE_HTTP_PORT",
2483 "MOCKFORGE_HTTP_HOST",
2484 "MOCKFORGE_HTTP_OPENAPI_SPEC",
2485 "MOCKFORGE_CORS_ENABLED",
2486 "MOCKFORGE_REQUEST_TIMEOUT_SECS",
2487 "MOCKFORGE_WS_PORT",
2489 "MOCKFORGE_WS_HOST",
2490 "MOCKFORGE_WS_REPLAY_FILE",
2491 "MOCKFORGE_WS_CONNECTION_TIMEOUT_SECS",
2492 "MOCKFORGE_GRPC_PORT",
2494 "MOCKFORGE_GRPC_HOST",
2495 "MOCKFORGE_ADMIN_ENABLED",
2497 "MOCKFORGE_ADMIN_PORT",
2498 "MOCKFORGE_ADMIN_HOST",
2499 "MOCKFORGE_ADMIN_MOUNT_PATH",
2500 "MOCKFORGE_ADMIN_API_ENABLED",
2501 "MOCKFORGE_RESPONSE_TEMPLATE_EXPAND",
2503 "MOCKFORGE_REQUEST_VALIDATION",
2504 "MOCKFORGE_AGGREGATE_ERRORS",
2505 "MOCKFORGE_RESPONSE_VALIDATION",
2506 "MOCKFORGE_VALIDATION_STATUS",
2507 "MOCKFORGE_RAG_ENABLED",
2509 "MOCKFORGE_FAKE_TOKENS",
2510 "MOCKFORGE_FIXTURES_DIR",
2512 ];
2513
2514 for var_name in &relevant_vars {
2515 if let Ok(value) = std::env::var(var_name) {
2516 env_vars.insert(var_name.to_string(), value);
2517 }
2518 }
2519
2520 Json(ApiResponse::success(env_vars))
2521}
2522
2523pub async fn update_env_var(Json(update): Json<EnvVarUpdate>) -> Json<ApiResponse<String>> {
2525 std::env::set_var(&update.key, &update.value);
2527
2528 tracing::info!("Updated environment variable: {}={}", update.key, update.value);
2529
2530 Json(ApiResponse::success(format!(
2533 "Environment variable {} updated to '{}'. Note: This change is not persisted and will be lost on restart.",
2534 update.key, update.value
2535 )))
2536}
2537
2538pub async fn get_file_content(
2540 Json(request): Json<FileContentRequest>,
2541) -> Json<ApiResponse<String>> {
2542 if let Err(e) = validate_file_path(&request.file_path) {
2544 return Json(ApiResponse::error(format!("Invalid file path: {}", e)));
2545 }
2546
2547 match tokio::fs::read_to_string(&request.file_path).await {
2549 Ok(content) => {
2550 if let Err(e) = validate_file_content(&content) {
2552 return Json(ApiResponse::error(format!("Invalid file content: {}", e)));
2553 }
2554 Json(ApiResponse::success(content))
2555 }
2556 Err(e) => Json(ApiResponse::error(format!("Failed to read file: {}", e))),
2557 }
2558}
2559
2560pub async fn save_file_content(Json(request): Json<FileSaveRequest>) -> Json<ApiResponse<String>> {
2562 match save_file_to_filesystem(&request.file_path, &request.content).await {
2563 Ok(_) => {
2564 tracing::info!("Successfully saved file: {}", request.file_path);
2565 Json(ApiResponse::success("File saved successfully".to_string()))
2566 }
2567 Err(e) => {
2568 tracing::error!("Failed to save file {}: {}", request.file_path, e);
2569 Json(ApiResponse::error(format!("Failed to save file: {}", e)))
2570 }
2571 }
2572}
2573
2574async fn save_file_to_filesystem(file_path: &str, content: &str) -> Result<()> {
2576 validate_file_path(file_path)?;
2578
2579 validate_file_content(content)?;
2581
2582 let path = std::path::PathBuf::from(file_path);
2584 let content = content.to_string();
2585
2586 let path_clone = path.clone();
2588 let content_clone = content.clone();
2589 tokio::task::spawn_blocking(move || {
2590 if let Some(parent) = path_clone.parent() {
2592 std::fs::create_dir_all(parent).map_err(|e| {
2593 Error::generic(format!("Failed to create directory {}: {}", parent.display(), e))
2594 })?;
2595 }
2596
2597 std::fs::write(&path_clone, &content_clone).map_err(|e| {
2599 Error::generic(format!("Failed to write file {}: {}", path_clone.display(), e))
2600 })?;
2601
2602 let written_content = std::fs::read_to_string(&path_clone).map_err(|e| {
2604 Error::generic(format!("Failed to verify written file {}: {}", path_clone.display(), e))
2605 })?;
2606
2607 if written_content != content_clone {
2608 return Err(Error::generic(format!(
2609 "File content verification failed for {}",
2610 path_clone.display()
2611 )));
2612 }
2613
2614 Ok::<_, Error>(())
2615 })
2616 .await
2617 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2618
2619 tracing::info!("File saved successfully: {} ({} bytes)", path.display(), content.len());
2620 Ok(())
2621}
2622
2623fn validate_file_path(file_path: &str) -> Result<()> {
2625 if file_path.contains("..") {
2627 return Err(Error::generic("Path traversal detected in file path".to_string()));
2628 }
2629
2630 let path = std::path::Path::new(file_path);
2632 if path.is_absolute() {
2633 let allowed_dirs = [
2635 std::env::current_dir().unwrap_or_default(),
2636 std::path::PathBuf::from("."),
2637 std::path::PathBuf::from("fixtures"),
2638 std::path::PathBuf::from("config"),
2639 ];
2640
2641 let mut is_allowed = false;
2642 for allowed_dir in &allowed_dirs {
2643 if path.starts_with(allowed_dir) {
2644 is_allowed = true;
2645 break;
2646 }
2647 }
2648
2649 if !is_allowed {
2650 return Err(Error::generic("File path is outside allowed directories".to_string()));
2651 }
2652 }
2653
2654 let dangerous_extensions = ["exe", "bat", "cmd", "sh", "ps1", "scr", "com"];
2656 if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
2657 if dangerous_extensions.contains(&extension.to_lowercase().as_str()) {
2658 return Err(Error::generic(format!(
2659 "Dangerous file extension not allowed: {}",
2660 extension
2661 )));
2662 }
2663 }
2664
2665 Ok(())
2666}
2667
2668fn validate_file_content(content: &str) -> Result<()> {
2670 if content.len() > 10 * 1024 * 1024 {
2672 return Err(Error::generic("File content too large (max 10MB)".to_string()));
2674 }
2675
2676 if content.contains('\0') {
2678 return Err(Error::generic("File content contains null bytes".to_string()));
2679 }
2680
2681 Ok(())
2682}
2683
/// Request body for deleting a single fixture by id.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureDeleteRequest {
    /// Identifier of the fixture to delete.
    pub fixture_id: String,
}
2689
/// Request body for setting one environment variable (see `update_env_var`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvVarUpdate {
    /// Environment variable name.
    pub key: String,
    /// New value to assign.
    pub value: String,
}
2696
/// Request body for deleting multiple fixtures in one call.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteRequest {
    /// Identifiers of the fixtures to delete.
    pub fixture_ids: Vec<String>,
}
2702
/// Outcome summary of a bulk fixture deletion.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteResult {
    /// Number of fixtures actually deleted.
    pub deleted_count: usize,
    /// Number of fixtures the caller asked to delete.
    pub total_requested: usize,
    /// Human-readable messages for each fixture that failed to delete.
    pub errors: Vec<String>,
}
2710
/// Request body for renaming a fixture.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureRenameRequest {
    /// New name for the fixture.
    pub new_name: String,
}
2716
/// Request body for moving a fixture to a new location.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureMoveRequest {
    /// Destination path, interpreted relative to the fixtures directory
    /// (a leading '/' is treated as the fixtures root, not the FS root).
    pub new_path: String,
}
2722
/// Request body for reading a file via the admin file editor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContentRequest {
    /// Path of the file to read (validated by `validate_file_path`).
    pub file_path: String,
    /// Declared type of the file; not consulted by `get_file_content` itself —
    /// presumably used by the UI. TODO confirm.
    pub file_type: String,
}
2729
/// Request body for writing a file via the admin file editor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileSaveRequest {
    /// Destination path (validated by `validate_file_path`).
    pub file_path: String,
    /// Full replacement content (validated by `validate_file_content`).
    pub content: String,
}
2736
2737pub async fn get_smoke_tests(
2739 State(state): State<AdminState>,
2740) -> Json<ApiResponse<Vec<SmokeTestResult>>> {
2741 let results = state.get_smoke_test_results().await;
2742 Json(ApiResponse::success(results))
2743}
2744
2745pub async fn run_smoke_tests_endpoint(
2747 State(state): State<AdminState>,
2748) -> Json<ApiResponse<String>> {
2749 tracing::info!("Starting smoke test execution");
2750
2751 let state_clone = state.clone();
2753 tokio::spawn(async move {
2754 if let Err(e) = execute_smoke_tests(&state_clone).await {
2755 tracing::error!("Smoke test execution failed: {}", e);
2756 } else {
2757 tracing::info!("Smoke test execution completed successfully");
2758 }
2759 });
2760
2761 Json(ApiResponse::success(
2762 "Smoke tests started. Check results in the smoke tests section.".to_string(),
2763 ))
2764}
2765
/// Run one smoke test per HTTP fixture, streaming per-test status updates
/// ("running" -> "passed"/"failed") into shared state as each test completes.
///
/// # Errors
/// Only fixture scanning can fail here; individual test failures are recorded
/// on the result, not propagated.
async fn execute_smoke_tests(state: &AdminState) -> Result<()> {
    // Target base URL; defaults to the local HTTP mock server.
    let base_url =
        std::env::var("MOCKFORGE_BASE_URL").unwrap_or_else(|_| "http://localhost:3000".to_string());

    // NOTE(review): `parallel` is set to true but the loop below runs tests
    // sequentially — confirm whether parallel execution is still planned.
    let context = SmokeTestContext {
        base_url,
        timeout_seconds: 30,
        parallel: true,
    };

    let fixtures = scan_fixtures_directory()?;

    // Only HTTP fixtures are smoke-testable via this path.
    let http_fixtures: Vec<&FixtureInfo> =
        fixtures.iter().filter(|f| f.protocol == "http").collect();

    if http_fixtures.is_empty() {
        tracing::warn!("No HTTP fixtures found for smoke testing");
        return Ok(());
    }

    tracing::info!("Running smoke tests for {} HTTP fixtures", http_fixtures.len());

    // Build a pending result entry per fixture before executing anything.
    let mut test_results = Vec::new();

    for fixture in http_fixtures {
        let test_result = create_smoke_test_from_fixture(fixture);
        test_results.push(test_result);
    }

    let mut executed_results = Vec::new();
    for mut test_result in test_results {
        // Publish "running" first so the UI can show progress.
        test_result.status = "running".to_string();
        state.update_smoke_test_result(test_result.clone()).await;

        let start_time = std::time::Instant::now();
        match execute_single_smoke_test(&test_result, &context).await {
            Ok((status_code, response_time_ms)) => {
                test_result.status = "passed".to_string();
                test_result.status_code = Some(status_code);
                test_result.response_time_ms = Some(response_time_ms);
                test_result.error_message = None;
            }
            Err(e) => {
                // Failure: record the message and clear the success-only fields.
                test_result.status = "failed".to_string();
                test_result.error_message = Some(e.to_string());
                test_result.status_code = None;
                test_result.response_time_ms = None;
            }
        }

        let duration = start_time.elapsed();
        test_result.duration_seconds = Some(duration.as_secs_f64());
        test_result.last_run = Some(chrono::Utc::now());

        // Publish the final state for this test.
        executed_results.push(test_result.clone());
        state.update_smoke_test_result(test_result).await;
    }

    tracing::info!("Smoke test execution completed: {} tests run", executed_results.len());
    Ok(())
}
2835
2836fn create_smoke_test_from_fixture(fixture: &FixtureInfo) -> SmokeTestResult {
2838 let test_name = format!("{} {}", fixture.method, fixture.path);
2839 let description = format!("Smoke test for {} endpoint", fixture.path);
2840
2841 SmokeTestResult {
2842 id: format!("smoke_{}", fixture.id),
2843 name: test_name,
2844 method: fixture.method.clone(),
2845 path: fixture.path.clone(),
2846 description,
2847 last_run: None,
2848 status: "pending".to_string(),
2849 response_time_ms: None,
2850 error_message: None,
2851 status_code: None,
2852 duration_seconds: None,
2853 }
2854}
2855
2856async fn execute_single_smoke_test(
2858 test: &SmokeTestResult,
2859 context: &SmokeTestContext,
2860) -> Result<(u16, u64)> {
2861 let url = format!("{}{}", context.base_url, test.path);
2862 let client = reqwest::Client::builder()
2863 .timeout(std::time::Duration::from_secs(context.timeout_seconds))
2864 .build()
2865 .map_err(|e| Error::generic(format!("Failed to create HTTP client: {}", e)))?;
2866
2867 let start_time = std::time::Instant::now();
2868
2869 let response = match test.method.as_str() {
2870 "GET" => client.get(&url).send().await,
2871 "POST" => client.post(&url).send().await,
2872 "PUT" => client.put(&url).send().await,
2873 "DELETE" => client.delete(&url).send().await,
2874 "PATCH" => client.patch(&url).send().await,
2875 "HEAD" => client.head(&url).send().await,
2876 "OPTIONS" => client.request(reqwest::Method::OPTIONS, &url).send().await,
2877 _ => {
2878 return Err(Error::generic(format!("Unsupported HTTP method: {}", test.method)));
2879 }
2880 };
2881
2882 let response_time = start_time.elapsed();
2883 let response_time_ms = response_time.as_millis() as u64;
2884
2885 match response {
2886 Ok(resp) => {
2887 let status_code = resp.status().as_u16();
2888 if (200..400).contains(&status_code) {
2889 Ok((status_code, response_time_ms))
2890 } else {
2891 Err(Error::generic(format!(
2892 "HTTP error: {} {}",
2893 status_code,
2894 resp.status().canonical_reason().unwrap_or("Unknown")
2895 )))
2896 }
2897 }
2898 Err(e) => Err(Error::generic(format!("Request failed: {}", e))),
2899 }
2900}
2901
2902pub async fn install_plugin(Json(request): Json<serde_json::Value>) -> impl IntoResponse {
2904 let source = request.get("source").and_then(|s| s.as_str()).unwrap_or("");
2906
2907 if source.is_empty() {
2908 return Json(json!({
2909 "success": false,
2910 "error": "Plugin source is required"
2911 }));
2912 }
2913
2914 let plugin_path = if source.starts_with("http://") || source.starts_with("https://") {
2916 match download_plugin_from_url(source).await {
2918 Ok(temp_path) => temp_path,
2919 Err(e) => {
2920 return Json(json!({
2921 "success": false,
2922 "error": format!("Failed to download plugin: {}", e)
2923 }))
2924 }
2925 }
2926 } else {
2927 std::path::PathBuf::from(source)
2929 };
2930
2931 if !plugin_path.exists() {
2933 return Json(json!({
2934 "success": false,
2935 "error": format!("Plugin file not found: {}", source)
2936 }));
2937 }
2938
2939 Json(json!({
2941 "success": true,
2942 "message": format!("Plugin would be installed from: {}", source)
2943 }))
2944}
2945
2946async fn download_plugin_from_url(url: &str) -> Result<std::path::PathBuf> {
2948 let temp_file =
2950 std::env::temp_dir().join(format!("plugin_{}.tmp", chrono::Utc::now().timestamp()));
2951 let temp_path = temp_file.clone();
2952
2953 let response = reqwest::get(url)
2955 .await
2956 .map_err(|e| Error::generic(format!("Failed to download from URL: {}", e)))?;
2957
2958 if !response.status().is_success() {
2959 return Err(Error::generic(format!(
2960 "HTTP error {}: {}",
2961 response.status().as_u16(),
2962 response.status().canonical_reason().unwrap_or("Unknown")
2963 )));
2964 }
2965
2966 let bytes = response
2968 .bytes()
2969 .await
2970 .map_err(|e| Error::generic(format!("Failed to read response: {}", e)))?;
2971
2972 tokio::fs::write(&temp_file, &bytes)
2974 .await
2975 .map_err(|e| Error::generic(format!("Failed to write temporary file: {}", e)))?;
2976
2977 Ok(temp_path)
2978}
2979
/// Placeholder favicon endpoint: advertises `image/png` but returns an empty body.
pub async fn serve_icon() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
2984
/// Placeholder 32px icon endpoint: advertises `image/png` but returns an empty body.
pub async fn serve_icon_32() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
2988
/// Placeholder 48px icon endpoint: advertises `image/png` but returns an empty body.
pub async fn serve_icon_48() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
2992
/// Placeholder logo endpoint: advertises `image/png` but returns an empty body.
pub async fn serve_logo() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
2996
/// Placeholder 40px logo endpoint: advertises `image/png` but returns an empty body.
pub async fn serve_logo_40() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3000
/// Placeholder 80px logo endpoint: advertises `image/png` but returns an empty body.
pub async fn serve_logo_80() -> impl IntoResponse {
    ([(http::header::CONTENT_TYPE, "image/png")], "")
}
3004
/// Stub handler: accepts a traffic-shaping configuration but currently
/// ignores both state and payload and always reports success.
pub async fn update_traffic_shaping(
    State(_state): State<AdminState>,
    Json(_config): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("Traffic shaping updated".to_string()))
}
3012
/// HTTP handler: import a Postman collection (raw JSON text in
/// `request.content`) into a new workspace, recording every attempt —
/// success or failure — in the shared import history.
pub async fn import_postman(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use mockforge_core::workspace_import::{import_postman_to_workspace, WorkspaceImportConfig};
    use uuid::Uuid;

    // All request fields are optional; an absent `content` becomes an empty
    // string and fails parsing below.
    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Parse the collection; on failure, record a failed history entry and bail.
    let import_result = match mockforge_core::import::import_postman_collection(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Postman import failed: {}", e)));
        }
    };

    // Workspace name: the filename's stem, or a fixed fallback.
    let workspace_name = filename
        .and_then(|f| f.split('.').next())
        .unwrap_or("Imported Postman Collection");

    let config = WorkspaceImportConfig {
        create_folders: true,
        base_folder_name: None,
        preserve_hierarchy: true,
        max_depth: 5,
    };

    // Convert the parsed routes into the workspace-import representation.
    let routes: Vec<ImportRoute> = import_result
        .routes
        .into_iter()
        .map(|route| ImportRoute {
            method: route.method,
            path: route.path,
            headers: route.headers,
            body: route.body,
            response: ImportResponse {
                status: route.response.status,
                headers: route.response.headers,
                body: route.response.body,
            },
        })
        .collect();

    match import_postman_to_workspace(routes, workspace_name.to_string(), config) {
        Ok(workspace_result) => {
            // Persist the new workspace; a save failure still means the import
            // as a whole failed from the caller's perspective.
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            // Record the successful import in history.
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: import_result.variables.len(),
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            // Workspace creation failed after a successful parse; record it.
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}
3136
3137pub async fn import_insomnia(
3138 State(state): State<AdminState>,
3139 Json(request): Json<serde_json::Value>,
3140) -> Json<ApiResponse<String>> {
3141 use uuid::Uuid;
3142
3143 let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3144 let filename = request.get("filename").and_then(|v| v.as_str());
3145 let environment = request.get("environment").and_then(|v| v.as_str());
3146 let base_url = request.get("base_url").and_then(|v| v.as_str());
3147
3148 let import_result = match mockforge_core::import::import_insomnia_export(content, environment) {
3150 Ok(result) => result,
3151 Err(e) => {
3152 let entry = ImportHistoryEntry {
3154 id: Uuid::new_v4().to_string(),
3155 format: "insomnia".to_string(),
3156 timestamp: chrono::Utc::now(),
3157 routes_count: 0,
3158 variables_count: 0,
3159 warnings_count: 0,
3160 success: false,
3161 filename: filename.map(|s| s.to_string()),
3162 environment: environment.map(|s| s.to_string()),
3163 base_url: base_url.map(|s| s.to_string()),
3164 error_message: Some(e.clone()),
3165 };
3166 let mut history = state.import_history.write().await;
3167 history.push(entry);
3168
3169 return Json(ApiResponse::error(format!("Insomnia import failed: {}", e)));
3170 }
3171 };
3172
3173 let workspace_name = filename
3175 .and_then(|f| f.split('.').next())
3176 .unwrap_or("Imported Insomnia Collection");
3177
3178 let _config = WorkspaceImportConfig {
3179 create_folders: true,
3180 base_folder_name: None,
3181 preserve_hierarchy: true,
3182 max_depth: 5,
3183 };
3184
3185 let variables_count = import_result.variables.len();
3187
3188 match mockforge_core::workspace_import::create_workspace_from_insomnia(
3189 import_result,
3190 Some(workspace_name.to_string()),
3191 ) {
3192 Ok(workspace_result) => {
3193 if let Err(e) =
3195 state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3196 {
3197 tracing::error!("Failed to save workspace: {}", e);
3198 return Json(ApiResponse::error(format!(
3199 "Import succeeded but failed to save workspace: {}",
3200 e
3201 )));
3202 }
3203
3204 let entry = ImportHistoryEntry {
3206 id: Uuid::new_v4().to_string(),
3207 format: "insomnia".to_string(),
3208 timestamp: chrono::Utc::now(),
3209 routes_count: workspace_result.request_count,
3210 variables_count,
3211 warnings_count: workspace_result.warnings.len(),
3212 success: true,
3213 filename: filename.map(|s| s.to_string()),
3214 environment: environment.map(|s| s.to_string()),
3215 base_url: base_url.map(|s| s.to_string()),
3216 error_message: None,
3217 };
3218 let mut history = state.import_history.write().await;
3219 history.push(entry);
3220
3221 Json(ApiResponse::success(format!(
3222 "Successfully imported {} routes into workspace '{}'",
3223 workspace_result.request_count, workspace_name
3224 )))
3225 }
3226 Err(e) => {
3227 let entry = ImportHistoryEntry {
3229 id: Uuid::new_v4().to_string(),
3230 format: "insomnia".to_string(),
3231 timestamp: chrono::Utc::now(),
3232 routes_count: 0,
3233 variables_count: 0,
3234 warnings_count: 0,
3235 success: false,
3236 filename: filename.map(|s| s.to_string()),
3237 environment: environment.map(|s| s.to_string()),
3238 base_url: base_url.map(|s| s.to_string()),
3239 error_message: Some(e.to_string()),
3240 };
3241 let mut history = state.import_history.write().await;
3242 history.push(entry);
3243
3244 Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3245 }
3246 }
3247}
3248
/// Stub handler: OpenAPI import is not implemented yet — this ignores its
/// input entirely and unconditionally reports success.
pub async fn import_openapi(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("OpenAPI import completed".to_string()))
}
3255
/// HTTP handler: import a set of curl commands (raw text in `request.content`)
/// into a new workspace, recording every attempt in the import history.
/// Unlike Postman/Insomnia imports, curl imports carry no environment and no
/// variables.
pub async fn import_curl(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use uuid::Uuid;

    // Optional request fields; an absent `content` fails parsing below.
    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Parse the commands; on failure, record a failed history entry and bail.
    let import_result = match mockforge_core::import::import_curl_commands(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            return Json(ApiResponse::error(format!("Curl import failed: {}", e)));
        }
    };

    // Workspace name: the filename's stem, or a fixed fallback.
    let workspace_name =
        filename.and_then(|f| f.split('.').next()).unwrap_or("Imported Curl Commands");

    match mockforge_core::workspace_import::create_workspace_from_curl(
        import_result,
        Some(workspace_name.to_string()),
    ) {
        Ok(workspace_result) => {
            // Persist the new workspace; a save failure is reported as an error.
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }

            // Record the successful import; curl imports never have variables.
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: 0,
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            // Workspace creation failed after a successful parse; record it.
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "curl".to_string(),
                timestamp: chrono::Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: None,
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);

            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}
3355
/// HTTP handler: dry-run an import and return the routes/variables/warnings it
/// would produce, without creating a workspace.
///
/// The source format is guessed purely from the filename (substring and
/// extension heuristics) — content-based detection is not attempted.
/// NOTE(review): the three format arms duplicate the same route-to-JSON
/// mapping; consider extracting a shared helper if the route types allow it.
pub async fn preview_import(
    State(_state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<serde_json::Value>> {
    use mockforge_core::import::{
        import_curl_commands, import_insomnia_export, import_postman_collection,
    };

    // Optional request fields; an absent `content` fails parsing below.
    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());

    // Filename-based format sniffing; anything unrecognized is "unknown".
    let format = if let Some(fname) = filename {
        if fname.to_lowercase().contains("postman")
            || fname.to_lowercase().ends_with(".postman_collection")
        {
            "postman"
        } else if fname.to_lowercase().contains("insomnia")
            || fname.to_lowercase().ends_with(".insomnia")
        {
            "insomnia"
        } else if fname.to_lowercase().contains("curl")
            || fname.to_lowercase().ends_with(".sh")
            || fname.to_lowercase().ends_with(".curl")
        {
            "curl"
        } else {
            "unknown"
        }
    } else {
        "unknown"
    };

    match format {
        "postman" => match import_postman_collection(content, base_url) {
            Ok(import_result) => {
                // Flatten each parsed route into a preview-friendly JSON object.
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                let response = serde_json::json!({
                    "routes": routes,
                    "variables": import_result.variables,
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Postman import failed: {}", e))),
        },
        "insomnia" => match import_insomnia_export(content, environment) {
            Ok(import_result) => {
                // Same preview shape as the Postman arm.
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                let response = serde_json::json!({
                    "routes": routes,
                    "variables": import_result.variables,
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Insomnia import failed: {}", e))),
        },
        "curl" => match import_curl_commands(content, base_url) {
            Ok(import_result) => {
                // Same preview shape; curl imports never produce variables.
                let routes: Vec<serde_json::Value> = import_result
                    .routes
                    .into_iter()
                    .map(|route| {
                        serde_json::json!({
                            "method": route.method,
                            "path": route.path,
                            "headers": route.headers,
                            "body": route.body,
                            "status_code": route.response.status,
                            "response": serde_json::json!({
                                "status": route.response.status,
                                "headers": route.response.headers,
                                "body": route.response.body
                            })
                        })
                    })
                    .collect();

                let response = serde_json::json!({
                    "routes": routes,
                    "variables": serde_json::json!({}),
                    "warnings": import_result.warnings
                });

                Json(ApiResponse::success(response))
            }
            Err(e) => Json(ApiResponse::error(format!("Curl import failed: {}", e))),
        },
        _ => Json(ApiResponse::error("Unsupported import format".to_string())),
    }
}
3488
3489pub async fn get_import_history(
3490 State(state): State<AdminState>,
3491) -> Json<ApiResponse<serde_json::Value>> {
3492 let history = state.import_history.read().await;
3493 let total = history.len();
3494
3495 let imports: Vec<serde_json::Value> = history
3496 .iter()
3497 .rev()
3498 .take(50)
3499 .map(|entry| {
3500 serde_json::json!({
3501 "id": entry.id,
3502 "format": entry.format,
3503 "timestamp": entry.timestamp.to_rfc3339(),
3504 "routes_count": entry.routes_count,
3505 "variables_count": entry.variables_count,
3506 "warnings_count": entry.warnings_count,
3507 "success": entry.success,
3508 "filename": entry.filename,
3509 "environment": entry.environment,
3510 "base_url": entry.base_url,
3511 "error_message": entry.error_message
3512 })
3513 })
3514 .collect();
3515
3516 let response = serde_json::json!({
3517 "imports": imports,
3518 "total": total
3519 });
3520
3521 Json(ApiResponse::success(response))
3522}
3523
3524pub async fn get_admin_api_state(
3525 State(_state): State<AdminState>,
3526) -> Json<ApiResponse<serde_json::Value>> {
3527 Json(ApiResponse::success(serde_json::json!({
3528 "status": "active"
3529 })))
3530}
3531
3532pub async fn get_admin_api_replay(
3533 State(_state): State<AdminState>,
3534) -> Json<ApiResponse<serde_json::Value>> {
3535 Json(ApiResponse::success(serde_json::json!({
3536 "replay": []
3537 })))
3538}
3539
3540pub async fn get_sse_status(
3541 State(_state): State<AdminState>,
3542) -> Json<ApiResponse<serde_json::Value>> {
3543 Json(ApiResponse::success(serde_json::json!({
3544 "available": true,
3545 "endpoint": "/sse",
3546 "config": {
3547 "event_type": "status",
3548 "interval_ms": 1000,
3549 "data_template": "{}"
3550 }
3551 })))
3552}
3553
3554pub async fn get_sse_connections(
3555 State(_state): State<AdminState>,
3556) -> Json<ApiResponse<serde_json::Value>> {
3557 Json(ApiResponse::success(serde_json::json!({
3558 "active_connections": 0
3559 })))
3560}
3561
3562pub async fn get_workspaces(
3564 State(_state): State<AdminState>,
3565) -> Json<ApiResponse<Vec<serde_json::Value>>> {
3566 Json(ApiResponse::success(vec![]))
3567}
3568
3569pub async fn create_workspace(
3570 State(_state): State<AdminState>,
3571 Json(_request): Json<serde_json::Value>,
3572) -> Json<ApiResponse<String>> {
3573 Json(ApiResponse::success("Workspace created".to_string()))
3574}
3575
3576pub async fn open_workspace_from_directory(
3577 State(_state): State<AdminState>,
3578 Json(_request): Json<serde_json::Value>,
3579) -> Json<ApiResponse<String>> {
3580 Json(ApiResponse::success("Workspace opened from directory".to_string()))
3581}
3582
3583pub async fn get_workspace(
3584 State(_state): State<AdminState>,
3585 axum::extract::Path(workspace_id): axum::extract::Path<String>,
3586) -> Json<ApiResponse<serde_json::Value>> {
3587 Json(ApiResponse::success(serde_json::json!({
3588 "workspace": {
3589 "summary": {
3590 "id": workspace_id,
3591 "name": "Mock Workspace",
3592 "description": "A mock workspace"
3593 },
3594 "folders": [],
3595 "requests": []
3596 }
3597 })))
3598}
3599
3600pub async fn delete_workspace(
3601 State(_state): State<AdminState>,
3602 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3603) -> Json<ApiResponse<String>> {
3604 Json(ApiResponse::success("Workspace deleted".to_string()))
3605}
3606
3607pub async fn set_active_workspace(
3608 State(_state): State<AdminState>,
3609 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3610) -> Json<ApiResponse<String>> {
3611 Json(ApiResponse::success("Workspace activated".to_string()))
3612}
3613
3614pub async fn create_folder(
3615 State(_state): State<AdminState>,
3616 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3617 Json(_request): Json<serde_json::Value>,
3618) -> Json<ApiResponse<String>> {
3619 Json(ApiResponse::success("Folder created".to_string()))
3620}
3621
3622pub async fn create_request(
3623 State(_state): State<AdminState>,
3624 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3625 Json(_request): Json<serde_json::Value>,
3626) -> Json<ApiResponse<String>> {
3627 Json(ApiResponse::success("Request created".to_string()))
3628}
3629
3630pub async fn execute_workspace_request(
3631 State(_state): State<AdminState>,
3632 axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
3633 Json(_request): Json<serde_json::Value>,
3634) -> Json<ApiResponse<serde_json::Value>> {
3635 Json(ApiResponse::success(serde_json::json!({
3636 "status": "executed",
3637 "response": {}
3638 })))
3639}
3640
3641pub async fn get_request_history(
3642 State(_state): State<AdminState>,
3643 axum::extract::Path((_workspace_id, _request_id)): axum::extract::Path<(String, String)>,
3644) -> Json<ApiResponse<Vec<serde_json::Value>>> {
3645 Json(ApiResponse::success(vec![]))
3646}
3647
3648pub async fn get_folder(
3649 State(_state): State<AdminState>,
3650 axum::extract::Path((_workspace_id, folder_id)): axum::extract::Path<(String, String)>,
3651) -> Json<ApiResponse<serde_json::Value>> {
3652 Json(ApiResponse::success(serde_json::json!({
3653 "folder": {
3654 "summary": {
3655 "id": folder_id,
3656 "name": "Mock Folder",
3657 "description": "A mock folder"
3658 },
3659 "requests": []
3660 }
3661 })))
3662}
3663
3664pub async fn import_to_workspace(
3665 State(_state): State<AdminState>,
3666 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3667 Json(_request): Json<serde_json::Value>,
3668) -> Json<ApiResponse<String>> {
3669 Json(ApiResponse::success("Import to workspace completed".to_string()))
3670}
3671
3672pub async fn export_workspaces(
3673 State(_state): State<AdminState>,
3674 Json(_request): Json<serde_json::Value>,
3675) -> Json<ApiResponse<String>> {
3676 Json(ApiResponse::success("Workspaces exported".to_string()))
3677}
3678
3679pub async fn get_environments(
3681 State(_state): State<AdminState>,
3682 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3683) -> Json<ApiResponse<serde_json::Value>> {
3684 let environments = vec![serde_json::json!({
3686 "id": "global",
3687 "name": "Global",
3688 "description": "Global environment variables",
3689 "variable_count": 0,
3690 "is_global": true,
3691 "active": true,
3692 "order": 0
3693 })];
3694
3695 Json(ApiResponse::success(serde_json::json!({
3696 "environments": environments,
3697 "total": 1
3698 })))
3699}
3700
3701pub async fn create_environment(
3702 State(_state): State<AdminState>,
3703 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3704 Json(_request): Json<serde_json::Value>,
3705) -> Json<ApiResponse<String>> {
3706 Json(ApiResponse::success("Environment created".to_string()))
3707}
3708
3709pub async fn update_environment(
3710 State(_state): State<AdminState>,
3711 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
3712 Json(_request): Json<serde_json::Value>,
3713) -> Json<ApiResponse<String>> {
3714 Json(ApiResponse::success("Environment updated".to_string()))
3715}
3716
3717pub async fn delete_environment(
3718 State(_state): State<AdminState>,
3719 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
3720) -> Json<ApiResponse<String>> {
3721 Json(ApiResponse::success("Environment deleted".to_string()))
3722}
3723
3724pub async fn set_active_environment(
3725 State(_state): State<AdminState>,
3726 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
3727) -> Json<ApiResponse<String>> {
3728 Json(ApiResponse::success("Environment activated".to_string()))
3729}
3730
3731pub async fn update_environments_order(
3732 State(_state): State<AdminState>,
3733 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3734 Json(_request): Json<serde_json::Value>,
3735) -> Json<ApiResponse<String>> {
3736 Json(ApiResponse::success("Environment order updated".to_string()))
3737}
3738
3739pub async fn get_environment_variables(
3740 State(_state): State<AdminState>,
3741 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
3742) -> Json<ApiResponse<serde_json::Value>> {
3743 Json(ApiResponse::success(serde_json::json!({
3744 "variables": []
3745 })))
3746}
3747
3748pub async fn set_environment_variable(
3749 State(_state): State<AdminState>,
3750 axum::extract::Path((_workspace_id, _environment_id)): axum::extract::Path<(String, String)>,
3751 Json(_request): Json<serde_json::Value>,
3752) -> Json<ApiResponse<String>> {
3753 Json(ApiResponse::success("Environment variable set".to_string()))
3754}
3755
3756pub async fn remove_environment_variable(
3757 State(_state): State<AdminState>,
3758 axum::extract::Path((_workspace_id, _environment_id, _variable_name)): axum::extract::Path<(
3759 String,
3760 String,
3761 String,
3762 )>,
3763) -> Json<ApiResponse<String>> {
3764 Json(ApiResponse::success("Environment variable removed".to_string()))
3765}
3766
3767pub async fn get_autocomplete_suggestions(
3769 State(_state): State<AdminState>,
3770 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3771 Json(_request): Json<serde_json::Value>,
3772) -> Json<ApiResponse<serde_json::Value>> {
3773 Json(ApiResponse::success(serde_json::json!({
3774 "suggestions": [],
3775 "start_position": 0,
3776 "end_position": 0
3777 })))
3778}
3779
3780pub async fn get_sync_status(
3782 State(_state): State<AdminState>,
3783 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3784) -> Json<ApiResponse<serde_json::Value>> {
3785 Json(ApiResponse::success(serde_json::json!({
3786 "status": "disabled"
3787 })))
3788}
3789
3790pub async fn configure_sync(
3791 State(_state): State<AdminState>,
3792 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3793 Json(_request): Json<serde_json::Value>,
3794) -> Json<ApiResponse<String>> {
3795 Json(ApiResponse::success("Sync configured".to_string()))
3796}
3797
3798pub async fn disable_sync(
3799 State(_state): State<AdminState>,
3800 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3801) -> Json<ApiResponse<String>> {
3802 Json(ApiResponse::success("Sync disabled".to_string()))
3803}
3804
3805pub async fn trigger_sync(
3806 State(_state): State<AdminState>,
3807 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3808) -> Json<ApiResponse<String>> {
3809 Json(ApiResponse::success("Sync triggered".to_string()))
3810}
3811
3812pub async fn get_sync_changes(
3813 State(_state): State<AdminState>,
3814 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3815) -> Json<ApiResponse<Vec<serde_json::Value>>> {
3816 Json(ApiResponse::success(vec![]))
3817}
3818
3819pub async fn confirm_sync_changes(
3820 State(_state): State<AdminState>,
3821 axum::extract::Path(_workspace_id): axum::extract::Path<String>,
3822 Json(_request): Json<serde_json::Value>,
3823) -> Json<ApiResponse<String>> {
3824 Json(ApiResponse::success("Sync changes confirmed".to_string()))
3825}
3826
3827pub async fn validate_plugin(
3829 State(_state): State<AdminState>,
3830 Json(_request): Json<serde_json::Value>,
3831) -> Json<ApiResponse<String>> {
3832 Json(ApiResponse::success("Plugin validated".to_string()))
3833}
3834
3835pub async fn clear_import_history(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
3837 let mut history = state.import_history.write().await;
3838 history.clear();
3839 Json(ApiResponse::success("Import history cleared".to_string()))
3840}
3841
#[cfg(test)]
mod tests {
    use super::*;

    /// A freshly built `RequestMetrics` holds exactly the values given.
    #[test]
    fn test_request_metrics_creation() {
        use std::collections::HashMap;

        let sample = RequestMetrics {
            active_connections: 5,
            total_requests: 100,
            response_times: vec![10, 20, 30],
            requests_by_endpoint: HashMap::new(),
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint: HashMap::new(),
            last_request_by_endpoint: HashMap::new(),
        };

        assert_eq!(sample.response_times.len(), 3);
        assert_eq!(sample.total_requests, 100);
        assert_eq!(sample.active_connections, 5);
    }

    /// A freshly built `SystemMetrics` holds exactly the values given.
    #[test]
    fn test_system_metrics_creation() {
        let sample = SystemMetrics {
            memory_usage_mb: 100,
            cpu_usage_percent: 45.5,
            active_threads: 10,
        };

        assert_eq!(sample.memory_usage_mb, 100);
        assert!(sample.cpu_usage_percent > 0.0);
        assert_eq!(sample.active_threads, 10);
    }

    /// A `TimeSeriesPoint` stores its value unchanged.
    #[test]
    fn test_time_series_point() {
        let sample = TimeSeriesPoint { timestamp: chrono::Utc::now(), value: 42.5 };
        assert_eq!(sample.value, 42.5);
    }

    /// `RestartStatus` keeps its flag and optional fields as constructed.
    #[test]
    fn test_restart_status() {
        let sample = RestartStatus {
            success: None,
            reason: Some(String::from("Manual restart")),
            initiated_at: Some(chrono::Utc::now()),
            in_progress: true,
        };

        assert!(sample.reason.is_some());
        assert!(sample.in_progress);
    }

    /// A default-ish `ConfigurationState` wires its sub-configs through.
    #[test]
    fn test_configuration_state() {
        use std::collections::HashMap;

        let config = ConfigurationState {
            latency_profile: crate::models::LatencyProfile {
                name: String::from("default"),
                base_ms: 100,
                jitter_ms: 10,
                tag_overrides: HashMap::new(),
            },
            fault_config: crate::models::FaultConfig {
                enabled: false,
                failure_rate: 0.0,
                status_codes: Vec::new(),
                active_failures: 0,
            },
            proxy_config: crate::models::ProxyConfig {
                enabled: false,
                upstream_url: None,
                timeout_seconds: 30,
                requests_proxied: 0,
            },
            validation_settings: crate::models::ValidationSettings {
                mode: String::from("off"),
                aggregate_errors: false,
                validate_responses: false,
                overrides: HashMap::new(),
            },
        };

        assert!(!config.proxy_config.enabled);
        assert!(!config.fault_config.enabled);
        assert_eq!(config.latency_profile.name, "default");
    }

    /// `AdminState::new` records the HTTP address, API flag and admin port.
    #[test]
    fn test_admin_state_new() {
        let addr: std::net::SocketAddr = "127.0.0.1:3000".parse().unwrap();
        let state = AdminState::new(Some(addr), None, None, None, true, 8080);

        assert_eq!(state.admin_port, 8080);
        assert_eq!(state.http_server_addr, Some(addr));
        assert!(state.api_enabled);
    }
}