1use axum::{
13 extract::{Path, Query, State},
14 http::{self, StatusCode},
15 response::{
16 sse::{Event, Sse},
17 Html, IntoResponse, Json,
18 },
19};
20use chrono::Utc;
21use futures_util::stream::{self, Stream};
22use mockforge_core::{Error, Result};
23use mockforge_plugin_loader::PluginRegistry;
24use serde::{Deserialize, Serialize};
25use serde_json::json;
26use std::collections::HashMap;
27use std::convert::Infallible;
28use std::process::Command;
29use std::process::Stdio;
30use std::sync::Arc;
31use std::time::Duration;
32use sysinfo::System;
33use tokio::sync::RwLock;
34
35use crate::models::{
37 ApiResponse, ConfigUpdate, DashboardData, DashboardSystemInfo, FaultConfig, HealthCheck,
38 LatencyProfile, LogFilter, MetricsData, ProxyConfig, RequestLog, RouteInfo, ServerInfo,
39 ServerStatus, SimpleMetricsData, SystemInfo, TrafficShapingConfig, ValidationSettings,
40 ValidationUpdate,
41};
42
43use mockforge_core::workspace_import::{ImportResponse, ImportRoute};
45use mockforge_plugin_loader::{
46 GitPluginConfig, GitPluginLoader, PluginLoader, PluginLoaderConfig, PluginSource,
47 RemotePluginConfig, RemotePluginLoader,
48};
49
50pub mod admin;
52pub mod ai_studio;
53pub mod analytics;
54pub mod analytics_stream;
55pub mod analytics_v2;
56pub mod assets;
57pub mod behavioral_cloning;
58pub mod chains;
59pub mod chaos_api;
60pub mod community;
61pub mod contract_diff;
62pub mod coverage_metrics;
63pub mod failure_analysis;
64pub mod federation_api;
65pub mod graph;
66pub mod health;
67pub mod migration;
68pub mod pillar_analytics;
69pub mod playground;
70pub mod plugin;
71pub mod promotions;
72pub mod protocol_contracts;
73pub mod recorder_api;
74pub mod vbr_api;
75pub mod verification;
76pub mod voice;
77pub mod workspaces;
78pub mod world_state_proxy;
79
80pub use assets::*;
82pub use chains::*;
83pub use graph::*;
84pub use migration::*;
85pub use plugin::*;
86
87use mockforge_core::workspace_import::WorkspaceImportConfig;
89use mockforge_core::workspace_persistence::WorkspacePersistence;
90
/// Aggregated request-level metrics collected by the admin server.
///
/// Kept behind `Arc<RwLock<…>>` in [`AdminState`] and updated on every
/// recorded request.
#[derive(Debug, Clone, Default)]
pub struct RequestMetrics {
    /// Total number of requests recorded since startup.
    pub total_requests: u64,
    /// Currently open connections (not updated by `record_request`; see callers).
    pub active_connections: u64,
    /// Request count keyed by "METHOD path" endpoint string.
    pub requests_by_endpoint: HashMap<String, u64>,
    /// Rolling window of recent response times in milliseconds (capped at 100).
    pub response_times: Vec<u64>,
    /// Per-endpoint rolling response-time windows (capped at 50 each).
    pub response_times_by_endpoint: HashMap<String, Vec<u64>>,
    /// Count of responses with status >= 400, keyed by endpoint.
    pub errors_by_endpoint: HashMap<String, u64>,
    /// Timestamp of the most recent request seen for each endpoint.
    pub last_request_by_endpoint: HashMap<String, chrono::DateTime<Utc>>,
}
109
/// Point-in-time system resource usage sampled by the background monitor
/// (see [`AdminState::start_system_monitoring`]).
#[derive(Debug, Clone)]
pub struct SystemMetrics {
    /// Used memory in megabytes.
    pub memory_usage_mb: u64,
    /// Global CPU usage percentage as reported by `sysinfo`.
    pub cpu_usage_percent: f64,
    /// Logical CPU count, used as a proxy for active threads.
    pub active_threads: u32,
}
120
/// A single timestamped sample in a metrics time series.
#[derive(Debug, Clone)]
pub struct TimeSeriesPoint {
    /// When the sample was taken.
    pub timestamp: chrono::DateTime<Utc>,
    /// Sample value (unit depends on the owning series).
    pub value: f64,
}
129
/// Rolling time-series buffers for dashboard charts.
///
/// Each series is capped at 100 points (see `update_time_series_data`).
#[derive(Debug, Clone, Default)]
pub struct TimeSeriesData {
    /// Memory usage samples (MB).
    pub memory_usage: Vec<TimeSeriesPoint>,
    /// CPU usage samples (percent).
    pub cpu_usage: Vec<TimeSeriesPoint>,
    /// Cumulative request count samples.
    pub request_count: Vec<TimeSeriesPoint>,
    /// Response-time samples (ms; average or per-request depending on updater).
    pub response_time: Vec<TimeSeriesPoint>,
}
142
/// State of an in-flight or completed server restart request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestartStatus {
    /// True while a restart is being processed.
    pub in_progress: bool,
    /// When the restart was initiated, if any.
    pub initiated_at: Option<chrono::DateTime<Utc>>,
    /// Human-readable reason supplied by the initiator.
    pub reason: Option<String>,
    /// Outcome of the last restart; `None` until completion.
    pub success: Option<bool>,
}
155
/// Metadata describing a recorded fixture on disk.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureInfo {
    /// Unique fixture identifier.
    pub id: String,
    /// Protocol the fixture was captured from (e.g. HTTP/WS/gRPC).
    pub protocol: String,
    /// Request method.
    pub method: String,
    /// Request path.
    pub path: String,
    /// When the fixture was persisted.
    pub saved_at: chrono::DateTime<Utc>,
    /// Size of the fixture file in bytes.
    pub file_size: u64,
    /// Path to the fixture file.
    pub file_path: String,
    /// Content fingerprint used for matching/deduplication.
    pub fingerprint: String,
    /// Arbitrary additional metadata.
    pub metadata: serde_json::Value,
}
178
/// Result of a single smoke-test execution against a mocked endpoint.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SmokeTestResult {
    /// Stable identifier; used to upsert results (see `update_smoke_test_result`).
    pub id: String,
    /// Display name of the test.
    pub name: String,
    /// HTTP method exercised.
    pub method: String,
    /// Path exercised.
    pub path: String,
    /// Free-form description.
    pub description: String,
    /// When the test last ran, if ever.
    pub last_run: Option<chrono::DateTime<Utc>>,
    /// Status string (e.g. pass/fail — exact vocabulary defined by the runner).
    pub status: String,
    /// Measured response time in milliseconds, when available.
    pub response_time_ms: Option<u64>,
    /// Error detail for failed runs.
    pub error_message: Option<String>,
    /// HTTP status code returned, when available.
    pub status_code: Option<u16>,
    /// Total run duration in seconds, when available.
    pub duration_seconds: Option<f64>,
}
205
/// Execution parameters for a smoke-test run.
#[derive(Debug, Clone)]
pub struct SmokeTestContext {
    /// Base URL the tests are issued against.
    pub base_url: String,
    /// Per-request timeout in seconds.
    pub timeout_seconds: u64,
    /// Whether tests run concurrently.
    pub parallel: bool,
}
216
/// Mutable runtime configuration surfaced and edited through the admin API.
#[derive(Debug, Clone, Serialize)]
pub struct ConfigurationState {
    /// Latency injection profile (base + jitter + per-tag overrides).
    pub latency_profile: LatencyProfile,
    /// Fault-injection settings.
    pub fault_config: FaultConfig,
    /// Upstream proxy settings.
    pub proxy_config: ProxyConfig,
    /// Request/response validation settings.
    pub validation_settings: ValidationSettings,
    /// Bandwidth/burst-loss traffic shaping settings.
    pub traffic_shaping: TrafficShapingConfig,
}
231
/// One entry in the workspace import history.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportHistoryEntry {
    /// Unique identifier of the import.
    pub id: String,
    /// Source format (e.g. OpenAPI, Postman — exact set defined by the importer).
    pub format: String,
    /// When the import happened.
    pub timestamp: chrono::DateTime<Utc>,
    /// Number of routes imported.
    pub routes_count: usize,
    /// Number of variables imported.
    pub variables_count: usize,
    /// Number of warnings produced.
    pub warnings_count: usize,
    /// Whether the import succeeded.
    pub success: bool,
    /// Original filename, if known.
    pub filename: Option<String>,
    /// Target environment, if specified.
    pub environment: Option<String>,
    /// Base URL used for the import, if any.
    pub base_url: Option<String>,
    /// Failure detail when `success` is false.
    pub error_message: Option<String>,
}
258
/// Shared state for the admin server.
///
/// Cheap to clone: every mutable field is behind `Arc`, so clones share the
/// same underlying data (relied upon by `start_system_monitoring`).
#[derive(Clone)]
pub struct AdminState {
    /// Bound address of the mocked HTTP server, if running.
    pub http_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the WebSocket server, if running.
    pub ws_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the gRPC server, if running.
    pub grpc_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the GraphQL server, if running.
    pub graphql_server_addr: Option<std::net::SocketAddr>,
    /// Whether the admin HTTP API is enabled.
    pub api_enabled: bool,
    /// Port the admin UI/API listens on.
    pub admin_port: u16,
    /// Process start time; used for uptime computation.
    pub start_time: chrono::DateTime<Utc>,
    /// Request-level metrics (counts, latencies, errors).
    pub metrics: Arc<RwLock<RequestMetrics>>,
    /// Latest sampled system resource metrics.
    pub system_metrics: Arc<RwLock<SystemMetrics>>,
    /// Live-editable runtime configuration.
    pub config: Arc<RwLock<ConfigurationState>>,
    /// In-memory request log buffer (capped at 1000 entries).
    pub logs: Arc<RwLock<Vec<RequestLog>>>,
    /// Rolling time-series buffers for dashboard charts.
    pub time_series: Arc<RwLock<TimeSeriesData>>,
    /// Current restart state.
    pub restart_status: Arc<RwLock<RestartStatus>>,
    /// Latest smoke-test results (capped at 100 entries).
    pub smoke_test_results: Arc<RwLock<Vec<SmokeTestResult>>>,
    /// Workspace import history.
    pub import_history: Arc<RwLock<Vec<ImportHistoryEntry>>>,
    /// Persistence layer for workspaces (rooted at "./workspaces").
    pub workspace_persistence: Arc<WorkspacePersistence>,
    /// Registry of loaded plugins.
    pub plugin_registry: Arc<RwLock<PluginRegistry>>,
    /// Reality engine instance.
    pub reality_engine: Arc<RwLock<mockforge_core::RealityEngine>>,
    /// Reality continuum engine (optionally driven by a virtual clock).
    pub continuum_engine: Arc<RwLock<mockforge_core::RealityContinuumEngine>>,
    /// Chaos API state, when the chaos feature is wired up.
    pub chaos_api_state: Option<Arc<mockforge_chaos::api::ChaosApiState>>,
    /// Latency injector, when latency injection is enabled.
    pub latency_injector: Option<Arc<RwLock<mockforge_core::latency::LatencyInjector>>>,
    /// Intelligent-behavior engine, when enabled.
    pub mockai: Option<Arc<RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
    /// Traffic recorder, when enabled.
    pub recorder: Option<Arc<mockforge_recorder::Recorder>>,
    /// Federation handle, when enabled.
    pub federation: Option<Arc<mockforge_federation::Federation>>,
    /// Virtual-backend-reality engine, when enabled.
    pub vbr_engine: Option<Arc<mockforge_vbr::VbrEngine>>,
}
316
impl AdminState {
    /// Spawn a detached background task that samples system metrics
    /// (CPU, memory, logical CPU count) every 10 seconds via `sysinfo`
    /// and publishes them through [`Self::update_system_metrics`].
    ///
    /// The task runs for the lifetime of the process; it is never joined
    /// or cancelled. Relies on `AdminState::clone` sharing `Arc` fields.
    pub async fn start_system_monitoring(&self) {
        let state_clone = self.clone();
        tokio::spawn(async move {
            let mut sys = System::new_all();
            let mut refresh_count = 0u64;

            tracing::info!("Starting system monitoring background task");

            loop {
                sys.refresh_all();

                let cpu_usage = sys.global_cpu_usage();

                let _total_memory = sys.total_memory() as f64;
                let used_memory = sys.used_memory() as f64;
                // NOTE(review): assumes sysinfo reports memory in bytes
                // (true for recent sysinfo versions) — confirm against the
                // pinned crate version.
                let memory_usage_mb = used_memory / 1024.0 / 1024.0;

                // Logical CPU count is used as a stand-in for active threads.
                let active_threads = sys.cpus().len() as u32;

                let memory_mb_u64 = memory_usage_mb as u64;

                // Only log every 10th refresh (~100s) to keep debug output quiet.
                if refresh_count.is_multiple_of(10) {
                    tracing::debug!(
                        "System metrics updated: CPU={:.1}%, Mem={}MB, Threads={}",
                        cpu_usage,
                        memory_mb_u64,
                        active_threads
                    );
                }

                state_clone
                    .update_system_metrics(memory_mb_u64, cpu_usage as f64, active_threads)
                    .await;

                refresh_count += 1;

                tokio::time::sleep(Duration::from_secs(10)).await;
            }
        });
    }

    /// Construct a fresh `AdminState` with default metrics, logs, and
    /// configuration.
    ///
    /// Optional subsystem handles (`chaos_api_state`, `latency_injector`,
    /// `mockai`, `recorder`, `federation`, `vbr_engine`) are stored as-is.
    /// `continuum_config`/`virtual_clock` seed the continuum engine; when
    /// `virtual_clock` is `Some`, the engine is built with it.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        http_server_addr: Option<std::net::SocketAddr>,
        ws_server_addr: Option<std::net::SocketAddr>,
        grpc_server_addr: Option<std::net::SocketAddr>,
        graphql_server_addr: Option<std::net::SocketAddr>,
        api_enabled: bool,
        admin_port: u16,
        chaos_api_state: Option<Arc<mockforge_chaos::api::ChaosApiState>>,
        latency_injector: Option<Arc<RwLock<mockforge_core::latency::LatencyInjector>>>,
        mockai: Option<Arc<RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
        continuum_config: Option<mockforge_core::ContinuumConfig>,
        virtual_clock: Option<Arc<mockforge_core::VirtualClock>>,
        recorder: Option<Arc<mockforge_recorder::Recorder>>,
        federation: Option<Arc<mockforge_federation::Federation>>,
        vbr_engine: Option<Arc<mockforge_vbr::VbrEngine>>,
    ) -> Self {
        let start_time = Utc::now();

        Self {
            http_server_addr,
            ws_server_addr,
            grpc_server_addr,
            graphql_server_addr,
            api_enabled,
            admin_port,
            start_time,
            metrics: Arc::new(RwLock::new(RequestMetrics::default())),
            system_metrics: Arc::new(RwLock::new(SystemMetrics {
                memory_usage_mb: 0,
                cpu_usage_percent: 0.0,
                active_threads: 0,
            })),
            // Sensible defaults: 50ms +/- 20ms latency, all injection and
            // proxying disabled, strict ("enforce") validation.
            config: Arc::new(RwLock::new(ConfigurationState {
                latency_profile: LatencyProfile {
                    name: "default".to_string(),
                    base_ms: 50,
                    jitter_ms: 20,
                    tag_overrides: HashMap::new(),
                },
                fault_config: FaultConfig {
                    enabled: false,
                    failure_rate: 0.0,
                    status_codes: vec![500, 502, 503],
                    active_failures: 0,
                },
                proxy_config: ProxyConfig {
                    enabled: false,
                    upstream_url: None,
                    timeout_seconds: 30,
                    requests_proxied: 0,
                },
                validation_settings: ValidationSettings {
                    mode: "enforce".to_string(),
                    aggregate_errors: true,
                    validate_responses: false,
                    overrides: HashMap::new(),
                },
                traffic_shaping: TrafficShapingConfig {
                    enabled: false,
                    bandwidth: crate::models::BandwidthConfig {
                        enabled: false,
                        // 1 MiB/s sustained, 10 MiB burst capacity.
                        max_bytes_per_sec: 1_048_576,
                        burst_capacity_bytes: 10_485_760,
                        tag_overrides: HashMap::new(),
                    },
                    burst_loss: crate::models::BurstLossConfig {
                        enabled: false,
                        burst_probability: 0.1,
                        burst_duration_ms: 5000,
                        loss_rate_during_burst: 0.5,
                        recovery_time_ms: 30000,
                        tag_overrides: HashMap::new(),
                    },
                },
            })),
            logs: Arc::new(RwLock::new(Vec::new())),
            time_series: Arc::new(RwLock::new(TimeSeriesData::default())),
            restart_status: Arc::new(RwLock::new(RestartStatus {
                in_progress: false,
                initiated_at: None,
                reason: None,
                success: None,
            })),
            smoke_test_results: Arc::new(RwLock::new(Vec::new())),
            import_history: Arc::new(RwLock::new(Vec::new())),
            workspace_persistence: Arc::new(WorkspacePersistence::new("./workspaces")),
            plugin_registry: Arc::new(RwLock::new(PluginRegistry::new())),
            reality_engine: Arc::new(RwLock::new(mockforge_core::RealityEngine::new())),
            continuum_engine: Arc::new(RwLock::new({
                let config = continuum_config.unwrap_or_default();
                if let Some(clock) = virtual_clock {
                    mockforge_core::RealityContinuumEngine::with_virtual_clock(config, clock)
                } else {
                    mockforge_core::RealityContinuumEngine::new(config)
                }
            })),
            chaos_api_state,
            latency_injector,
            mockai,
            recorder,
            federation,
            vbr_engine,
        }
    }

    /// Record a completed request into metrics, the time series, and the
    /// in-memory log buffer.
    ///
    /// * `status_code >= 400` counts as an error for the endpoint.
    /// * Rolling windows are enforced: 100 global response times, 50 per
    ///   endpoint, 1000 log entries.
    /// * `error` is an optional human-readable failure description.
    pub async fn record_request(
        &self,
        method: &str,
        path: &str,
        status_code: u16,
        response_time_ms: u64,
        error: Option<String>,
    ) {
        let mut metrics = self.metrics.write().await;

        metrics.total_requests += 1;
        // Endpoints are keyed by "METHOD path".
        let endpoint = format!("{} {}", method, path);
        *metrics.requests_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;

        if status_code >= 400 {
            *metrics.errors_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
        }

        metrics.response_times.push(response_time_ms);
        if metrics.response_times.len() > 100 {
            metrics.response_times.remove(0);
        }

        let endpoint_times = metrics
            .response_times_by_endpoint
            .entry(endpoint.clone())
            .or_insert_with(Vec::new);
        endpoint_times.push(response_time_ms);
        if endpoint_times.len() > 50 {
            endpoint_times.remove(0);
        }

        metrics.last_request_by_endpoint.insert(endpoint, Utc::now());

        let total_requests = metrics.total_requests;

        // Release the metrics lock before taking other locks below to avoid
        // holding two write locks at once.
        drop(metrics);

        self.update_time_series_on_request(response_time_ms, total_requests).await;

        let mut logs = self.logs.write().await;
        let log_entry = RequestLog {
            id: format!("req_{}", total_requests),
            timestamp: Utc::now(),
            method: method.to_string(),
            path: path.to_string(),
            status_code,
            response_time_ms,
            client_ip: None,
            user_agent: None,
            headers: HashMap::new(),
            response_size_bytes: 0,
            error_message: error,
        };

        logs.push(log_entry);

        // Cap the in-memory log buffer at 1000 entries (drop oldest).
        if logs.len() > 1000 {
            logs.remove(0);
        }
    }

    /// Snapshot the current request metrics.
    pub async fn get_metrics(&self) -> RequestMetrics {
        self.metrics.read().await.clone()
    }

    /// Overwrite the latest system metrics and append them to the time series.
    pub async fn update_system_metrics(&self, memory_mb: u64, cpu_percent: f64, threads: u32) {
        let mut system_metrics = self.system_metrics.write().await;
        system_metrics.memory_usage_mb = memory_mb;
        system_metrics.cpu_usage_percent = cpu_percent;
        system_metrics.active_threads = threads;

        // NOTE: the `system_metrics` write guard is still held across this
        // await; `update_time_series_data` takes different locks
        // (`time_series`, `metrics`) so there is no self-deadlock.
        self.update_time_series_data(memory_mb as f64, cpu_percent).await;
    }

    /// Append one sample to each time series (memory, CPU, request count,
    /// average response time) and trim every series to 100 points.
    async fn update_time_series_data(&self, memory_mb: f64, cpu_percent: f64) {
        let now = Utc::now();
        let mut time_series = self.time_series.write().await;

        time_series.memory_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: memory_mb,
        });

        time_series.cpu_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: cpu_percent,
        });

        let metrics = self.metrics.read().await;
        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: metrics.total_requests as f64,
        });

        // Average over the rolling response-time window; 0 when empty.
        let avg_response_time = if !metrics.response_times.is_empty() {
            metrics.response_times.iter().sum::<u64>() as f64 / metrics.response_times.len() as f64
        } else {
            0.0
        };
        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: avg_response_time,
        });

        // Keep each series bounded.
        const MAX_POINTS: usize = 100;
        if time_series.memory_usage.len() > MAX_POINTS {
            time_series.memory_usage.remove(0);
        }
        if time_series.cpu_usage.len() > MAX_POINTS {
            time_series.cpu_usage.remove(0);
        }
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    /// Snapshot the latest sampled system metrics.
    pub async fn get_system_metrics(&self) -> SystemMetrics {
        self.system_metrics.read().await.clone()
    }

    /// Snapshot all time-series buffers.
    pub async fn get_time_series_data(&self) -> TimeSeriesData {
        self.time_series.read().await.clone()
    }

    /// Snapshot the current restart status.
    pub async fn get_restart_status(&self) -> RestartStatus {
        self.restart_status.read().await.clone()
    }

    /// Mark a restart as in progress.
    ///
    /// # Errors
    /// Returns an error if a restart is already in progress.
    pub async fn initiate_restart(&self, reason: String) -> Result<()> {
        let mut status = self.restart_status.write().await;

        if status.in_progress {
            return Err(Error::generic("Restart already in progress".to_string()));
        }

        status.in_progress = true;
        status.initiated_at = Some(Utc::now());
        status.reason = Some(reason);
        status.success = None;

        Ok(())
    }

    /// Mark the current restart as finished, recording its outcome.
    pub async fn complete_restart(&self, success: bool) {
        let mut status = self.restart_status.write().await;
        status.in_progress = false;
        status.success = Some(success);
    }

    /// Snapshot all stored smoke-test results.
    pub async fn get_smoke_test_results(&self) -> Vec<SmokeTestResult> {
        self.smoke_test_results.read().await.clone()
    }

    /// Upsert a smoke-test result by `id`, capping stored results at 100
    /// (oldest dropped first).
    pub async fn update_smoke_test_result(&self, result: SmokeTestResult) {
        let mut results = self.smoke_test_results.write().await;

        if let Some(existing) = results.iter_mut().find(|r| r.id == result.id) {
            *existing = result;
        } else {
            results.push(result);
        }

        if results.len() > 100 {
            results.remove(0);
        }
    }

    /// Remove all stored smoke-test results.
    pub async fn clear_smoke_test_results(&self) {
        let mut results = self.smoke_test_results.write().await;
        results.clear();
    }

    /// Append request-count and per-request response-time samples to the
    /// time series (called from `record_request`), trimming to 100 points.
    async fn update_time_series_on_request(&self, response_time_ms: u64, total_requests: u64) {
        let now = Utc::now();
        let mut time_series = self.time_series.write().await;

        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: total_requests as f64,
        });

        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: response_time_ms as f64,
        });

        const MAX_POINTS: usize = 100;
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    /// Snapshot the live configuration.
    pub async fn get_config(&self) -> ConfigurationState {
        self.config.read().await.clone()
    }

    /// Replace the latency profile's base, jitter, and per-tag overrides.
    pub async fn update_latency_config(
        &self,
        base_ms: u64,
        jitter_ms: u64,
        tag_overrides: HashMap<String, u64>,
    ) {
        let mut config = self.config.write().await;
        config.latency_profile.base_ms = base_ms;
        config.latency_profile.jitter_ms = jitter_ms;
        config.latency_profile.tag_overrides = tag_overrides;
    }

    /// Replace the fault-injection settings.
    pub async fn update_fault_config(
        &self,
        enabled: bool,
        failure_rate: f64,
        status_codes: Vec<u16>,
    ) {
        let mut config = self.config.write().await;
        config.fault_config.enabled = enabled;
        config.fault_config.failure_rate = failure_rate;
        config.fault_config.status_codes = status_codes;
    }

    /// Replace the proxy settings.
    pub async fn update_proxy_config(
        &self,
        enabled: bool,
        upstream_url: Option<String>,
        timeout_seconds: u64,
    ) {
        let mut config = self.config.write().await;
        config.proxy_config.enabled = enabled;
        config.proxy_config.upstream_url = upstream_url;
        config.proxy_config.timeout_seconds = timeout_seconds;
    }

    /// Replace the validation settings.
    pub async fn update_validation_config(
        &self,
        mode: String,
        aggregate_errors: bool,
        validate_responses: bool,
        overrides: HashMap<String, String>,
    ) {
        let mut config = self.config.write().await;
        config.validation_settings.mode = mode;
        config.validation_settings.aggregate_errors = aggregate_errors;
        config.validation_settings.validate_responses = validate_responses;
        config.validation_settings.overrides = overrides;
    }

    /// Return log entries matching `filter`, newest first, filtered before
    /// the limit is applied (default limit 100).
    ///
    /// `path_pattern` is a substring match, not a glob/regex.
    pub async fn get_logs_filtered(&self, filter: &LogFilter) -> Vec<RequestLog> {
        let logs = self.logs.read().await;

        logs.iter()
            .rev()
            .filter(|log| {
                if let Some(ref method) = filter.method {
                    if log.method != *method {
                        return false;
                    }
                }
                if let Some(ref path_pattern) = filter.path_pattern {
                    if !log.path.contains(path_pattern) {
                        return false;
                    }
                }
                if let Some(status) = filter.status_code {
                    if log.status_code != status {
                        return false;
                    }
                }
                true
            })
            .take(filter.limit.unwrap_or(100))
            .cloned()
            .collect()
    }

    /// Remove all in-memory log entries.
    pub async fn clear_logs(&self) {
        let mut logs = self.logs.write().await;
        logs.clear();
    }
}
815
/// Serve the embedded admin UI HTML page.
pub async fn serve_admin_html() -> Html<&'static str> {
    Html(crate::get_admin_html())
}
820
/// Serve the embedded admin UI stylesheet with a `text/css` content type.
pub async fn serve_admin_css() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "text/css")], crate::get_admin_css())
}
825
/// Serve the embedded admin UI JavaScript bundle with an
/// `application/javascript` content type.
pub async fn serve_admin_js() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    ([(http::header::CONTENT_TYPE, "application/javascript")], crate::get_admin_js())
}
830
/// Build the full dashboard payload: server info, system info, aggregate
/// metrics, per-server status, and recent logs.
///
/// When the global request logger is available, metrics are recomputed from
/// its log history; otherwise the locally tracked `AdminState` metrics and
/// log buffer are used as a fallback.
pub async fn get_dashboard(State(state): State<AdminState>) -> Json<ApiResponse<DashboardData>> {
    let uptime = Utc::now().signed_duration_since(state.start_time).num_seconds() as u64;

    let system_metrics = state.get_system_metrics().await;
    let _config = state.get_config().await;

    let (recent_logs, calculated_metrics): (Vec<RequestLog>, RequestMetrics) =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            // Recompute aggregates from the centralized log history so the
            // dashboard reflects requests recorded by all servers.
            let all_logs = global_logger.get_recent_logs(None).await;
            let recent_logs_subset = global_logger.get_recent_logs(Some(20)).await;

            let total_requests = all_logs.len() as u64;
            let mut requests_by_endpoint = HashMap::new();
            let mut errors_by_endpoint = HashMap::new();
            let mut response_times = Vec::new();
            let mut last_request_by_endpoint = HashMap::new();

            for log in &all_logs {
                let endpoint_key = format!("{} {}", log.method, log.path);
                *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;

                if log.status_code >= 400 {
                    *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
                }

                response_times.push(log.response_time_ms);
                last_request_by_endpoint.insert(endpoint_key, log.timestamp);
            }

            let calculated_metrics = RequestMetrics {
                total_requests,
                // Not derivable from logs; reported as 0 on this path.
                active_connections: 0,
                requests_by_endpoint,
                response_times,
                // Per-endpoint latency windows are not reconstructed here.
                response_times_by_endpoint: HashMap::new(),
                errors_by_endpoint,
                last_request_by_endpoint,
            };

            // Convert centralized log entries into the admin RequestLog shape.
            let recent_logs = recent_logs_subset
                .into_iter()
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();

            (recent_logs, calculated_metrics)
        } else {
            // Fallback: locally recorded logs/metrics only.
            let logs = state.logs.read().await;
            let recent_logs = logs.iter().rev().take(10).cloned().collect();
            let metrics = state.get_metrics().await;
            (recent_logs, metrics)
        };

    let metrics = calculated_metrics;

    let system_info = SystemInfo {
        version: env!("CARGO_PKG_VERSION").to_string(),
        uptime_seconds: uptime,
        memory_usage_mb: system_metrics.memory_usage_mb,
        cpu_usage_percent: system_metrics.cpu_usage_percent,
        active_threads: system_metrics.active_threads as usize,
        total_routes: metrics.requests_by_endpoint.len(),
        total_fixtures: count_fixtures().unwrap_or(0),
    };

    // Per-server statuses. NOTE(review): the /2 and /3 connection figures
    // below are placeholder estimates, not measured per-protocol counts —
    // confirm before relying on them.
    let servers = vec![
        ServerStatus {
            server_type: "HTTP".to_string(),
            address: state.http_server_addr.map(|addr| addr.to_string()),
            running: state.http_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections,
            total_requests: count_requests_by_server_type(&metrics, "HTTP"),
        },
        ServerStatus {
            server_type: "WebSocket".to_string(),
            address: state.ws_server_addr.map(|addr| addr.to_string()),
            running: state.ws_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 2,
            total_requests: count_requests_by_server_type(&metrics, "WebSocket"),
        },
        ServerStatus {
            server_type: "gRPC".to_string(),
            address: state.grpc_server_addr.map(|addr| addr.to_string()),
            running: state.grpc_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 3,
            total_requests: count_requests_by_server_type(&metrics, "gRPC"),
        },
    ];

    // Derive per-route info from the "METHOD path" endpoint keys.
    let mut routes = Vec::new();
    for (endpoint, count) in &metrics.requests_by_endpoint {
        let parts: Vec<&str> = endpoint.splitn(2, ' ').collect();
        if parts.len() == 2 {
            let method = parts[0].to_string();
            let path = parts[1].to_string();
            let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);

            routes.push(RouteInfo {
                method: Some(method.clone()),
                path: path.clone(),
                priority: 0,
                has_fixtures: route_has_fixtures(&method, &path),
                latency_ms: calculate_endpoint_latency(&metrics, endpoint),
                request_count: *count,
                last_request: get_endpoint_last_request(&metrics, endpoint),
                error_count,
            });
        }
    }

    let dashboard = DashboardData {
        server_info: ServerInfo {
            version: env!("CARGO_PKG_VERSION").to_string(),
            // Build metadata injected by vergen at compile time, if present.
            build_time: option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("unknown").to_string(),
            git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("unknown").to_string(),
            http_server: state.http_server_addr.map(|addr| addr.to_string()),
            ws_server: state.ws_server_addr.map(|addr| addr.to_string()),
            grpc_server: state.grpc_server_addr.map(|addr| addr.to_string()),
            graphql_server: state.graphql_server_addr.map(|addr| addr.to_string()),
            api_enabled: state.api_enabled,
            admin_port: state.admin_port,
        },
        system_info: DashboardSystemInfo {
            os: std::env::consts::OS.to_string(),
            arch: std::env::consts::ARCH.to_string(),
            uptime,
            // Convert MB back to bytes for this field.
            memory_usage: system_metrics.memory_usage_mb * 1024 * 1024,
        },
        metrics: SimpleMetricsData {
            total_requests: metrics.requests_by_endpoint.values().sum(),
            active_requests: metrics.active_connections,
            average_response_time: if metrics.response_times.is_empty() {
                0.0
            } else {
                metrics.response_times.iter().sum::<u64>() as f64
                    / metrics.response_times.len() as f64
            },
            // Fraction of requests with status >= 400 (0.0 when no traffic).
            error_rate: {
                let total_requests = metrics.requests_by_endpoint.values().sum::<u64>();
                let total_errors = metrics.errors_by_endpoint.values().sum::<u64>();
                if total_requests == 0 {
                    0.0
                } else {
                    total_errors as f64 / total_requests as f64
                }
            },
        },
        servers,
        recent_logs,
        system: system_info,
    };

    Json(ApiResponse::success(dashboard))
}
1010
1011pub async fn get_routes(State(state): State<AdminState>) -> impl IntoResponse {
1013 if let Some(http_addr) = state.http_server_addr {
1014 let proxy_ip = if http_addr.ip().is_unspecified() {
1017 std::net::IpAddr::V4(std::net::Ipv4Addr::LOCALHOST)
1018 } else {
1019 http_addr.ip()
1020 };
1021 let url = format!("http://{}:{}/__mockforge/routes", proxy_ip, http_addr.port());
1022 if let Ok(response) = reqwest::get(&url).await {
1023 if response.status().is_success() {
1024 if let Ok(body) = response.text().await {
1025 return (StatusCode::OK, [("content-type", "application/json")], body);
1026 }
1027 }
1028 }
1029 }
1030
1031 (
1033 StatusCode::OK,
1034 [("content-type", "application/json")],
1035 r#"{"routes":[]}"#.to_string(),
1036 )
1037}
1038
1039pub async fn get_server_info(State(state): State<AdminState>) -> Json<serde_json::Value> {
1041 Json(json!({
1042 "http_server": state.http_server_addr.map(|addr| addr.to_string()),
1043 "ws_server": state.ws_server_addr.map(|addr| addr.to_string()),
1044 "grpc_server": state.grpc_server_addr.map(|addr| addr.to_string()),
1045 "admin_port": state.admin_port
1046 }))
1047}
1048
1049pub async fn get_health() -> Json<HealthCheck> {
1051 Json(
1052 HealthCheck::healthy()
1053 .with_service("http".to_string(), "healthy".to_string())
1054 .with_service("websocket".to_string(), "healthy".to_string())
1055 .with_service("grpc".to_string(), "healthy".to_string()),
1056 )
1057}
1058
1059pub async fn get_logs(
1061 State(state): State<AdminState>,
1062 Query(params): Query<HashMap<String, String>>,
1063) -> Json<ApiResponse<Vec<RequestLog>>> {
1064 let mut filter = LogFilter::default();
1065
1066 if let Some(method) = params.get("method") {
1067 filter.method = Some(method.clone());
1068 }
1069 if let Some(path) = params.get("path") {
1070 filter.path_pattern = Some(path.clone());
1071 }
1072 if let Some(status) = params.get("status").and_then(|s| s.parse().ok()) {
1073 filter.status_code = Some(status);
1074 }
1075 if let Some(limit) = params.get("limit").and_then(|s| s.parse().ok()) {
1076 filter.limit = Some(limit);
1077 }
1078
1079 let logs = if let Some(global_logger) = mockforge_core::get_global_logger() {
1081 let centralized_logs = global_logger.get_recent_logs(filter.limit).await;
1083
1084 centralized_logs
1086 .into_iter()
1087 .filter(|log| {
1088 if let Some(ref method) = filter.method {
1089 if log.method != *method {
1090 return false;
1091 }
1092 }
1093 if let Some(ref path_pattern) = filter.path_pattern {
1094 if !log.path.contains(path_pattern) {
1095 return false;
1096 }
1097 }
1098 if let Some(status) = filter.status_code {
1099 if log.status_code != status {
1100 return false;
1101 }
1102 }
1103 true
1104 })
1105 .map(|log| RequestLog {
1106 id: log.id,
1107 timestamp: log.timestamp,
1108 method: log.method,
1109 path: log.path,
1110 status_code: log.status_code,
1111 response_time_ms: log.response_time_ms,
1112 client_ip: log.client_ip,
1113 user_agent: log.user_agent,
1114 headers: log.headers,
1115 response_size_bytes: log.response_size_bytes,
1116 error_message: log.error_message,
1117 })
1118 .collect()
1119 } else {
1120 state.get_logs_filtered(&filter).await
1122 };
1123
1124 Json(ApiResponse::success(logs))
1125}
1126
1127pub async fn get_reality_trace(
1131 Path(request_id): Path<String>,
1132) -> Json<ApiResponse<Option<mockforge_core::request_logger::RealityTraceMetadata>>> {
1133 if let Some(global_logger) = mockforge_core::get_global_logger() {
1134 let logs = global_logger.get_recent_logs(None).await;
1135 if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
1136 Json(ApiResponse::success(log_entry.reality_metadata))
1137 } else {
1138 Json(ApiResponse::error(format!("Request {} not found", request_id)))
1139 }
1140 } else {
1141 Json(ApiResponse::error("Request logger not initialized".to_string()))
1142 }
1143}
1144
1145pub async fn get_response_trace(
1149 Path(request_id): Path<String>,
1150) -> Json<ApiResponse<Option<serde_json::Value>>> {
1151 if let Some(global_logger) = mockforge_core::get_global_logger() {
1152 let logs = global_logger.get_recent_logs(None).await;
1153 if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
1154 let trace = log_entry
1157 .metadata
1158 .get("response_generation_trace")
1159 .and_then(|s| serde_json::from_str::<serde_json::Value>(s).ok());
1160 Json(ApiResponse::success(trace))
1161 } else {
1162 Json(ApiResponse::error(format!("Request {} not found", request_id)))
1163 }
1164 } else {
1165 Json(ApiResponse::error("Request logger not initialized".to_string()))
1166 }
1167}
1168
/// Maximum number of recent log entries fetched on each SSE poll.
const RECENT_LOGS_LIMIT: usize = 20;
/// Only logs newer than this many minutes are streamed to SSE clients.
const RECENT_LOGS_TTL_MINUTES: i64 = 5;
1172
1173pub async fn logs_sse(
1175 State(_state): State<AdminState>,
1176) -> Sse<impl Stream<Item = std::result::Result<Event, Infallible>>> {
1177 tracing::info!("SSE endpoint /logs/sse accessed - starting real-time log streaming for recent requests only");
1178
1179 let stream = stream::unfold(std::collections::HashSet::new(), |mut seen_ids| async move {
1180 tokio::time::sleep(Duration::from_millis(500)).await;
1181
1182 if let Some(global_logger) = mockforge_core::get_global_logger() {
1184 let centralized_logs = global_logger.get_recent_logs(Some(RECENT_LOGS_LIMIT)).await;
1185
1186 tracing::debug!(
1187 "SSE: Checking logs - total logs: {}, seen logs: {}",
1188 centralized_logs.len(),
1189 seen_ids.len()
1190 );
1191
1192 let now = Utc::now();
1194 let ttl_cutoff = now - chrono::Duration::minutes(RECENT_LOGS_TTL_MINUTES);
1195
1196 let new_logs: Vec<RequestLog> = centralized_logs
1198 .into_iter()
1199 .filter(|log| {
1200 log.timestamp > ttl_cutoff && !seen_ids.contains(&log.id)
1202 })
1203 .map(|log| RequestLog {
1204 id: log.id,
1205 timestamp: log.timestamp,
1206 method: log.method,
1207 path: log.path,
1208 status_code: log.status_code,
1209 response_time_ms: log.response_time_ms,
1210 client_ip: log.client_ip,
1211 user_agent: log.user_agent,
1212 headers: log.headers,
1213 response_size_bytes: log.response_size_bytes,
1214 error_message: log.error_message,
1215 })
1216 .collect();
1217
1218 for log in &new_logs {
1220 seen_ids.insert(log.id.clone());
1221 }
1222
1223 if !new_logs.is_empty() {
1225 tracing::info!("SSE: Sending {} new logs to client", new_logs.len());
1226
1227 let event_data = serde_json::to_string(&new_logs).unwrap_or_default();
1228 let event = Ok(Event::default().event("new_logs").data(event_data));
1229
1230 return Some((event, seen_ids));
1231 }
1232 }
1233
1234 let event = Ok(Event::default().event("keep_alive").data(""));
1236 Some((event, seen_ids))
1237 });
1238
1239 Sse::new(stream).keep_alive(
1240 axum::response::sse::KeepAlive::new()
1241 .interval(Duration::from_secs(15))
1242 .text("keep-alive-text"),
1243 )
1244}
1245
1246pub async fn get_metrics(State(state): State<AdminState>) -> Json<ApiResponse<MetricsData>> {
1248 let metrics = if let Some(global_logger) = mockforge_core::get_global_logger() {
1250 let all_logs = global_logger.get_recent_logs(None).await;
1251
1252 let total_requests = all_logs.len() as u64;
1253 let mut requests_by_endpoint = HashMap::new();
1254 let mut errors_by_endpoint = HashMap::new();
1255 let mut response_times = Vec::new();
1256 let mut last_request_by_endpoint = HashMap::new();
1257
1258 for log in &all_logs {
1259 let endpoint_key = format!("{} {}", log.method, log.path);
1260 *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1261
1262 if log.status_code >= 400 {
1263 *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
1264 }
1265
1266 response_times.push(log.response_time_ms);
1267 last_request_by_endpoint.insert(endpoint_key, log.timestamp);
1268 }
1269
1270 RequestMetrics {
1271 total_requests,
1272 active_connections: 0,
1273 requests_by_endpoint,
1274 response_times,
1275 response_times_by_endpoint: HashMap::new(),
1276 errors_by_endpoint,
1277 last_request_by_endpoint,
1278 }
1279 } else {
1280 state.get_metrics().await
1281 };
1282
1283 let system_metrics = state.get_system_metrics().await;
1284 let time_series = state.get_time_series_data().await;
1285
1286 fn calculate_percentile(sorted_data: &[u64], percentile: f64) -> u64 {
1288 if sorted_data.is_empty() {
1289 return 0;
1290 }
1291 let idx = ((sorted_data.len() as f64) * percentile).ceil() as usize;
1292 let idx = idx.min(sorted_data.len().saturating_sub(1));
1293 sorted_data[idx]
1294 }
1295
1296 let mut response_times = metrics.response_times.clone();
1298 response_times.sort();
1299
1300 let p50 = calculate_percentile(&response_times, 0.50);
1301 let p75 = calculate_percentile(&response_times, 0.75);
1302 let p90 = calculate_percentile(&response_times, 0.90);
1303 let p95 = calculate_percentile(&response_times, 0.95);
1304 let p99 = calculate_percentile(&response_times, 0.99);
1305 let p999 = calculate_percentile(&response_times, 0.999);
1306
1307 let mut response_times_by_endpoint: HashMap<String, Vec<u64>> = HashMap::new();
1309 if let Some(global_logger) = mockforge_core::get_global_logger() {
1310 let all_logs = global_logger.get_recent_logs(None).await;
1311 for log in &all_logs {
1312 let endpoint_key = format!("{} {}", log.method, log.path);
1313 response_times_by_endpoint
1314 .entry(endpoint_key)
1315 .or_default()
1316 .push(log.response_time_ms);
1317 }
1318 }
1319
1320 let mut endpoint_percentiles: HashMap<String, HashMap<String, u64>> = HashMap::new();
1322 for (endpoint, times) in &mut response_times_by_endpoint {
1323 times.sort();
1324 if !times.is_empty() {
1325 endpoint_percentiles.insert(
1326 endpoint.clone(),
1327 HashMap::from([
1328 ("p50".to_string(), calculate_percentile(times, 0.50)),
1329 ("p75".to_string(), calculate_percentile(times, 0.75)),
1330 ("p90".to_string(), calculate_percentile(times, 0.90)),
1331 ("p95".to_string(), calculate_percentile(times, 0.95)),
1332 ("p99".to_string(), calculate_percentile(times, 0.99)),
1333 ("p999".to_string(), calculate_percentile(times, 0.999)),
1334 ]),
1335 );
1336 }
1337 }
1338
1339 let mut error_rate_by_endpoint = HashMap::new();
1341 for (endpoint, total_count) in &metrics.requests_by_endpoint {
1342 let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
1343 let error_rate = if *total_count > 0 {
1344 error_count as f64 / *total_count as f64
1345 } else {
1346 0.0
1347 };
1348 error_rate_by_endpoint.insert(endpoint.clone(), error_rate);
1349 }
1350
1351 let memory_usage_over_time = if time_series.memory_usage.is_empty() {
1354 vec![(Utc::now(), system_metrics.memory_usage_mb)]
1355 } else {
1356 time_series
1357 .memory_usage
1358 .iter()
1359 .map(|point| (point.timestamp, point.value as u64))
1360 .collect()
1361 };
1362
1363 let cpu_usage_over_time = if time_series.cpu_usage.is_empty() {
1364 vec![(Utc::now(), system_metrics.cpu_usage_percent)]
1365 } else {
1366 time_series
1367 .cpu_usage
1368 .iter()
1369 .map(|point| (point.timestamp, point.value))
1370 .collect()
1371 };
1372
1373 let latency_over_time: Vec<(chrono::DateTime<Utc>, u64)> =
1375 if let Some(global_logger) = mockforge_core::get_global_logger() {
1376 let all_logs = global_logger.get_recent_logs(Some(100)).await;
1377 all_logs.iter().map(|log| (log.timestamp, log.response_time_ms)).collect()
1378 } else {
1379 Vec::new()
1380 };
1381
1382 let metrics_data = MetricsData {
1383 requests_by_endpoint: metrics.requests_by_endpoint,
1384 response_time_percentiles: HashMap::from([
1385 ("p50".to_string(), p50),
1386 ("p75".to_string(), p75),
1387 ("p90".to_string(), p90),
1388 ("p95".to_string(), p95),
1389 ("p99".to_string(), p99),
1390 ("p999".to_string(), p999),
1391 ]),
1392 endpoint_percentiles: Some(endpoint_percentiles),
1393 latency_over_time: Some(latency_over_time),
1394 error_rate_by_endpoint,
1395 memory_usage_over_time,
1396 cpu_usage_over_time,
1397 };
1398
1399 Json(ApiResponse::success(metrics_data))
1400}
1401
1402pub async fn update_latency(
1404 State(state): State<AdminState>,
1405 headers: http::HeaderMap,
1406 Json(update): Json<ConfigUpdate>,
1407) -> Json<ApiResponse<String>> {
1408 use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
1409 use crate::rbac::{extract_user_context, get_default_user_context};
1410
1411 if update.config_type != "latency" {
1412 return Json(ApiResponse::error("Invalid config type".to_string()));
1413 }
1414
1415 let base_ms = update.data.get("base_ms").and_then(|v| v.as_u64()).unwrap_or(50);
1417 let jitter_ms = update.data.get("jitter_ms").and_then(|v| v.as_u64()).unwrap_or(20);
1418
1419 let tag_overrides: HashMap<String, u64> = update
1420 .data
1421 .get("tag_overrides")
1422 .and_then(|v| v.as_object())
1423 .map(|obj| obj.iter().filter_map(|(k, v)| v.as_u64().map(|val| (k.clone(), val))).collect())
1424 .unwrap_or_default();
1425
1426 state.update_latency_config(base_ms, jitter_ms, tag_overrides.clone()).await;
1428
1429 if let Some(audit_store) = get_global_audit_store() {
1431 let metadata = serde_json::json!({
1432 "base_ms": base_ms,
1433 "jitter_ms": jitter_ms,
1434 "tag_overrides": tag_overrides,
1435 });
1436 let mut audit_log = create_audit_log(
1437 AdminActionType::ConfigLatencyUpdated,
1438 format!("Latency profile updated: base_ms={}, jitter_ms={}", base_ms, jitter_ms),
1439 None,
1440 true,
1441 None,
1442 Some(metadata),
1443 );
1444
1445 if let Some(user_ctx) = extract_user_context(&headers).or_else(get_default_user_context) {
1447 audit_log.user_id = Some(user_ctx.user_id);
1448 audit_log.username = Some(user_ctx.username);
1449 }
1450
1451 if let Some(ip) = headers
1453 .get("x-forwarded-for")
1454 .or_else(|| headers.get("x-real-ip"))
1455 .and_then(|h| h.to_str().ok())
1456 {
1457 audit_log.ip_address = Some(ip.to_string());
1458 }
1459
1460 if let Some(ua) = headers.get("user-agent").and_then(|h| h.to_str().ok()) {
1462 audit_log.user_agent = Some(ua.to_string());
1463 }
1464
1465 audit_store.record(audit_log).await;
1466 }
1467
1468 tracing::info!("Updated latency profile: base_ms={}, jitter_ms={}", base_ms, jitter_ms);
1469
1470 Json(ApiResponse::success("Latency profile updated".to_string()))
1471}
1472
1473pub async fn update_faults(
1475 State(state): State<AdminState>,
1476 Json(update): Json<ConfigUpdate>,
1477) -> Json<ApiResponse<String>> {
1478 if update.config_type != "faults" {
1479 return Json(ApiResponse::error("Invalid config type".to_string()));
1480 }
1481
1482 let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1484
1485 let failure_rate = update.data.get("failure_rate").and_then(|v| v.as_f64()).unwrap_or(0.0);
1486
1487 let status_codes = update
1488 .data
1489 .get("status_codes")
1490 .and_then(|v| v.as_array())
1491 .map(|arr| arr.iter().filter_map(|v| v.as_u64().map(|n| n as u16)).collect())
1492 .unwrap_or_else(|| vec![500, 502, 503]);
1493
1494 state.update_fault_config(enabled, failure_rate, status_codes).await;
1496
1497 tracing::info!(
1498 "Updated fault configuration: enabled={}, failure_rate={}",
1499 enabled,
1500 failure_rate
1501 );
1502
1503 Json(ApiResponse::success("Fault configuration updated".to_string()))
1504}
1505
1506pub async fn update_proxy(
1508 State(state): State<AdminState>,
1509 Json(update): Json<ConfigUpdate>,
1510) -> Json<ApiResponse<String>> {
1511 if update.config_type != "proxy" {
1512 return Json(ApiResponse::error("Invalid config type".to_string()));
1513 }
1514
1515 let enabled = update.data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
1517
1518 let upstream_url =
1519 update.data.get("upstream_url").and_then(|v| v.as_str()).map(|s| s.to_string());
1520
1521 let timeout_seconds = update.data.get("timeout_seconds").and_then(|v| v.as_u64()).unwrap_or(30);
1522
1523 state.update_proxy_config(enabled, upstream_url.clone(), timeout_seconds).await;
1525
1526 tracing::info!(
1527 "Updated proxy configuration: enabled={}, upstream_url={:?}",
1528 enabled,
1529 upstream_url
1530 );
1531
1532 Json(ApiResponse::success("Proxy configuration updated".to_string()))
1533}
1534
/// Clear the request logs held in the admin state.
///
/// NOTE(review): only `state.clear_logs()` is called here — whether the
/// global centralized logger is also cleared depends on that method's
/// implementation; confirm if SSE clients should also see an empty history.
pub async fn clear_logs(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    state.clear_logs().await;
    tracing::info!("Cleared all request logs");

    Json(ApiResponse::success("Logs cleared".to_string()))
}
1543
1544pub async fn restart_servers(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
1546 use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
1547 let current_status = state.get_restart_status().await;
1549 if current_status.in_progress {
1550 return Json(ApiResponse::error("Server restart already in progress".to_string()));
1551 }
1552
1553 let restart_result = state
1555 .initiate_restart("Manual restart requested via admin UI".to_string())
1556 .await;
1557
1558 let success = restart_result.is_ok();
1559 let error_msg = restart_result.as_ref().err().map(|e| format!("{}", e));
1560
1561 if let Some(audit_store) = get_global_audit_store() {
1563 let audit_log = create_audit_log(
1564 AdminActionType::ServerRestarted,
1565 "Server restart initiated via admin UI".to_string(),
1566 None,
1567 success,
1568 error_msg.clone(),
1569 None,
1570 );
1571 audit_store.record(audit_log).await;
1572 }
1573
1574 if let Err(e) = restart_result {
1575 return Json(ApiResponse::error(format!("Failed to initiate restart: {}", e)));
1576 }
1577
1578 let state_clone = state.clone();
1580 tokio::spawn(async move {
1581 if let Err(e) = perform_server_restart(&state_clone).await {
1582 tracing::error!("Server restart failed: {}", e);
1583 state_clone.complete_restart(false).await;
1584 } else {
1585 tracing::info!("Server restart completed successfully");
1586 state_clone.complete_restart(true).await;
1587 }
1588 });
1589
1590 tracing::info!("Server restart initiated via admin UI");
1591 Json(ApiResponse::success(
1592 "Server restart initiated. Please wait for completion.".to_string(),
1593 ))
1594}
1595
1596async fn perform_server_restart(_state: &AdminState) -> Result<()> {
1598 let current_pid = std::process::id();
1600 tracing::info!("Initiating restart for process PID: {}", current_pid);
1601
1602 let parent_pid = get_parent_process_id(current_pid).await?;
1604 tracing::info!("Found parent process PID: {}", parent_pid);
1605
1606 if let Ok(()) = restart_via_parent_signal(parent_pid).await {
1608 tracing::info!("Restart initiated via parent process signal");
1609 return Ok(());
1610 }
1611
1612 if let Ok(()) = restart_via_process_replacement().await {
1614 tracing::info!("Restart initiated via process replacement");
1615 return Ok(());
1616 }
1617
1618 restart_via_script().await
1620}
1621
1622async fn get_parent_process_id(pid: u32) -> Result<u32> {
1624 #[cfg(target_os = "linux")]
1626 {
1627 let stat_path = format!("/proc/{}/stat", pid);
1629 if let Ok(ppid) = tokio::task::spawn_blocking(move || -> Result<u32> {
1630 let content = std::fs::read_to_string(&stat_path)
1631 .map_err(|e| Error::generic(format!("Failed to read {}: {}", stat_path, e)))?;
1632
1633 let fields: Vec<&str> = content.split_whitespace().collect();
1634 if fields.len() > 3 {
1635 fields[3]
1636 .parse::<u32>()
1637 .map_err(|e| Error::generic(format!("Failed to parse PPID: {}", e)))
1638 } else {
1639 Err(Error::generic("Insufficient fields in /proc/pid/stat".to_string()))
1640 }
1641 })
1642 .await
1643 {
1644 return ppid;
1645 }
1646 }
1647
1648 Ok(1) }
1651
1652async fn restart_via_parent_signal(parent_pid: u32) -> Result<()> {
1654 #[cfg(unix)]
1655 {
1656 use std::process::Command;
1657
1658 let output = Command::new("kill")
1660 .args(["-TERM", &parent_pid.to_string()])
1661 .output()
1662 .map_err(|e| Error::generic(format!("Failed to send signal: {}", e)))?;
1663
1664 if !output.status.success() {
1665 return Err(Error::generic(
1666 "Failed to send restart signal to parent process".to_string(),
1667 ));
1668 }
1669
1670 tokio::time::sleep(Duration::from_millis(100)).await;
1672 Ok(())
1673 }
1674
1675 #[cfg(not(unix))]
1676 {
1677 Err(Error::generic(
1678 "Signal-based restart not supported on this platform".to_string(),
1679 ))
1680 }
1681}
1682
1683async fn restart_via_process_replacement() -> Result<()> {
1685 let current_exe = std::env::current_exe()
1687 .map_err(|e| Error::generic(format!("Failed to get current executable: {}", e)))?;
1688
1689 let args: Vec<String> = std::env::args().collect();
1691
1692 tracing::info!("Restarting with command: {:?}", args);
1693
1694 let mut child = Command::new(¤t_exe)
1696 .args(&args[1..]) .stdout(Stdio::inherit())
1698 .stderr(Stdio::inherit())
1699 .spawn()
1700 .map_err(|e| Error::generic(format!("Failed to start new process: {}", e)))?;
1701
1702 tokio::time::sleep(Duration::from_millis(500)).await;
1704
1705 match child.try_wait() {
1707 Ok(Some(status)) => {
1708 if status.success() {
1709 tracing::info!("New process started successfully");
1710 Ok(())
1711 } else {
1712 Err(Error::generic("New process exited with error".to_string()))
1713 }
1714 }
1715 Ok(None) => {
1716 tracing::info!("New process is running, exiting current process");
1717 std::process::exit(0);
1719 }
1720 Err(e) => Err(Error::generic(format!("Failed to check new process status: {}", e))),
1721 }
1722}
1723
1724async fn restart_via_script() -> Result<()> {
1726 let script_paths = ["./scripts/restart.sh", "./restart.sh", "restart.sh"];
1728
1729 for script_path in &script_paths {
1730 if std::path::Path::new(script_path).exists() {
1731 tracing::info!("Using restart script: {}", script_path);
1732
1733 let output = Command::new("bash")
1734 .arg(script_path)
1735 .output()
1736 .map_err(|e| Error::generic(format!("Failed to execute restart script: {}", e)))?;
1737
1738 if output.status.success() {
1739 return Ok(());
1740 } else {
1741 tracing::warn!(
1742 "Restart script failed: {}",
1743 String::from_utf8_lossy(&output.stderr)
1744 );
1745 }
1746 }
1747 }
1748
1749 let clear_script = "./scripts/clear-ports.sh";
1751 if std::path::Path::new(clear_script).exists() {
1752 tracing::info!("Using clear-ports script as fallback");
1753
1754 let _ = Command::new("bash").arg(clear_script).output();
1755 }
1756
1757 Err(Error::generic(
1758 "No restart mechanism available. Please restart manually.".to_string(),
1759 ))
1760}
1761
/// Report the current restart state (in-progress flag and related details)
/// so the admin UI can poll for completion after calling `restart_servers`.
pub async fn get_restart_status(
    State(state): State<AdminState>,
) -> Json<ApiResponse<RestartStatus>> {
    let status = state.get_restart_status().await;
    Json(ApiResponse::success(status))
}
1769
1770pub async fn get_audit_logs(
1772 Query(params): Query<HashMap<String, String>>,
1773) -> Json<ApiResponse<Vec<crate::audit::AdminAuditLog>>> {
1774 use crate::audit::{get_global_audit_store, AdminActionType};
1775
1776 let action_type_str = params.get("action_type");
1777 let user_id = params.get("user_id").map(|s| s.as_str());
1778 let limit = params.get("limit").and_then(|s| s.parse::<usize>().ok());
1779 let offset = params.get("offset").and_then(|s| s.parse::<usize>().ok());
1780
1781 let action_type = action_type_str.and_then(|s| {
1783 match s.as_str() {
1785 "config_latency_updated" => Some(AdminActionType::ConfigLatencyUpdated),
1786 "config_faults_updated" => Some(AdminActionType::ConfigFaultsUpdated),
1787 "server_restarted" => Some(AdminActionType::ServerRestarted),
1788 "logs_cleared" => Some(AdminActionType::LogsCleared),
1789 _ => None,
1790 }
1791 });
1792
1793 if let Some(audit_store) = get_global_audit_store() {
1794 let logs = audit_store.get_logs(action_type, user_id, limit, offset).await;
1795 Json(ApiResponse::success(logs))
1796 } else {
1797 Json(ApiResponse::error("Audit logging not initialized".to_string()))
1798 }
1799}
1800
1801pub async fn get_audit_stats() -> Json<ApiResponse<crate::audit::AuditLogStats>> {
1803 use crate::audit::get_global_audit_store;
1804
1805 if let Some(audit_store) = get_global_audit_store() {
1806 let stats = audit_store.get_stats().await;
1807 Json(ApiResponse::success(stats))
1808 } else {
1809 Json(ApiResponse::error("Audit logging not initialized".to_string()))
1810 }
1811}
1812
/// Return the current runtime configuration (latency, faults, proxy,
/// traffic shaping, validation) as a single JSON document for the admin UI.
///
/// NOTE(review): `latency.enabled` is hard-coded to `true` here rather than
/// read from state — confirm whether the latency profile has a real
/// enabled/disabled flag that should be surfaced instead.
pub async fn get_config(State(state): State<AdminState>) -> Json<ApiResponse<serde_json::Value>> {
    let config_state = state.get_config().await;

    // Flatten the config sections into the shape the UI expects.
    let config = json!({
        "latency": {
            "enabled": true,
            "base_ms": config_state.latency_profile.base_ms,
            "jitter_ms": config_state.latency_profile.jitter_ms,
            "tag_overrides": config_state.latency_profile.tag_overrides
        },
        "faults": {
            "enabled": config_state.fault_config.enabled,
            "failure_rate": config_state.fault_config.failure_rate,
            "status_codes": config_state.fault_config.status_codes
        },
        "proxy": {
            "enabled": config_state.proxy_config.enabled,
            "upstream_url": config_state.proxy_config.upstream_url,
            "timeout_seconds": config_state.proxy_config.timeout_seconds
        },
        "traffic_shaping": {
            "enabled": config_state.traffic_shaping.enabled,
            "bandwidth": config_state.traffic_shaping.bandwidth,
            "burst_loss": config_state.traffic_shaping.burst_loss
        },
        "validation": {
            "mode": config_state.validation_settings.mode,
            "aggregate_errors": config_state.validation_settings.aggregate_errors,
            "validate_responses": config_state.validation_settings.validate_responses,
            "overrides": config_state.validation_settings.overrides
        }
    });

    Json(ApiResponse::success(config))
}
1849
1850pub fn count_fixtures() -> Result<usize> {
1852 let fixtures_dir =
1854 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
1855 let fixtures_path = std::path::Path::new(&fixtures_dir);
1856
1857 if !fixtures_path.exists() {
1858 return Ok(0);
1859 }
1860
1861 let mut total_count = 0;
1862
1863 let http_fixtures_path = fixtures_path.join("http");
1865 if http_fixtures_path.exists() {
1866 total_count += count_fixtures_in_directory(&http_fixtures_path)?;
1867 }
1868
1869 let ws_fixtures_path = fixtures_path.join("websocket");
1871 if ws_fixtures_path.exists() {
1872 total_count += count_fixtures_in_directory(&ws_fixtures_path)?;
1873 }
1874
1875 let grpc_fixtures_path = fixtures_path.join("grpc");
1877 if grpc_fixtures_path.exists() {
1878 total_count += count_fixtures_in_directory(&grpc_fixtures_path)?;
1879 }
1880
1881 Ok(total_count)
1882}
1883
1884fn count_fixtures_in_directory(dir_path: &std::path::Path) -> Result<usize> {
1886 let mut count = 0;
1887
1888 if let Ok(entries) = std::fs::read_dir(dir_path) {
1889 for entry in entries {
1890 let entry = entry
1891 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
1892 let path = entry.path();
1893
1894 if path.is_dir() {
1895 count += count_fixtures_in_directory(&path)?;
1897 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
1898 count += 1;
1900 }
1901 }
1902 }
1903
1904 Ok(count)
1905}
1906
/// Report whether any recorded fixture exists for the given route.
///
/// Routes map onto the on-disk layout used by the recorder:
/// `fixtures/http/<method>/<sanitized-path>/*.json` for HTTP, plus
/// `fixtures/websocket/<sanitized-path>/*.json` when `method` is "WS".
/// Path separators and `:` params are sanitized to `_`. The duplicated
/// "directory contains a .json file" scan is factored into one helper.
pub fn route_has_fixtures(method: &str, path: &str) -> bool {
    /// True when `dir` exists and directly contains at least one `.json` file.
    fn dir_contains_json(dir: &std::path::Path) -> bool {
        std::fs::read_dir(dir)
            .map(|entries| {
                entries
                    .flatten()
                    .any(|entry| entry.path().extension().and_then(|s| s.to_str()) == Some("json"))
            })
            .unwrap_or(false)
    }

    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return false;
    }

    let path_hash = path.replace(['/', ':'], "_");

    // HTTP fixtures live under fixtures/http/<method>/<path>.
    let http_dir = fixtures_path.join("http").join(method.to_lowercase()).join(&path_hash);
    if dir_contains_json(&http_dir) {
        return true;
    }

    // WebSocket fixtures live under fixtures/websocket/<path>.
    if method.eq_ignore_ascii_case("WS") {
        let ws_dir = fixtures_path.join("websocket").join(&path_hash);
        if dir_contains_json(&ws_dir) {
            return true;
        }
    }

    false
}
1951
1952fn calculate_endpoint_latency(metrics: &RequestMetrics, endpoint: &str) -> Option<u64> {
1954 metrics.response_times_by_endpoint.get(endpoint).and_then(|times| {
1955 if times.is_empty() {
1956 None
1957 } else {
1958 let sum: u64 = times.iter().sum();
1959 Some(sum / times.len() as u64)
1960 }
1961 })
1962}
1963
/// Timestamp of the most recent request recorded for `endpoint`, if any.
fn get_endpoint_last_request(
    metrics: &RequestMetrics,
    endpoint: &str,
) -> Option<chrono::DateTime<Utc>> {
    metrics.last_request_by_endpoint.get(endpoint).copied()
}
1971
1972fn count_requests_by_server_type(metrics: &RequestMetrics, server_type: &str) -> u64 {
1974 match server_type {
1975 "HTTP" => {
1976 metrics
1978 .requests_by_endpoint
1979 .iter()
1980 .filter(|(endpoint, _)| {
1981 let method = endpoint.split(' ').next().unwrap_or("");
1982 matches!(
1983 method,
1984 "GET" | "POST" | "PUT" | "DELETE" | "PATCH" | "HEAD" | "OPTIONS"
1985 )
1986 })
1987 .map(|(_, count)| count)
1988 .sum()
1989 }
1990 "WebSocket" => {
1991 metrics
1993 .requests_by_endpoint
1994 .iter()
1995 .filter(|(endpoint, _)| {
1996 let method = endpoint.split(' ').next().unwrap_or("");
1997 method == "WS"
1998 })
1999 .map(|(_, count)| count)
2000 .sum()
2001 }
2002 "gRPC" => {
2003 metrics
2005 .requests_by_endpoint
2006 .iter()
2007 .filter(|(endpoint, _)| {
2008 let method = endpoint.split(' ').next().unwrap_or("");
2009 method == "gRPC"
2010 })
2011 .map(|(_, count)| count)
2012 .sum()
2013 }
2014 _ => 0,
2015 }
2016}
2017
2018pub async fn get_fixtures() -> Json<ApiResponse<Vec<FixtureInfo>>> {
2020 match scan_fixtures_directory() {
2021 Ok(fixtures) => Json(ApiResponse::success(fixtures)),
2022 Err(e) => {
2023 tracing::error!("Failed to scan fixtures directory: {}", e);
2024 Json(ApiResponse::error(format!("Failed to load fixtures: {}", e)))
2025 }
2026 }
2027}
2028
2029fn scan_fixtures_directory() -> Result<Vec<FixtureInfo>> {
2031 let fixtures_dir =
2032 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2033 let fixtures_path = std::path::Path::new(&fixtures_dir);
2034
2035 if !fixtures_path.exists() {
2036 tracing::warn!("Fixtures directory does not exist: {}", fixtures_dir);
2037 return Ok(Vec::new());
2038 }
2039
2040 let mut all_fixtures = Vec::new();
2041
2042 let http_fixtures = scan_protocol_fixtures(fixtures_path, "http")?;
2044 all_fixtures.extend(http_fixtures);
2045
2046 let ws_fixtures = scan_protocol_fixtures(fixtures_path, "websocket")?;
2048 all_fixtures.extend(ws_fixtures);
2049
2050 let grpc_fixtures = scan_protocol_fixtures(fixtures_path, "grpc")?;
2052 all_fixtures.extend(grpc_fixtures);
2053
2054 all_fixtures.sort_by(|a, b| b.saved_at.cmp(&a.saved_at));
2056
2057 tracing::info!("Found {} fixtures in directory: {}", all_fixtures.len(), fixtures_dir);
2058 Ok(all_fixtures)
2059}
2060
2061fn scan_protocol_fixtures(
2063 fixtures_path: &std::path::Path,
2064 protocol: &str,
2065) -> Result<Vec<FixtureInfo>> {
2066 let protocol_path = fixtures_path.join(protocol);
2067 let mut fixtures = Vec::new();
2068
2069 if !protocol_path.exists() {
2070 return Ok(fixtures);
2071 }
2072
2073 if let Ok(entries) = std::fs::read_dir(&protocol_path) {
2075 for entry in entries {
2076 let entry = entry
2077 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2078 let path = entry.path();
2079
2080 if path.is_dir() {
2081 let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2083 fixtures.extend(sub_fixtures);
2084 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2085 if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2087 fixtures.push(fixture);
2088 }
2089 }
2090 }
2091 }
2092
2093 Ok(fixtures)
2094}
2095
2096fn scan_directory_recursive(
2098 dir_path: &std::path::Path,
2099 protocol: &str,
2100) -> Result<Vec<FixtureInfo>> {
2101 let mut fixtures = Vec::new();
2102
2103 if let Ok(entries) = std::fs::read_dir(dir_path) {
2104 for entry in entries {
2105 let entry = entry
2106 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2107 let path = entry.path();
2108
2109 if path.is_dir() {
2110 let sub_fixtures = scan_directory_recursive(&path, protocol)?;
2112 fixtures.extend(sub_fixtures);
2113 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2114 if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
2116 fixtures.push(fixture);
2117 }
2118 }
2119 }
2120 }
2121
2122 Ok(fixtures)
2123}
2124
2125fn parse_fixture_file_sync(file_path: &std::path::Path, protocol: &str) -> Result<FixtureInfo> {
2127 let metadata = std::fs::metadata(file_path)
2129 .map_err(|e| Error::generic(format!("Failed to read file metadata: {}", e)))?;
2130
2131 let file_size = metadata.len();
2132 let modified_time = metadata
2133 .modified()
2134 .map_err(|e| Error::generic(format!("Failed to get file modification time: {}", e)))?;
2135
2136 let saved_at = chrono::DateTime::from(modified_time);
2137
2138 let content = std::fs::read_to_string(file_path)
2140 .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
2141
2142 let fixture_data: serde_json::Value = serde_json::from_str(&content)
2143 .map_err(|e| Error::generic(format!("Failed to parse fixture JSON: {}", e)))?;
2144
2145 let (method, path) = extract_method_and_path(&fixture_data, protocol)?;
2147
2148 let id = generate_fixture_id(file_path, &content);
2150
2151 let fingerprint = extract_fingerprint(file_path, &fixture_data)?;
2153
2154 let fixtures_dir =
2156 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2157 let fixtures_path = std::path::Path::new(&fixtures_dir);
2158 let file_path_str = file_path
2159 .strip_prefix(fixtures_path)
2160 .unwrap_or(file_path)
2161 .to_string_lossy()
2162 .to_string();
2163
2164 Ok(FixtureInfo {
2165 id,
2166 protocol: protocol.to_string(),
2167 method,
2168 path,
2169 saved_at,
2170 file_size,
2171 file_path: file_path_str,
2172 fingerprint,
2173 metadata: fixture_data,
2174 })
2175}
2176
2177fn extract_method_and_path(
2179 fixture_data: &serde_json::Value,
2180 protocol: &str,
2181) -> Result<(String, String)> {
2182 match protocol {
2183 "http" => {
2184 let method = fixture_data
2186 .get("request")
2187 .and_then(|req| req.get("method"))
2188 .and_then(|m| m.as_str())
2189 .unwrap_or("UNKNOWN")
2190 .to_uppercase();
2191
2192 let path = fixture_data
2193 .get("request")
2194 .and_then(|req| req.get("path"))
2195 .and_then(|p| p.as_str())
2196 .unwrap_or("/unknown")
2197 .to_string();
2198
2199 Ok((method, path))
2200 }
2201 "websocket" => {
2202 let path = fixture_data
2204 .get("path")
2205 .and_then(|p| p.as_str())
2206 .or_else(|| {
2207 fixture_data
2208 .get("request")
2209 .and_then(|req| req.get("path"))
2210 .and_then(|p| p.as_str())
2211 })
2212 .unwrap_or("/ws")
2213 .to_string();
2214
2215 Ok(("WS".to_string(), path))
2216 }
2217 "grpc" => {
2218 let service =
2220 fixture_data.get("service").and_then(|s| s.as_str()).unwrap_or("UnknownService");
2221
2222 let method =
2223 fixture_data.get("method").and_then(|m| m.as_str()).unwrap_or("UnknownMethod");
2224
2225 let path = format!("/{}/{}", service, method);
2226 Ok(("gRPC".to_string(), path))
2227 }
2228 _ => {
2229 let path = fixture_data
2230 .get("path")
2231 .and_then(|p| p.as_str())
2232 .unwrap_or("/unknown")
2233 .to_string();
2234 Ok((protocol.to_uppercase(), path))
2235 }
2236 }
2237}
2238
/// Stable identifier for a fixture, derived by hashing its path together
/// with its raw content (so editing the file yields a new id).
fn generate_fixture_id(file_path: &std::path::Path, content: &str) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut hasher = DefaultHasher::new();
    // Tuple hashing feeds the same byte stream as hashing each part in turn.
    (file_path, content).hash(&mut hasher);
    format!("fixture_{:x}", hasher.finish())
}
2249
2250fn extract_fingerprint(
2252 file_path: &std::path::Path,
2253 fixture_data: &serde_json::Value,
2254) -> Result<String> {
2255 if let Some(fingerprint) = fixture_data.get("fingerprint").and_then(|f| f.as_str()) {
2257 return Ok(fingerprint.to_string());
2258 }
2259
2260 if let Some(file_name) = file_path.file_stem().and_then(|s| s.to_str()) {
2262 if let Some(hash) = file_name.split('_').next_back() {
2264 if hash.len() >= 8 && hash.chars().all(|c| c.is_alphanumeric()) {
2265 return Ok(hash.to_string());
2266 }
2267 }
2268 }
2269
2270 use std::collections::hash_map::DefaultHasher;
2272 use std::hash::{Hash, Hasher};
2273
2274 let mut hasher = DefaultHasher::new();
2275 file_path.hash(&mut hasher);
2276 Ok(format!("{:x}", hasher.finish()))
2277}
2278
2279pub async fn delete_fixture(
2281 Json(payload): Json<FixtureDeleteRequest>,
2282) -> Json<ApiResponse<String>> {
2283 match delete_fixture_by_id(&payload.fixture_id).await {
2284 Ok(_) => {
2285 tracing::info!("Successfully deleted fixture: {}", payload.fixture_id);
2286 Json(ApiResponse::success("Fixture deleted successfully".to_string()))
2287 }
2288 Err(e) => {
2289 tracing::error!("Failed to delete fixture {}: {}", payload.fixture_id, e);
2290 Json(ApiResponse::error(format!("Failed to delete fixture: {}", e)))
2291 }
2292 }
2293}
2294
2295pub async fn delete_fixtures_bulk(
2297 Json(payload): Json<FixtureBulkDeleteRequest>,
2298) -> Json<ApiResponse<FixtureBulkDeleteResult>> {
2299 let mut deleted_count = 0;
2300 let mut errors = Vec::new();
2301
2302 for fixture_id in &payload.fixture_ids {
2303 match delete_fixture_by_id(fixture_id).await {
2304 Ok(_) => {
2305 deleted_count += 1;
2306 tracing::info!("Successfully deleted fixture: {}", fixture_id);
2307 }
2308 Err(e) => {
2309 errors.push(format!("Failed to delete {}: {}", fixture_id, e));
2310 tracing::error!("Failed to delete fixture {}: {}", fixture_id, e);
2311 }
2312 }
2313 }
2314
2315 let result = FixtureBulkDeleteResult {
2316 deleted_count,
2317 total_requested: payload.fixture_ids.len(),
2318 errors: errors.clone(),
2319 };
2320
2321 if errors.is_empty() {
2322 Json(ApiResponse::success(result))
2323 } else {
2324 Json(ApiResponse::error(format!(
2325 "Partial success: {} deleted, {} errors",
2326 deleted_count,
2327 errors.len()
2328 )))
2329 }
2330}
2331
/// Delete the fixture file identified by `fixture_id`, then prune any
/// directories the removal left empty.
///
/// The id is matched by re-scanning candidate fixture files (see
/// `find_fixture_file_by_id`), and the blocking filesystem work is moved
/// onto tokio's blocking thread pool.
async fn delete_fixture_by_id(fixture_id: &str) -> Result<()> {
    // Resolve the configured fixtures root (defaults to ./fixtures).
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);

    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }

    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;

    // Blocking fs call runs off the async executor; the double `?` first
    // unwraps the JoinError, then the removal result itself.
    let file_path_clone = file_path.clone();
    tokio::task::spawn_blocking(move || {
        if file_path_clone.exists() {
            std::fs::remove_file(&file_path_clone).map_err(|e| {
                Error::generic(format!(
                    "Failed to delete fixture file {}: {}",
                    file_path_clone.display(),
                    e
                ))
            })
        } else {
            Err(Error::generic(format!("Fixture file not found: {}", file_path_clone.display())))
        }
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;

    tracing::info!("Deleted fixture file: {}", file_path.display());

    // Best-effort cleanup of now-empty parent directories.
    cleanup_empty_directories(&file_path).await;

    Ok(())
}
2372
2373fn find_fixture_file_by_id(
2375 fixtures_path: &std::path::Path,
2376 fixture_id: &str,
2377) -> Result<std::path::PathBuf> {
2378 let protocols = ["http", "websocket", "grpc"];
2380
2381 for protocol in &protocols {
2382 let protocol_path = fixtures_path.join(protocol);
2383 if let Ok(found_path) = search_fixture_in_directory(&protocol_path, fixture_id) {
2384 return Ok(found_path);
2385 }
2386 }
2387
2388 Err(Error::generic(format!(
2389 "Fixture with ID '{}' not found in any protocol directory",
2390 fixture_id
2391 )))
2392}
2393
2394fn search_fixture_in_directory(
2396 dir_path: &std::path::Path,
2397 fixture_id: &str,
2398) -> Result<std::path::PathBuf> {
2399 if !dir_path.exists() {
2400 return Err(Error::generic(format!("Directory does not exist: {}", dir_path.display())));
2401 }
2402
2403 if let Ok(entries) = std::fs::read_dir(dir_path) {
2404 for entry in entries {
2405 let entry = entry
2406 .map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
2407 let path = entry.path();
2408
2409 if path.is_dir() {
2410 if let Ok(found_path) = search_fixture_in_directory(&path, fixture_id) {
2412 return Ok(found_path);
2413 }
2414 } else if path.extension().and_then(|s| s.to_str()) == Some("json") {
2415 if let Ok(fixture_info) = parse_fixture_file_sync(&path, "unknown") {
2417 if fixture_info.id == fixture_id {
2418 return Ok(path);
2419 }
2420 }
2421 }
2422 }
2423 }
2424
2425 Err(Error::generic(format!(
2426 "Fixture not found in directory: {}",
2427 dir_path.display()
2428 )))
2429}
2430
/// Walk upward from a deleted fixture's location, removing directories that
/// became empty, stopping at the fixtures root or the filesystem root.
///
/// Best-effort: every failure is logged at debug level and swallowed, and
/// the spawned task's join result is deliberately discarded (`let _`).
async fn cleanup_empty_directories(file_path: &std::path::Path) {
    let file_path = file_path.to_path_buf();

    let _ = tokio::task::spawn_blocking(move || {
        if let Some(parent) = file_path.parent() {
            let mut current = parent;
            let fixtures_dir =
                std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
            let fixtures_path = std::path::Path::new(&fixtures_dir);

            // NOTE(review): this root check compares paths componentwise; if
            // `file_path` is absolute while MOCKFORGE_FIXTURES_DIR is relative
            // (or vice versa) it may never match, and the loop then relies on
            // the "directory not empty" break below — confirm callers always
            // pass paths rooted at fixtures_path.
            while current != fixtures_path && current.parent().is_some() {
                if let Ok(entries) = std::fs::read_dir(current) {
                    // `count()` consumes the ReadDir iterator; 0 means empty.
                    if entries.count() == 0 {
                        if let Err(e) = std::fs::remove_dir(current) {
                            tracing::debug!(
                                "Failed to remove empty directory {}: {}",
                                current.display(),
                                e
                            );
                            break;
                        } else {
                            tracing::debug!("Removed empty directory: {}", current.display());
                        }
                    } else {
                        // Directory still has content, so no ancestor can be empty.
                        break;
                    }
                } else {
                    break;
                }

                // Move one level up for the next iteration.
                if let Some(next_parent) = current.parent() {
                    current = next_parent;
                } else {
                    break;
                }
            }
        }
    })
    .await;
}
2474
2475pub async fn download_fixture(Path(fixture_id): Path<String>) -> impl IntoResponse {
2477 match download_fixture_by_id(&fixture_id).await {
2479 Ok((content, file_name)) => (
2480 StatusCode::OK,
2481 [
2482 (http::header::CONTENT_TYPE, "application/json".to_string()),
2483 (
2484 http::header::CONTENT_DISPOSITION,
2485 format!("attachment; filename=\"{}\"", file_name),
2486 ),
2487 ],
2488 content,
2489 )
2490 .into_response(),
2491 Err(e) => {
2492 tracing::error!("Failed to download fixture {}: {}", fixture_id, e);
2493 let error_response = format!(r#"{{"error": "Failed to download fixture: {}"}}"#, e);
2494 (
2495 StatusCode::NOT_FOUND,
2496 [(http::header::CONTENT_TYPE, "application/json".to_string())],
2497 error_response,
2498 )
2499 .into_response()
2500 }
2501 }
2502}
2503
2504async fn download_fixture_by_id(fixture_id: &str) -> Result<(String, String)> {
2506 let fixtures_dir =
2508 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2509 let fixtures_path = std::path::Path::new(&fixtures_dir);
2510
2511 if !fixtures_path.exists() {
2512 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2513 }
2514
2515 let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2516
2517 let file_path_clone = file_path.clone();
2519 let (content, file_name) = tokio::task::spawn_blocking(move || {
2520 let content = std::fs::read_to_string(&file_path_clone)
2521 .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
2522
2523 let file_name = file_path_clone
2524 .file_name()
2525 .and_then(|name| name.to_str())
2526 .unwrap_or("fixture.json")
2527 .to_string();
2528
2529 Ok::<_, Error>((content, file_name))
2530 })
2531 .await
2532 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2533
2534 tracing::info!("Downloaded fixture file: {} ({} bytes)", file_path.display(), content.len());
2535 Ok((content, file_name))
2536}
2537
2538pub async fn rename_fixture(
2540 Path(fixture_id): Path<String>,
2541 Json(payload): Json<FixtureRenameRequest>,
2542) -> Json<ApiResponse<String>> {
2543 match rename_fixture_by_id(&fixture_id, &payload.new_name).await {
2544 Ok(new_path) => {
2545 tracing::info!("Successfully renamed fixture: {} -> {}", fixture_id, payload.new_name);
2546 Json(ApiResponse::success(format!("Fixture renamed successfully to: {}", new_path)))
2547 }
2548 Err(e) => {
2549 tracing::error!("Failed to rename fixture {}: {}", fixture_id, e);
2550 Json(ApiResponse::error(format!("Failed to rename fixture: {}", e)))
2551 }
2552 }
2553}
2554
2555async fn rename_fixture_by_id(fixture_id: &str, new_name: &str) -> Result<String> {
2557 if new_name.is_empty() {
2559 return Err(Error::generic("New name cannot be empty".to_string()));
2560 }
2561
2562 let new_name = if new_name.ends_with(".json") {
2564 new_name.to_string()
2565 } else {
2566 format!("{}.json", new_name)
2567 };
2568
2569 let fixtures_dir =
2571 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2572 let fixtures_path = std::path::Path::new(&fixtures_dir);
2573
2574 if !fixtures_path.exists() {
2575 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2576 }
2577
2578 let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2579
2580 let parent = old_path
2582 .parent()
2583 .ok_or_else(|| Error::generic("Could not determine parent directory".to_string()))?;
2584
2585 let new_path = parent.join(&new_name);
2586
2587 if new_path.exists() {
2589 return Err(Error::generic(format!(
2590 "A fixture with name '{}' already exists in the same directory",
2591 new_name
2592 )));
2593 }
2594
2595 let old_path_clone = old_path.clone();
2597 let new_path_clone = new_path.clone();
2598 tokio::task::spawn_blocking(move || {
2599 std::fs::rename(&old_path_clone, &new_path_clone)
2600 .map_err(|e| Error::generic(format!("Failed to rename fixture file: {}", e)))
2601 })
2602 .await
2603 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2604
2605 tracing::info!("Renamed fixture file: {} -> {}", old_path.display(), new_path.display());
2606
2607 Ok(new_path
2609 .strip_prefix(fixtures_path)
2610 .unwrap_or(&new_path)
2611 .to_string_lossy()
2612 .to_string())
2613}
2614
2615pub async fn move_fixture(
2617 Path(fixture_id): Path<String>,
2618 Json(payload): Json<FixtureMoveRequest>,
2619) -> Json<ApiResponse<String>> {
2620 match move_fixture_by_id(&fixture_id, &payload.new_path).await {
2621 Ok(new_location) => {
2622 tracing::info!("Successfully moved fixture: {} -> {}", fixture_id, payload.new_path);
2623 Json(ApiResponse::success(format!("Fixture moved successfully to: {}", new_location)))
2624 }
2625 Err(e) => {
2626 tracing::error!("Failed to move fixture {}: {}", fixture_id, e);
2627 Json(ApiResponse::error(format!("Failed to move fixture: {}", e)))
2628 }
2629 }
2630}
2631
2632async fn move_fixture_by_id(fixture_id: &str, new_path: &str) -> Result<String> {
2634 if new_path.is_empty() {
2636 return Err(Error::generic("New path cannot be empty".to_string()));
2637 }
2638
2639 let fixtures_dir =
2641 std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
2642 let fixtures_path = std::path::Path::new(&fixtures_dir);
2643
2644 if !fixtures_path.exists() {
2645 return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
2646 }
2647
2648 let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
2649
2650 let new_full_path = if new_path.starts_with('/') {
2652 fixtures_path.join(new_path.trim_start_matches('/'))
2654 } else {
2655 fixtures_path.join(new_path)
2657 };
2658
2659 let new_full_path = if new_full_path.extension().and_then(|s| s.to_str()) == Some("json") {
2661 new_full_path
2662 } else {
2663 if new_full_path.is_dir() || !new_path.contains('.') {
2665 let file_name = old_path.file_name().ok_or_else(|| {
2666 Error::generic("Could not determine original file name".to_string())
2667 })?;
2668 new_full_path.join(file_name)
2669 } else {
2670 new_full_path.with_extension("json")
2671 }
2672 };
2673
2674 if new_full_path.exists() {
2676 return Err(Error::generic(format!(
2677 "A fixture already exists at path: {}",
2678 new_full_path.display()
2679 )));
2680 }
2681
2682 let old_path_clone = old_path.clone();
2684 let new_full_path_clone = new_full_path.clone();
2685 tokio::task::spawn_blocking(move || {
2686 if let Some(parent) = new_full_path_clone.parent() {
2688 std::fs::create_dir_all(parent)
2689 .map_err(|e| Error::generic(format!("Failed to create target directory: {}", e)))?;
2690 }
2691
2692 std::fs::rename(&old_path_clone, &new_full_path_clone)
2694 .map_err(|e| Error::generic(format!("Failed to move fixture file: {}", e)))
2695 })
2696 .await
2697 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2698
2699 tracing::info!("Moved fixture file: {} -> {}", old_path.display(), new_full_path.display());
2700
2701 cleanup_empty_directories(&old_path).await;
2703
2704 Ok(new_full_path
2706 .strip_prefix(fixtures_path)
2707 .unwrap_or(&new_full_path)
2708 .to_string_lossy()
2709 .to_string())
2710}
2711
2712pub async fn get_validation(
2714 State(state): State<AdminState>,
2715) -> Json<ApiResponse<ValidationSettings>> {
2716 let config_state = state.get_config().await;
2718
2719 Json(ApiResponse::success(config_state.validation_settings))
2720}
2721
2722pub async fn update_validation(
2724 State(state): State<AdminState>,
2725 Json(update): Json<ValidationUpdate>,
2726) -> Json<ApiResponse<String>> {
2727 match update.mode.as_str() {
2729 "enforce" | "warn" | "off" => {}
2730 _ => {
2731 return Json(ApiResponse::error(
2732 "Invalid validation mode. Must be 'enforce', 'warn', or 'off'".to_string(),
2733 ))
2734 }
2735 }
2736
2737 let mode = update.mode.clone();
2739 state
2740 .update_validation_config(
2741 update.mode,
2742 update.aggregate_errors,
2743 update.validate_responses,
2744 update.overrides.unwrap_or_default(),
2745 )
2746 .await;
2747
2748 tracing::info!(
2749 "Updated validation settings: mode={}, aggregate_errors={}",
2750 mode,
2751 update.aggregate_errors
2752 );
2753
2754 Json(ApiResponse::success("Validation settings updated".to_string()))
2755}
2756
2757pub async fn get_env_vars() -> Json<ApiResponse<HashMap<String, String>>> {
2759 let mut env_vars = HashMap::new();
2761
2762 let relevant_vars = [
2763 "MOCKFORGE_LATENCY_ENABLED",
2765 "MOCKFORGE_FAILURES_ENABLED",
2766 "MOCKFORGE_PROXY_ENABLED",
2767 "MOCKFORGE_RECORD_ENABLED",
2768 "MOCKFORGE_REPLAY_ENABLED",
2769 "MOCKFORGE_LOG_LEVEL",
2770 "MOCKFORGE_CONFIG_FILE",
2771 "RUST_LOG",
2772 "MOCKFORGE_HTTP_PORT",
2774 "MOCKFORGE_HTTP_HOST",
2775 "MOCKFORGE_HTTP_OPENAPI_SPEC",
2776 "MOCKFORGE_CORS_ENABLED",
2777 "MOCKFORGE_REQUEST_TIMEOUT_SECS",
2778 "MOCKFORGE_WS_PORT",
2780 "MOCKFORGE_WS_HOST",
2781 "MOCKFORGE_WS_REPLAY_FILE",
2782 "MOCKFORGE_WS_CONNECTION_TIMEOUT_SECS",
2783 "MOCKFORGE_GRPC_PORT",
2785 "MOCKFORGE_GRPC_HOST",
2786 "MOCKFORGE_ADMIN_ENABLED",
2788 "MOCKFORGE_ADMIN_PORT",
2789 "MOCKFORGE_ADMIN_HOST",
2790 "MOCKFORGE_ADMIN_MOUNT_PATH",
2791 "MOCKFORGE_ADMIN_API_ENABLED",
2792 "MOCKFORGE_RESPONSE_TEMPLATE_EXPAND",
2794 "MOCKFORGE_REQUEST_VALIDATION",
2795 "MOCKFORGE_AGGREGATE_ERRORS",
2796 "MOCKFORGE_RESPONSE_VALIDATION",
2797 "MOCKFORGE_VALIDATION_STATUS",
2798 "MOCKFORGE_RAG_ENABLED",
2800 "MOCKFORGE_FAKE_TOKENS",
2801 "MOCKFORGE_FIXTURES_DIR",
2803 ];
2804
2805 for var_name in &relevant_vars {
2806 if let Ok(value) = std::env::var(var_name) {
2807 env_vars.insert(var_name.to_string(), value);
2808 }
2809 }
2810
2811 Json(ApiResponse::success(env_vars))
2812}
2813
2814pub async fn update_env_var(Json(update): Json<EnvVarUpdate>) -> Json<ApiResponse<String>> {
2816 std::env::set_var(&update.key, &update.value);
2818
2819 tracing::info!("Updated environment variable: {}={}", update.key, update.value);
2820
2821 Json(ApiResponse::success(format!(
2824 "Environment variable {} updated to '{}'. Note: This change is not persisted and will be lost on restart.",
2825 update.key, update.value
2826 )))
2827}
2828
2829pub async fn get_file_content(
2831 Json(request): Json<FileContentRequest>,
2832) -> Json<ApiResponse<String>> {
2833 if let Err(e) = validate_file_path(&request.file_path) {
2835 return Json(ApiResponse::error(format!("Invalid file path: {}", e)));
2836 }
2837
2838 match tokio::fs::read_to_string(&request.file_path).await {
2840 Ok(content) => {
2841 if let Err(e) = validate_file_content(&content) {
2843 return Json(ApiResponse::error(format!("Invalid file content: {}", e)));
2844 }
2845 Json(ApiResponse::success(content))
2846 }
2847 Err(e) => Json(ApiResponse::error(format!("Failed to read file: {}", e))),
2848 }
2849}
2850
2851pub async fn save_file_content(Json(request): Json<FileSaveRequest>) -> Json<ApiResponse<String>> {
2853 match save_file_to_filesystem(&request.file_path, &request.content).await {
2854 Ok(_) => {
2855 tracing::info!("Successfully saved file: {}", request.file_path);
2856 Json(ApiResponse::success("File saved successfully".to_string()))
2857 }
2858 Err(e) => {
2859 tracing::error!("Failed to save file {}: {}", request.file_path, e);
2860 Json(ApiResponse::error(format!("Failed to save file: {}", e)))
2861 }
2862 }
2863}
2864
2865async fn save_file_to_filesystem(file_path: &str, content: &str) -> Result<()> {
2867 validate_file_path(file_path)?;
2869
2870 validate_file_content(content)?;
2872
2873 let path = std::path::PathBuf::from(file_path);
2875 let content = content.to_string();
2876
2877 let path_clone = path.clone();
2879 let content_clone = content.clone();
2880 tokio::task::spawn_blocking(move || {
2881 if let Some(parent) = path_clone.parent() {
2883 std::fs::create_dir_all(parent).map_err(|e| {
2884 Error::generic(format!("Failed to create directory {}: {}", parent.display(), e))
2885 })?;
2886 }
2887
2888 std::fs::write(&path_clone, &content_clone).map_err(|e| {
2890 Error::generic(format!("Failed to write file {}: {}", path_clone.display(), e))
2891 })?;
2892
2893 let written_content = std::fs::read_to_string(&path_clone).map_err(|e| {
2895 Error::generic(format!("Failed to verify written file {}: {}", path_clone.display(), e))
2896 })?;
2897
2898 if written_content != content_clone {
2899 return Err(Error::generic(format!(
2900 "File content verification failed for {}",
2901 path_clone.display()
2902 )));
2903 }
2904
2905 Ok::<_, Error>(())
2906 })
2907 .await
2908 .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
2909
2910 tracing::info!("File saved successfully: {} ({} bytes)", path.display(), content.len());
2911 Ok(())
2912}
2913
2914fn validate_file_path(file_path: &str) -> Result<()> {
2916 if file_path.contains("..") {
2918 return Err(Error::generic("Path traversal detected in file path".to_string()));
2919 }
2920
2921 let path = std::path::Path::new(file_path);
2923 if path.is_absolute() {
2924 let allowed_dirs = [
2926 std::env::current_dir().unwrap_or_default(),
2927 std::path::PathBuf::from("."),
2928 std::path::PathBuf::from("fixtures"),
2929 std::path::PathBuf::from("config"),
2930 ];
2931
2932 let mut is_allowed = false;
2933 for allowed_dir in &allowed_dirs {
2934 if path.starts_with(allowed_dir) {
2935 is_allowed = true;
2936 break;
2937 }
2938 }
2939
2940 if !is_allowed {
2941 return Err(Error::generic("File path is outside allowed directories".to_string()));
2942 }
2943 }
2944
2945 let dangerous_extensions = ["exe", "bat", "cmd", "sh", "ps1", "scr", "com"];
2947 if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
2948 if dangerous_extensions.contains(&extension.to_lowercase().as_str()) {
2949 return Err(Error::generic(format!(
2950 "Dangerous file extension not allowed: {}",
2951 extension
2952 )));
2953 }
2954 }
2955
2956 Ok(())
2957}
2958
2959fn validate_file_content(content: &str) -> Result<()> {
2961 if content.len() > 10 * 1024 * 1024 {
2963 return Err(Error::generic("File content too large (max 10MB)".to_string()));
2965 }
2966
2967 if content.contains('\0') {
2969 return Err(Error::generic("File content contains null bytes".to_string()));
2970 }
2971
2972 Ok(())
2973}
2974
/// Request body for deleting a single fixture.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureDeleteRequest {
    /// ID of the fixture to delete.
    pub fixture_id: String,
}
2980
/// Request body for setting a process environment variable.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvVarUpdate {
    /// Environment variable name.
    pub key: String,
    /// Value to assign.
    pub value: String,
}
2987
/// Request body for deleting several fixtures in one call.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteRequest {
    /// IDs of the fixtures to delete; each is attempted independently.
    pub fixture_ids: Vec<String>,
}
2993
/// Outcome summary for a bulk fixture deletion.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteResult {
    /// Number of fixtures successfully deleted.
    pub deleted_count: usize,
    /// Number of fixture IDs in the original request.
    pub total_requested: usize,
    /// One message per fixture that failed to delete.
    pub errors: Vec<String>,
}
3001
/// Request body for renaming a fixture file in place.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureRenameRequest {
    /// New file name; a `.json` extension is appended if missing.
    pub new_name: String,
}
3007
/// Request body for moving a fixture file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureMoveRequest {
    /// Destination path, interpreted relative to the fixtures root.
    pub new_path: String,
}
3013
/// Request body for reading a file in the admin editor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContentRequest {
    /// Path of the file to read (validated before use).
    pub file_path: String,
    /// Client-declared file type; not used by the read handler itself.
    pub file_type: String,
}
3020
/// Request body for saving a file from the admin editor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileSaveRequest {
    /// Path to write (validated before use).
    pub file_path: String,
    /// Full text content to persist.
    pub content: String,
}
3027
3028pub async fn get_smoke_tests(
3030 State(state): State<AdminState>,
3031) -> Json<ApiResponse<Vec<SmokeTestResult>>> {
3032 let results = state.get_smoke_test_results().await;
3033 Json(ApiResponse::success(results))
3034}
3035
3036pub async fn run_smoke_tests_endpoint(
3038 State(state): State<AdminState>,
3039) -> Json<ApiResponse<String>> {
3040 tracing::info!("Starting smoke test execution");
3041
3042 let state_clone = state.clone();
3044 tokio::spawn(async move {
3045 if let Err(e) = execute_smoke_tests(&state_clone).await {
3046 tracing::error!("Smoke test execution failed: {}", e);
3047 } else {
3048 tracing::info!("Smoke test execution completed successfully");
3049 }
3050 });
3051
3052 Json(ApiResponse::success(
3053 "Smoke tests started. Check results in the smoke tests section.".to_string(),
3054 ))
3055}
3056
3057async fn execute_smoke_tests(state: &AdminState) -> Result<()> {
3059 let base_url =
3061 std::env::var("MOCKFORGE_BASE_URL").unwrap_or_else(|_| "http://localhost:3000".to_string());
3062
3063 let context = SmokeTestContext {
3064 base_url,
3065 timeout_seconds: 30,
3066 parallel: true,
3067 };
3068
3069 let fixtures = scan_fixtures_directory()?;
3071
3072 let http_fixtures: Vec<&FixtureInfo> =
3074 fixtures.iter().filter(|f| f.protocol == "http").collect();
3075
3076 if http_fixtures.is_empty() {
3077 tracing::warn!("No HTTP fixtures found for smoke testing");
3078 return Ok(());
3079 }
3080
3081 tracing::info!("Running smoke tests for {} HTTP fixtures", http_fixtures.len());
3082
3083 let mut test_results = Vec::new();
3085
3086 for fixture in http_fixtures {
3087 let test_result = create_smoke_test_from_fixture(fixture);
3088 test_results.push(test_result);
3089 }
3090
3091 let mut executed_results = Vec::new();
3093 for mut test_result in test_results {
3094 test_result.status = "running".to_string();
3096 state.update_smoke_test_result(test_result.clone()).await;
3097
3098 let start_time = std::time::Instant::now();
3100 match execute_single_smoke_test(&test_result, &context).await {
3101 Ok((status_code, response_time_ms)) => {
3102 test_result.status = "passed".to_string();
3103 test_result.status_code = Some(status_code);
3104 test_result.response_time_ms = Some(response_time_ms);
3105 test_result.error_message = None;
3106 }
3107 Err(e) => {
3108 test_result.status = "failed".to_string();
3109 test_result.error_message = Some(e.to_string());
3110 test_result.status_code = None;
3111 test_result.response_time_ms = None;
3112 }
3113 }
3114
3115 let duration = start_time.elapsed();
3116 test_result.duration_seconds = Some(duration.as_secs_f64());
3117 test_result.last_run = Some(Utc::now());
3118
3119 executed_results.push(test_result.clone());
3120 state.update_smoke_test_result(test_result).await;
3121 }
3122
3123 tracing::info!("Smoke test execution completed: {} tests run", executed_results.len());
3124 Ok(())
3125}
3126
3127fn create_smoke_test_from_fixture(fixture: &FixtureInfo) -> SmokeTestResult {
3129 let test_name = format!("{} {}", fixture.method, fixture.path);
3130 let description = format!("Smoke test for {} endpoint", fixture.path);
3131
3132 SmokeTestResult {
3133 id: format!("smoke_{}", fixture.id),
3134 name: test_name,
3135 method: fixture.method.clone(),
3136 path: fixture.path.clone(),
3137 description,
3138 last_run: None,
3139 status: "pending".to_string(),
3140 response_time_ms: None,
3141 error_message: None,
3142 status_code: None,
3143 duration_seconds: None,
3144 }
3145}
3146
3147async fn execute_single_smoke_test(
3149 test: &SmokeTestResult,
3150 context: &SmokeTestContext,
3151) -> Result<(u16, u64)> {
3152 let url = format!("{}{}", context.base_url, test.path);
3153 let client = reqwest::Client::builder()
3154 .timeout(Duration::from_secs(context.timeout_seconds))
3155 .build()
3156 .map_err(|e| Error::generic(format!("Failed to create HTTP client: {}", e)))?;
3157
3158 let start_time = std::time::Instant::now();
3159
3160 let response = match test.method.as_str() {
3161 "GET" => client.get(&url).send().await,
3162 "POST" => client.post(&url).send().await,
3163 "PUT" => client.put(&url).send().await,
3164 "DELETE" => client.delete(&url).send().await,
3165 "PATCH" => client.patch(&url).send().await,
3166 "HEAD" => client.head(&url).send().await,
3167 "OPTIONS" => client.request(reqwest::Method::OPTIONS, &url).send().await,
3168 _ => {
3169 return Err(Error::generic(format!("Unsupported HTTP method: {}", test.method)));
3170 }
3171 };
3172
3173 let response_time = start_time.elapsed();
3174 let response_time_ms = response_time.as_millis() as u64;
3175
3176 match response {
3177 Ok(resp) => {
3178 let status_code = resp.status().as_u16();
3179 if (200..400).contains(&status_code) {
3180 Ok((status_code, response_time_ms))
3181 } else {
3182 Err(Error::generic(format!(
3183 "HTTP error: {} {}",
3184 status_code,
3185 resp.status().canonical_reason().unwrap_or("Unknown")
3186 )))
3187 }
3188 }
3189 Err(e) => Err(Error::generic(format!("Request failed: {}", e))),
3190 }
3191}
3192
/// Request body for installing a plugin via the admin API.
#[derive(Debug, Deserialize)]
pub struct PluginInstallRequest {
    /// Plugin source string (local path, URL, git repo, or registry ref).
    pub source: String,
    /// When true, replace an already-installed plugin with the same ID.
    #[serde(default)]
    pub force: bool,
    /// Requested but rejected by the admin install flow (validation is mandatory).
    #[serde(default)]
    pub skip_validation: bool,
    /// Requested but rejected by the admin install flow (verification is mandatory).
    #[serde(default)]
    pub no_verify: bool,
    /// Optional checksum used when downloading URL sources.
    pub checksum: Option<String>,
}
3204
/// Request body for validating a plugin source without installing it.
#[derive(Debug, Deserialize)]
pub struct PluginValidateRequest {
    /// Plugin source string (local path, URL, or git repo).
    pub source: String,
}
3209
/// Locate a plugin root: `path` itself when it holds `plugin.yaml`,
/// otherwise the first immediate subdirectory that does. Returns `None`
/// when neither exists (including when `path` cannot be read).
fn find_plugin_directory(path: &std::path::Path) -> Option<std::path::PathBuf> {
    if path.join("plugin.yaml").exists() {
        return Some(path.to_path_buf());
    }

    std::fs::read_dir(path)
        .ok()?
        .filter_map(|entry| entry.ok())
        .map(|entry| entry.path())
        .find(|child| child.is_dir() && child.join("plugin.yaml").exists())
}
3225
3226async fn resolve_plugin_source_path(
3227 source: PluginSource,
3228 checksum: Option<&str>,
3229) -> std::result::Result<std::path::PathBuf, String> {
3230 match source {
3231 PluginSource::Local(path) => Ok(path),
3232 PluginSource::Url { url, .. } => {
3233 let loader = RemotePluginLoader::new(RemotePluginConfig::default())
3234 .map_err(|e| format!("Failed to initialize remote plugin loader: {}", e))?;
3235 loader
3236 .download_with_checksum(&url, checksum)
3237 .await
3238 .map_err(|e| format!("Failed to download plugin from URL: {}", e))
3239 }
3240 PluginSource::Git(git_source) => {
3241 let loader = GitPluginLoader::new(GitPluginConfig::default())
3242 .map_err(|e| format!("Failed to initialize git plugin loader: {}", e))?;
3243 loader
3244 .clone_from_git(&git_source)
3245 .await
3246 .map_err(|e| format!("Failed to clone plugin from git: {}", e))
3247 }
3248 PluginSource::Registry { name, version } => Err(format!(
3249 "Registry plugin installation is not yet supported from the admin API (requested {}@{})",
3250 name,
3251 version.unwrap_or_else(|| "latest".to_string())
3252 )),
3253 }
3254}
3255
/// Install a plugin from a source string (local path, URL, or git repo),
/// load it into a plugin runtime, and register it in the admin registry.
///
/// Responds with a JSON envelope: `success: true` plus plugin id/name/version
/// on success, or `success: false` with an error message. `skip_validation`
/// and `no_verify` are rejected outright in the admin flow; `force: true`
/// replaces an already-installed plugin with the same ID.
pub async fn install_plugin(
    State(state): State<AdminState>,
    Json(request): Json<PluginInstallRequest>,
) -> impl IntoResponse {
    let source = request.source.trim().to_string();
    if source.is_empty() {
        return Json(json!({
            "success": false,
            "error": "Plugin source is required"
        }));
    }

    if request.skip_validation {
        return Json(json!({
            "success": false,
            "error": "Skipping validation is not supported in admin install flow."
        }));
    }

    if request.no_verify {
        return Json(json!({
            "success": false,
            "error": "Disabling signature verification is not supported in admin install flow."
        }));
    }

    let force = request.force;
    let checksum = request.checksum.clone();
    let state_for_install = state.clone();

    // The whole install runs inside spawn_blocking because the registry is
    // taken with `blocking_write()` below; a fresh current-thread runtime
    // drives the async loader calls from that blocking thread.
    // NOTE(review): nesting a runtime here avoids blocking the outer
    // executor, but presumably the loader APIs could be awaited directly if
    // the registry offered an async write path — confirm before refactoring.
    let install_result = tokio::task::spawn_blocking(
        move || -> std::result::Result<(String, String, String), String> {
            let runtime = tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .map_err(|e| format!("Failed to initialize install runtime: {}", e))?;

            let (plugin_instance, plugin_id, plugin_name, plugin_version) =
                runtime.block_on(async move {
                    let parsed_source = PluginSource::parse(&source)
                        .map_err(|e| format!("Invalid plugin source: {}", e))?;

                    // Fetch/clone remote sources down to a local path.
                    let source_path =
                        resolve_plugin_source_path(parsed_source, checksum.as_deref()).await?;

                    // The manifest may live in the resolved directory itself
                    // or one level down.
                    let plugin_root = if source_path.is_dir() {
                        find_plugin_directory(&source_path).unwrap_or(source_path.clone())
                    } else {
                        source_path.clone()
                    };

                    if !plugin_root.exists() || !plugin_root.is_dir() {
                        return Err(format!(
                            "Resolved plugin path is not a directory: {}",
                            plugin_root.display()
                        ));
                    }

                    // Mandatory manifest validation before anything is loaded.
                    let loader = PluginLoader::new(PluginLoaderConfig::default());
                    let manifest = loader
                        .validate_plugin(&plugin_root)
                        .await
                        .map_err(|e| format!("Failed to validate plugin: {}", e))?;

                    let plugin_id = manifest.info.id.clone();
                    let plugin_name = manifest.info.name.clone();
                    let plugin_version = manifest.info.version.to_string();

                    // The runtime loader discovers plugins by directory, so
                    // point it at the plugin root's parent.
                    let parent_dir = plugin_root
                        .parent()
                        .unwrap_or_else(|| std::path::Path::new("."))
                        .to_string_lossy()
                        .to_string();

                    let runtime_loader = PluginLoader::new(PluginLoaderConfig {
                        plugin_dirs: vec![parent_dir],
                        ..PluginLoaderConfig::default()
                    });

                    runtime_loader
                        .load_plugin(&plugin_id)
                        .await
                        .map_err(|e| format!("Failed to load plugin into runtime: {}", e))?;

                    let plugin_instance =
                        runtime_loader.get_plugin(&plugin_id).await.ok_or_else(|| {
                            "Plugin loaded but instance was not retrievable from loader".to_string()
                        })?;

                    Ok::<_, String>((
                        plugin_instance,
                        plugin_id.to_string(),
                        plugin_name,
                        plugin_version,
                    ))
                })?;

            // Registry mutation happens after loading succeeded, still on the
            // blocking thread (hence blocking_write, not the async lock).
            let mut registry = state_for_install.plugin_registry.blocking_write();

            if let Some(existing_id) =
                registry.list_plugins().into_iter().find(|id| id.as_str() == plugin_id)
            {
                if force {
                    // force=true: evict the old instance before re-adding.
                    registry.remove_plugin(&existing_id).map_err(|e| {
                        format!("Failed to remove existing plugin before reinstall: {}", e)
                    })?;
                } else {
                    return Err(format!(
                        "Plugin '{}' is already installed. Use force=true to reinstall.",
                        plugin_id
                    ));
                }
            }

            registry
                .add_plugin(plugin_instance)
                .map_err(|e| format!("Failed to register plugin in admin registry: {}", e))?;

            Ok((plugin_id, plugin_name, plugin_version))
        },
    )
    .await;

    // Outer Err = the blocking task panicked/was cancelled; inner Err = a
    // step of the install itself failed.
    let (plugin_id, plugin_name, plugin_version) = match install_result {
        Ok(Ok(result)) => result,
        Ok(Err(err)) => {
            return Json(json!({
                "success": false,
                "error": err
            }))
        }
        Err(err) => {
            return Json(json!({
                "success": false,
                "error": format!("Plugin installation task failed: {}", err)
            }))
        }
    };

    Json(json!({
        "success": true,
        "data": {
            "plugin_id": plugin_id,
            "name": plugin_name,
            "version": plugin_version
        },
        "message": "Plugin installed and registered in runtime."
    }))
}
3406
3407pub async fn validate_plugin(Json(request): Json<PluginValidateRequest>) -> impl IntoResponse {
3409 let source = request.source.trim();
3410 if source.is_empty() {
3411 return Json(json!({
3412 "success": false,
3413 "error": "Plugin source is required"
3414 }));
3415 }
3416
3417 let source = match PluginSource::parse(source) {
3418 Ok(source) => source,
3419 Err(e) => {
3420 return Json(json!({
3421 "success": false,
3422 "error": format!("Invalid plugin source: {}", e)
3423 }));
3424 }
3425 };
3426
3427 let path = match source.clone() {
3428 PluginSource::Local(path) => path,
3429 PluginSource::Url { .. } | PluginSource::Git(_) => {
3430 match resolve_plugin_source_path(source, None).await {
3431 Ok(path) => path,
3432 Err(err) => {
3433 return Json(json!({
3434 "success": false,
3435 "error": err
3436 }))
3437 }
3438 }
3439 }
3440 PluginSource::Registry { .. } => {
3441 return Json(json!({
3442 "success": false,
3443 "error": "Registry plugin validation is not yet supported from the admin API."
3444 }))
3445 }
3446 };
3447
3448 let plugin_root = if path.is_dir() {
3449 find_plugin_directory(&path).unwrap_or(path.clone())
3450 } else {
3451 path
3452 };
3453
3454 let loader = PluginLoader::new(PluginLoaderConfig::default());
3455 match loader.validate_plugin(&plugin_root).await {
3456 Ok(manifest) => Json(json!({
3457 "success": true,
3458 "data": {
3459 "valid": true,
3460 "id": manifest.info.id.to_string(),
3461 "name": manifest.info.name,
3462 "version": manifest.info.version.to_string()
3463 }
3464 })),
3465 Err(e) => Json(json!({
3466 "success": false,
3467 "data": { "valid": false },
3468 "error": format!("Plugin validation failed: {}", e)
3469 })),
3470 }
3471}
3472
3473pub async fn update_traffic_shaping(
3475 State(state): State<AdminState>,
3476 Json(config): Json<TrafficShapingConfig>,
3477) -> Json<ApiResponse<String>> {
3478 if config.burst_loss.burst_probability > 1.0
3479 || config.burst_loss.loss_rate_during_burst > 1.0
3480 || config.burst_loss.burst_probability < 0.0
3481 || config.burst_loss.loss_rate_during_burst < 0.0
3482 {
3483 return Json(ApiResponse::error(
3484 "Burst loss probabilities must be between 0.0 and 1.0".to_string(),
3485 ));
3486 }
3487
3488 {
3489 let mut cfg = state.config.write().await;
3490 cfg.traffic_shaping = config.clone();
3491 }
3492
3493 if let Some(ref chaos_api_state) = state.chaos_api_state {
3494 let mut chaos_config = chaos_api_state.config.write().await;
3495 chaos_config.traffic_shaping = Some(mockforge_chaos::config::TrafficShapingConfig {
3496 enabled: config.enabled,
3497 bandwidth_limit_bps: config.bandwidth.max_bytes_per_sec,
3498 packet_loss_percent: config.burst_loss.loss_rate_during_burst * 100.0,
3499 max_connections: 0,
3500 connection_timeout_ms: 30000,
3501 });
3502 }
3503
3504 Json(ApiResponse::success("Traffic shaping updated".to_string()))
3505}
3506
3507pub async fn import_postman(
3508 State(state): State<AdminState>,
3509 Json(request): Json<serde_json::Value>,
3510) -> Json<ApiResponse<String>> {
3511 use mockforge_core::workspace_import::{import_postman_to_workspace, WorkspaceImportConfig};
3512 use uuid::Uuid;
3513
3514 let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3515 let filename = request.get("filename").and_then(|v| v.as_str());
3516 let environment = request.get("environment").and_then(|v| v.as_str());
3517 let base_url = request.get("base_url").and_then(|v| v.as_str());
3518
3519 let import_result = match mockforge_core::import::import_postman_collection(content, base_url) {
3521 Ok(result) => result,
3522 Err(e) => {
3523 let entry = ImportHistoryEntry {
3525 id: Uuid::new_v4().to_string(),
3526 format: "postman".to_string(),
3527 timestamp: Utc::now(),
3528 routes_count: 0,
3529 variables_count: 0,
3530 warnings_count: 0,
3531 success: false,
3532 filename: filename.map(|s| s.to_string()),
3533 environment: environment.map(|s| s.to_string()),
3534 base_url: base_url.map(|s| s.to_string()),
3535 error_message: Some(e.clone()),
3536 };
3537 let mut history = state.import_history.write().await;
3538 history.push(entry);
3539
3540 return Json(ApiResponse::error(format!("Postman import failed: {}", e)));
3541 }
3542 };
3543
3544 let workspace_name = filename
3546 .and_then(|f| f.split('.').next())
3547 .unwrap_or("Imported Postman Collection");
3548
3549 let config = WorkspaceImportConfig {
3550 create_folders: true,
3551 base_folder_name: None,
3552 preserve_hierarchy: true,
3553 max_depth: 5,
3554 };
3555
3556 let routes: Vec<ImportRoute> = import_result
3558 .routes
3559 .into_iter()
3560 .map(|route| ImportRoute {
3561 method: route.method,
3562 path: route.path,
3563 headers: route.headers,
3564 body: route.body,
3565 response: ImportResponse {
3566 status: route.response.status,
3567 headers: route.response.headers,
3568 body: route.response.body,
3569 },
3570 })
3571 .collect();
3572
3573 match import_postman_to_workspace(routes, workspace_name.to_string(), config) {
3574 Ok(workspace_result) => {
3575 if let Err(e) =
3577 state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3578 {
3579 tracing::error!("Failed to save workspace: {}", e);
3580 return Json(ApiResponse::error(format!(
3581 "Import succeeded but failed to save workspace: {}",
3582 e
3583 )));
3584 }
3585
3586 let entry = ImportHistoryEntry {
3588 id: Uuid::new_v4().to_string(),
3589 format: "postman".to_string(),
3590 timestamp: Utc::now(),
3591 routes_count: workspace_result.request_count,
3592 variables_count: import_result.variables.len(),
3593 warnings_count: workspace_result.warnings.len(),
3594 success: true,
3595 filename: filename.map(|s| s.to_string()),
3596 environment: environment.map(|s| s.to_string()),
3597 base_url: base_url.map(|s| s.to_string()),
3598 error_message: None,
3599 };
3600 let mut history = state.import_history.write().await;
3601 history.push(entry);
3602
3603 Json(ApiResponse::success(format!(
3604 "Successfully imported {} routes into workspace '{}'",
3605 workspace_result.request_count, workspace_name
3606 )))
3607 }
3608 Err(e) => {
3609 let entry = ImportHistoryEntry {
3611 id: Uuid::new_v4().to_string(),
3612 format: "postman".to_string(),
3613 timestamp: Utc::now(),
3614 routes_count: 0,
3615 variables_count: 0,
3616 warnings_count: 0,
3617 success: false,
3618 filename: filename.map(|s| s.to_string()),
3619 environment: environment.map(|s| s.to_string()),
3620 base_url: base_url.map(|s| s.to_string()),
3621 error_message: Some(e.to_string()),
3622 };
3623 let mut history = state.import_history.write().await;
3624 history.push(entry);
3625
3626 Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3627 }
3628 }
3629}
3630
3631pub async fn import_insomnia(
3632 State(state): State<AdminState>,
3633 Json(request): Json<serde_json::Value>,
3634) -> Json<ApiResponse<String>> {
3635 use uuid::Uuid;
3636
3637 let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3638 let filename = request.get("filename").and_then(|v| v.as_str());
3639 let environment = request.get("environment").and_then(|v| v.as_str());
3640 let base_url = request.get("base_url").and_then(|v| v.as_str());
3641
3642 let import_result = match mockforge_core::import::import_insomnia_export(content, environment) {
3644 Ok(result) => result,
3645 Err(e) => {
3646 let entry = ImportHistoryEntry {
3648 id: Uuid::new_v4().to_string(),
3649 format: "insomnia".to_string(),
3650 timestamp: Utc::now(),
3651 routes_count: 0,
3652 variables_count: 0,
3653 warnings_count: 0,
3654 success: false,
3655 filename: filename.map(|s| s.to_string()),
3656 environment: environment.map(|s| s.to_string()),
3657 base_url: base_url.map(|s| s.to_string()),
3658 error_message: Some(e.clone()),
3659 };
3660 let mut history = state.import_history.write().await;
3661 history.push(entry);
3662
3663 return Json(ApiResponse::error(format!("Insomnia import failed: {}", e)));
3664 }
3665 };
3666
3667 let workspace_name = filename
3669 .and_then(|f| f.split('.').next())
3670 .unwrap_or("Imported Insomnia Collection");
3671
3672 let _config = WorkspaceImportConfig {
3673 create_folders: true,
3674 base_folder_name: None,
3675 preserve_hierarchy: true,
3676 max_depth: 5,
3677 };
3678
3679 let variables_count = import_result.variables.len();
3681
3682 match mockforge_core::workspace_import::create_workspace_from_insomnia(
3683 import_result,
3684 Some(workspace_name.to_string()),
3685 ) {
3686 Ok(workspace_result) => {
3687 if let Err(e) =
3689 state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3690 {
3691 tracing::error!("Failed to save workspace: {}", e);
3692 return Json(ApiResponse::error(format!(
3693 "Import succeeded but failed to save workspace: {}",
3694 e
3695 )));
3696 }
3697
3698 let entry = ImportHistoryEntry {
3700 id: Uuid::new_v4().to_string(),
3701 format: "insomnia".to_string(),
3702 timestamp: Utc::now(),
3703 routes_count: workspace_result.request_count,
3704 variables_count,
3705 warnings_count: workspace_result.warnings.len(),
3706 success: true,
3707 filename: filename.map(|s| s.to_string()),
3708 environment: environment.map(|s| s.to_string()),
3709 base_url: base_url.map(|s| s.to_string()),
3710 error_message: None,
3711 };
3712 let mut history = state.import_history.write().await;
3713 history.push(entry);
3714
3715 Json(ApiResponse::success(format!(
3716 "Successfully imported {} routes into workspace '{}'",
3717 workspace_result.request_count, workspace_name
3718 )))
3719 }
3720 Err(e) => {
3721 let entry = ImportHistoryEntry {
3723 id: Uuid::new_v4().to_string(),
3724 format: "insomnia".to_string(),
3725 timestamp: Utc::now(),
3726 routes_count: 0,
3727 variables_count: 0,
3728 warnings_count: 0,
3729 success: false,
3730 filename: filename.map(|s| s.to_string()),
3731 environment: environment.map(|s| s.to_string()),
3732 base_url: base_url.map(|s| s.to_string()),
3733 error_message: Some(e.to_string()),
3734 };
3735 let mut history = state.import_history.write().await;
3736 history.push(entry);
3737
3738 Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3739 }
3740 }
3741}
3742
3743pub async fn import_openapi(
3744 State(_state): State<AdminState>,
3745 Json(_request): Json<serde_json::Value>,
3746) -> Json<ApiResponse<String>> {
3747 Json(ApiResponse::success("OpenAPI import completed".to_string()))
3748}
3749
3750pub async fn import_curl(
3751 State(state): State<AdminState>,
3752 Json(request): Json<serde_json::Value>,
3753) -> Json<ApiResponse<String>> {
3754 use uuid::Uuid;
3755
3756 let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3757 let filename = request.get("filename").and_then(|v| v.as_str());
3758 let base_url = request.get("base_url").and_then(|v| v.as_str());
3759
3760 let import_result = match mockforge_core::import::import_curl_commands(content, base_url) {
3762 Ok(result) => result,
3763 Err(e) => {
3764 let entry = ImportHistoryEntry {
3766 id: Uuid::new_v4().to_string(),
3767 format: "curl".to_string(),
3768 timestamp: Utc::now(),
3769 routes_count: 0,
3770 variables_count: 0,
3771 warnings_count: 0,
3772 success: false,
3773 filename: filename.map(|s| s.to_string()),
3774 environment: None,
3775 base_url: base_url.map(|s| s.to_string()),
3776 error_message: Some(e.clone()),
3777 };
3778 let mut history = state.import_history.write().await;
3779 history.push(entry);
3780
3781 return Json(ApiResponse::error(format!("Curl import failed: {}", e)));
3782 }
3783 };
3784
3785 let workspace_name =
3787 filename.and_then(|f| f.split('.').next()).unwrap_or("Imported Curl Commands");
3788
3789 match mockforge_core::workspace_import::create_workspace_from_curl(
3790 import_result,
3791 Some(workspace_name.to_string()),
3792 ) {
3793 Ok(workspace_result) => {
3794 if let Err(e) =
3796 state.workspace_persistence.save_workspace(&workspace_result.workspace).await
3797 {
3798 tracing::error!("Failed to save workspace: {}", e);
3799 return Json(ApiResponse::error(format!(
3800 "Import succeeded but failed to save workspace: {}",
3801 e
3802 )));
3803 }
3804
3805 let entry = ImportHistoryEntry {
3807 id: Uuid::new_v4().to_string(),
3808 format: "curl".to_string(),
3809 timestamp: Utc::now(),
3810 routes_count: workspace_result.request_count,
3811 variables_count: 0, warnings_count: workspace_result.warnings.len(),
3813 success: true,
3814 filename: filename.map(|s| s.to_string()),
3815 environment: None,
3816 base_url: base_url.map(|s| s.to_string()),
3817 error_message: None,
3818 };
3819 let mut history = state.import_history.write().await;
3820 history.push(entry);
3821
3822 Json(ApiResponse::success(format!(
3823 "Successfully imported {} routes into workspace '{}'",
3824 workspace_result.request_count, workspace_name
3825 )))
3826 }
3827 Err(e) => {
3828 let entry = ImportHistoryEntry {
3830 id: Uuid::new_v4().to_string(),
3831 format: "curl".to_string(),
3832 timestamp: Utc::now(),
3833 routes_count: 0,
3834 variables_count: 0,
3835 warnings_count: 0,
3836 success: false,
3837 filename: filename.map(|s| s.to_string()),
3838 environment: None,
3839 base_url: base_url.map(|s| s.to_string()),
3840 error_message: Some(e.to_string()),
3841 };
3842 let mut history = state.import_history.write().await;
3843 history.push(entry);
3844
3845 Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
3846 }
3847 }
3848}
3849
3850pub async fn preview_import(
3851 State(_state): State<AdminState>,
3852 Json(request): Json<serde_json::Value>,
3853) -> Json<ApiResponse<serde_json::Value>> {
3854 use mockforge_core::import::{
3855 import_curl_commands, import_insomnia_export, import_postman_collection,
3856 };
3857
3858 let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
3859 let filename = request.get("filename").and_then(|v| v.as_str());
3860 let environment = request.get("environment").and_then(|v| v.as_str());
3861 let base_url = request.get("base_url").and_then(|v| v.as_str());
3862
3863 let format = if let Some(fname) = filename {
3865 if fname.to_lowercase().contains("postman")
3866 || fname.to_lowercase().ends_with(".postman_collection")
3867 {
3868 "postman"
3869 } else if fname.to_lowercase().contains("insomnia")
3870 || fname.to_lowercase().ends_with(".insomnia")
3871 {
3872 "insomnia"
3873 } else if fname.to_lowercase().contains("curl")
3874 || fname.to_lowercase().ends_with(".sh")
3875 || fname.to_lowercase().ends_with(".curl")
3876 {
3877 "curl"
3878 } else {
3879 "unknown"
3880 }
3881 } else {
3882 "unknown"
3883 };
3884
3885 match format {
3886 "postman" => match import_postman_collection(content, base_url) {
3887 Ok(import_result) => {
3888 let routes: Vec<serde_json::Value> = import_result
3889 .routes
3890 .into_iter()
3891 .map(|route| {
3892 serde_json::json!({
3893 "method": route.method,
3894 "path": route.path,
3895 "headers": route.headers,
3896 "body": route.body,
3897 "status_code": route.response.status,
3898 "response": serde_json::json!({
3899 "status": route.response.status,
3900 "headers": route.response.headers,
3901 "body": route.response.body
3902 })
3903 })
3904 })
3905 .collect();
3906
3907 let response = serde_json::json!({
3908 "routes": routes,
3909 "variables": import_result.variables,
3910 "warnings": import_result.warnings
3911 });
3912
3913 Json(ApiResponse::success(response))
3914 }
3915 Err(e) => Json(ApiResponse::error(format!("Postman import failed: {}", e))),
3916 },
3917 "insomnia" => match import_insomnia_export(content, environment) {
3918 Ok(import_result) => {
3919 let routes: Vec<serde_json::Value> = import_result
3920 .routes
3921 .into_iter()
3922 .map(|route| {
3923 serde_json::json!({
3924 "method": route.method,
3925 "path": route.path,
3926 "headers": route.headers,
3927 "body": route.body,
3928 "status_code": route.response.status,
3929 "response": serde_json::json!({
3930 "status": route.response.status,
3931 "headers": route.response.headers,
3932 "body": route.response.body
3933 })
3934 })
3935 })
3936 .collect();
3937
3938 let response = serde_json::json!({
3939 "routes": routes,
3940 "variables": import_result.variables,
3941 "warnings": import_result.warnings
3942 });
3943
3944 Json(ApiResponse::success(response))
3945 }
3946 Err(e) => Json(ApiResponse::error(format!("Insomnia import failed: {}", e))),
3947 },
3948 "curl" => match import_curl_commands(content, base_url) {
3949 Ok(import_result) => {
3950 let routes: Vec<serde_json::Value> = import_result
3951 .routes
3952 .into_iter()
3953 .map(|route| {
3954 serde_json::json!({
3955 "method": route.method,
3956 "path": route.path,
3957 "headers": route.headers,
3958 "body": route.body,
3959 "status_code": route.response.status,
3960 "response": serde_json::json!({
3961 "status": route.response.status,
3962 "headers": route.response.headers,
3963 "body": route.response.body
3964 })
3965 })
3966 })
3967 .collect();
3968
3969 let response = serde_json::json!({
3970 "routes": routes,
3971 "variables": serde_json::json!({}),
3972 "warnings": import_result.warnings
3973 });
3974
3975 Json(ApiResponse::success(response))
3976 }
3977 Err(e) => Json(ApiResponse::error(format!("Curl import failed: {}", e))),
3978 },
3979 _ => Json(ApiResponse::error("Unsupported import format".to_string())),
3980 }
3981}
3982
3983pub async fn get_import_history(
3984 State(state): State<AdminState>,
3985) -> Json<ApiResponse<serde_json::Value>> {
3986 let history = state.import_history.read().await;
3987 let total = history.len();
3988
3989 let imports: Vec<serde_json::Value> = history
3990 .iter()
3991 .rev()
3992 .take(50)
3993 .map(|entry| {
3994 serde_json::json!({
3995 "id": entry.id,
3996 "format": entry.format,
3997 "timestamp": entry.timestamp.to_rfc3339(),
3998 "routes_count": entry.routes_count,
3999 "variables_count": entry.variables_count,
4000 "warnings_count": entry.warnings_count,
4001 "success": entry.success,
4002 "filename": entry.filename,
4003 "environment": entry.environment,
4004 "base_url": entry.base_url,
4005 "error_message": entry.error_message
4006 })
4007 })
4008 .collect();
4009
4010 let response = serde_json::json!({
4011 "imports": imports,
4012 "total": total
4013 });
4014
4015 Json(ApiResponse::success(response))
4016}
4017
4018pub async fn get_admin_api_state(
4019 State(_state): State<AdminState>,
4020) -> Json<ApiResponse<serde_json::Value>> {
4021 Json(ApiResponse::success(serde_json::json!({
4022 "status": "active"
4023 })))
4024}
4025
4026pub async fn get_admin_api_replay(
4027 State(_state): State<AdminState>,
4028) -> Json<ApiResponse<serde_json::Value>> {
4029 Json(ApiResponse::success(serde_json::json!({
4030 "replay": []
4031 })))
4032}
4033
4034pub async fn get_sse_status(
4035 State(_state): State<AdminState>,
4036) -> Json<ApiResponse<serde_json::Value>> {
4037 Json(ApiResponse::success(serde_json::json!({
4038 "available": true,
4039 "endpoint": "/sse",
4040 "config": {
4041 "event_type": "status",
4042 "interval_ms": 1000,
4043 "data_template": "{}"
4044 }
4045 })))
4046}
4047
4048pub async fn get_sse_connections(
4049 State(_state): State<AdminState>,
4050) -> Json<ApiResponse<serde_json::Value>> {
4051 Json(ApiResponse::success(serde_json::json!({
4052 "active_connections": 0
4053 })))
4054}
4055
4056pub async fn get_workspaces(
4058 State(_state): State<AdminState>,
4059) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4060 Json(ApiResponse::success(vec![]))
4061}
4062
4063pub async fn create_workspace(
4064 State(_state): State<AdminState>,
4065 Json(_request): Json<serde_json::Value>,
4066) -> Json<ApiResponse<String>> {
4067 Json(ApiResponse::success("Workspace created".to_string()))
4068}
4069
4070pub async fn open_workspace_from_directory(
4071 State(_state): State<AdminState>,
4072 Json(_request): Json<serde_json::Value>,
4073) -> Json<ApiResponse<String>> {
4074 Json(ApiResponse::success("Workspace opened from directory".to_string()))
4075}
4076
4077pub async fn get_reality_level(
4081 State(state): State<AdminState>,
4082) -> Json<ApiResponse<serde_json::Value>> {
4083 let engine = state.reality_engine.read().await;
4084 let level = engine.get_level().await;
4085 let config = engine.get_config().await;
4086
4087 Json(ApiResponse::success(serde_json::json!({
4088 "level": level.value(),
4089 "level_name": level.name(),
4090 "description": level.description(),
4091 "chaos": {
4092 "enabled": config.chaos.enabled,
4093 "error_rate": config.chaos.error_rate,
4094 "delay_rate": config.chaos.delay_rate,
4095 },
4096 "latency": {
4097 "base_ms": config.latency.base_ms,
4098 "jitter_ms": config.latency.jitter_ms,
4099 },
4100 "mockai": {
4101 "enabled": config.mockai.enabled,
4102 },
4103 })))
4104}
4105
/// Request body for `set_reality_level`.
#[derive(Deserialize)]
pub struct SetRealityLevelRequest {
    /// Target reality level; the handler rejects values outside 1–5.
    level: u8,
}
4111
/// Set the global reality level (1–5) and hot-reload the dependent
/// subsystems (chaos engine, latency injector, MockAI).
///
/// Subsystem update failures are collected as `warnings` in the response
/// rather than failing the request, since the level itself was applied.
pub async fn set_reality_level(
    State(state): State<AdminState>,
    Json(request): Json<SetRealityLevelRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    // Reject values outside the supported range up front.
    let level = match mockforge_core::RealityLevel::from_value(request.level) {
        Some(l) => l,
        None => {
            return Json(ApiResponse::error(format!(
                "Invalid reality level: {}. Must be between 1 and 5.",
                request.level
            )));
        }
    };

    // Apply the level and snapshot the derived config, releasing the write
    // lock before the (potentially slow) subsystem updates below.
    let engine = state.reality_engine.write().await;
    engine.set_level(level).await;
    let config = engine.get_config().await;
    drop(engine); let mut update_errors = Vec::new();

    // 1) Chaos engine: translate the reality config into chaos latency and
    //    fault-injection settings, when a chaos API state is attached.
    if let Some(ref chaos_api_state) = state.chaos_api_state {
        let mut chaos_config = chaos_api_state.config.write().await;

        use mockforge_chaos::config::{FaultInjectionConfig, LatencyConfig};

        // Latency is enabled only when a base delay is configured.
        let latency_config = if config.latency.base_ms > 0 {
            Some(LatencyConfig {
                enabled: true,
                fixed_delay_ms: Some(config.latency.base_ms),
                random_delay_range_ms: config
                    .latency
                    .max_ms
                    .map(|max| (config.latency.min_ms, max)),
                // Express jitter as a fraction of the base delay, capped at 1.0.
                jitter_percent: if config.latency.jitter_ms > 0 {
                    (config.latency.jitter_ms as f64 / config.latency.base_ms as f64).min(1.0)
                } else {
                    0.0
                },
                probability: 1.0,
            })
        } else {
            None
        };

        // Fault injection mirrors the chaos section of the reality config.
        let fault_injection_config = if config.chaos.enabled {
            Some(FaultInjectionConfig {
                enabled: true,
                http_errors: config.chaos.status_codes.clone(),
                http_error_probability: config.chaos.error_rate,
                connection_errors: false,
                connection_error_probability: 0.0,
                timeout_errors: config.chaos.inject_timeouts,
                timeout_ms: config.chaos.timeout_ms,
                // Timeouts reuse the error rate only when timeout injection is on.
                timeout_probability: if config.chaos.inject_timeouts {
                    config.chaos.error_rate
                } else {
                    0.0
                },
                partial_responses: false,
                partial_response_probability: 0.0,
                payload_corruption: false,
                payload_corruption_probability: 0.0,
                corruption_type: mockforge_chaos::config::CorruptionType::None,
                error_pattern: Some(mockforge_chaos::config::ErrorPattern::Random {
                    probability: config.chaos.error_rate,
                }),
                mockai_enabled: false,
            })
        } else {
            None
        };

        chaos_config.enabled = config.chaos.enabled;
        chaos_config.latency = latency_config;
        chaos_config.fault_injection = fault_injection_config;

        // Release the chaos config lock before logging.
        drop(chaos_config);
        tracing::info!("✅ Updated chaos config for reality level {}", level.value());

    }

    // 2) Latency injector: hot-swap the latency profile, if one is attached.
    if let Some(ref latency_injector) = state.latency_injector {
        match mockforge_core::latency::LatencyInjector::update_profile_async(
            latency_injector,
            config.latency.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated latency injector for reality level {}", level.value());
            }
            Err(e) => {
                // Non-fatal: record the failure and continue.
                let error_msg = format!("Failed to update latency injector: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // 3) MockAI: push the new intelligent-behavior config, if attached.
    if let Some(ref mockai) = state.mockai {
        match mockforge_core::intelligent_behavior::MockAI::update_config_async(
            mockai,
            config.mockai.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated MockAI config for reality level {}", level.value());
            }
            Err(e) => {
                // Non-fatal: record the failure and continue.
                let error_msg = format!("Failed to update MockAI: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }

    // Echo the applied level and derived settings back to the caller.
    let mut response = serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": {
            "enabled": config.chaos.enabled,
            "error_rate": config.chaos.error_rate,
            "delay_rate": config.chaos.delay_rate,
        },
        "latency": {
            "base_ms": config.latency.base_ms,
            "jitter_ms": config.latency.jitter_ms,
        },
        "mockai": {
            "enabled": config.mockai.enabled,
        },
    });

    // Surface partial failures as warnings; the level change itself succeeded.
    if !update_errors.is_empty() {
        response["warnings"] = serde_json::json!(update_errors);
        tracing::warn!(
            "Reality level updated to {} but some subsystems failed to update: {:?}",
            level.value(),
            update_errors
        );
    } else {
        tracing::info!(
            "✅ Reality level successfully updated to {} (hot-reload applied)",
            level.value()
        );
    }

    Json(ApiResponse::success(response))
}
4278
4279pub async fn list_reality_presets(
4281 State(state): State<AdminState>,
4282) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4283 let persistence = &state.workspace_persistence;
4284 match persistence.list_reality_presets().await {
4285 Ok(preset_paths) => {
4286 let presets: Vec<serde_json::Value> = preset_paths
4287 .iter()
4288 .map(|path| {
4289 serde_json::json!({
4290 "id": path.file_name().and_then(|n| n.to_str()).unwrap_or("unknown"),
4291 "path": path.to_string_lossy(),
4292 "name": path.file_stem().and_then(|n| n.to_str()).unwrap_or("unknown"),
4293 })
4294 })
4295 .collect();
4296 Json(ApiResponse::success(presets))
4297 }
4298 Err(e) => Json(ApiResponse::error(format!("Failed to list presets: {}", e))),
4299 }
4300}
4301
/// Request body for `import_reality_preset`.
#[derive(Deserialize)]
pub struct ImportPresetRequest {
    /// Filesystem path of the preset file to import.
    path: String,
}
4307
4308pub async fn import_reality_preset(
4309 State(state): State<AdminState>,
4310 Json(request): Json<ImportPresetRequest>,
4311) -> Json<ApiResponse<serde_json::Value>> {
4312 let persistence = &state.workspace_persistence;
4313 let path = std::path::Path::new(&request.path);
4314
4315 match persistence.import_reality_preset(path).await {
4316 Ok(preset) => {
4317 let engine = state.reality_engine.write().await;
4319 engine.apply_preset(preset.clone()).await;
4320
4321 Json(ApiResponse::success(serde_json::json!({
4322 "name": preset.name,
4323 "description": preset.description,
4324 "level": preset.config.level.value(),
4325 "level_name": preset.config.level.name(),
4326 })))
4327 }
4328 Err(e) => Json(ApiResponse::error(format!("Failed to import preset: {}", e))),
4329 }
4330}
4331
/// Request body for `export_reality_preset`.
#[derive(Deserialize)]
pub struct ExportPresetRequest {
    /// Preset name; also used to derive the output file name.
    name: String,
    /// Optional human-readable description stored with the preset.
    description: Option<String>,
}
4338
4339pub async fn export_reality_preset(
4340 State(state): State<AdminState>,
4341 Json(request): Json<ExportPresetRequest>,
4342) -> Json<ApiResponse<serde_json::Value>> {
4343 let engine = state.reality_engine.read().await;
4344 let preset = engine.create_preset(request.name.clone(), request.description.clone()).await;
4345
4346 let persistence = &state.workspace_persistence;
4347 let presets_dir = persistence.presets_dir();
4348 let filename = format!("{}.json", request.name.replace(' ', "_").to_lowercase());
4349 let output_path = presets_dir.join(&filename);
4350
4351 match persistence.export_reality_preset(&preset, &output_path).await {
4352 Ok(_) => Json(ApiResponse::success(serde_json::json!({
4353 "name": preset.name,
4354 "description": preset.description,
4355 "path": output_path.to_string_lossy(),
4356 "level": preset.config.level.value(),
4357 }))),
4358 Err(e) => Json(ApiResponse::error(format!("Failed to export preset: {}", e))),
4359 }
4360}
4361
4362pub async fn get_continuum_ratio(
4366 State(state): State<AdminState>,
4367 Query(params): Query<HashMap<String, String>>,
4368) -> Json<ApiResponse<serde_json::Value>> {
4369 let path = params.get("path").cloned().unwrap_or_else(|| "/".to_string());
4370 let engine = state.continuum_engine.read().await;
4371 let ratio = engine.get_blend_ratio(&path).await;
4372 let config = engine.get_config().await;
4373 let enabled = engine.is_enabled().await;
4374
4375 Json(ApiResponse::success(serde_json::json!({
4376 "path": path,
4377 "blend_ratio": ratio,
4378 "enabled": enabled,
4379 "transition_mode": format!("{:?}", config.transition_mode),
4380 "merge_strategy": format!("{:?}", config.merge_strategy),
4381 "default_ratio": config.default_ratio,
4382 })))
4383}
4384
/// Request body for `set_continuum_ratio`.
#[derive(Deserialize)]
pub struct SetContinuumRatioRequest {
    /// Route path the blend ratio applies to.
    path: String,
    /// Desired blend ratio; the handler clamps it to [0.0, 1.0].
    ratio: f64,
}
4391
4392pub async fn set_continuum_ratio(
4393 State(state): State<AdminState>,
4394 Json(request): Json<SetContinuumRatioRequest>,
4395) -> Json<ApiResponse<serde_json::Value>> {
4396 let ratio = request.ratio.clamp(0.0, 1.0);
4397 let engine = state.continuum_engine.read().await;
4398 engine.set_blend_ratio(&request.path, ratio).await;
4399
4400 Json(ApiResponse::success(serde_json::json!({
4401 "path": request.path,
4402 "blend_ratio": ratio,
4403 })))
4404}
4405
4406pub async fn get_continuum_schedule(
4408 State(state): State<AdminState>,
4409) -> Json<ApiResponse<serde_json::Value>> {
4410 let engine = state.continuum_engine.read().await;
4411 let schedule = engine.get_time_schedule().await;
4412
4413 match schedule {
4414 Some(s) => Json(ApiResponse::success(serde_json::json!({
4415 "start_time": s.start_time.to_rfc3339(),
4416 "end_time": s.end_time.to_rfc3339(),
4417 "start_ratio": s.start_ratio,
4418 "end_ratio": s.end_ratio,
4419 "curve": format!("{:?}", s.curve),
4420 "duration_days": s.duration().num_days(),
4421 }))),
4422 None => Json(ApiResponse::success(serde_json::json!(null))),
4423 }
4424}
4425
/// Request body for `set_continuum_schedule`.
#[derive(Deserialize)]
pub struct SetContinuumScheduleRequest {
    /// Schedule start, RFC 3339 timestamp.
    start_time: String,
    /// Schedule end, RFC 3339 timestamp.
    end_time: String,
    /// Blend ratio at the start of the window; clamped to [0.0, 1.0].
    start_ratio: f64,
    /// Blend ratio at the end of the window; clamped to [0.0, 1.0].
    end_ratio: f64,
    /// Transition curve name ("linear", "exponential", or "sigmoid");
    /// anything else, or omission, falls back to linear.
    curve: Option<String>,
}
4435
4436pub async fn set_continuum_schedule(
4437 State(state): State<AdminState>,
4438 Json(request): Json<SetContinuumScheduleRequest>,
4439) -> Json<ApiResponse<serde_json::Value>> {
4440 let start_time = chrono::DateTime::parse_from_rfc3339(&request.start_time)
4441 .map_err(|e| format!("Invalid start_time: {}", e))
4442 .map(|dt| dt.with_timezone(&Utc));
4443
4444 let end_time = chrono::DateTime::parse_from_rfc3339(&request.end_time)
4445 .map_err(|e| format!("Invalid end_time: {}", e))
4446 .map(|dt| dt.with_timezone(&Utc));
4447
4448 match (start_time, end_time) {
4449 (Ok(start), Ok(end)) => {
4450 let curve = request
4451 .curve
4452 .as_deref()
4453 .map(|c| match c {
4454 "linear" => mockforge_core::TransitionCurve::Linear,
4455 "exponential" => mockforge_core::TransitionCurve::Exponential,
4456 "sigmoid" => mockforge_core::TransitionCurve::Sigmoid,
4457 _ => mockforge_core::TransitionCurve::Linear,
4458 })
4459 .unwrap_or(mockforge_core::TransitionCurve::Linear);
4460
4461 let schedule = mockforge_core::TimeSchedule::with_curve(
4462 start,
4463 end,
4464 request.start_ratio.clamp(0.0, 1.0),
4465 request.end_ratio.clamp(0.0, 1.0),
4466 curve,
4467 );
4468
4469 let engine = state.continuum_engine.read().await;
4470 engine.set_time_schedule(schedule.clone()).await;
4471
4472 Json(ApiResponse::success(serde_json::json!({
4473 "start_time": schedule.start_time.to_rfc3339(),
4474 "end_time": schedule.end_time.to_rfc3339(),
4475 "start_ratio": schedule.start_ratio,
4476 "end_ratio": schedule.end_ratio,
4477 "curve": format!("{:?}", schedule.curve),
4478 })))
4479 }
4480 (Err(e), _) | (_, Err(e)) => Json(ApiResponse::error(e)),
4481 }
4482}
4483
/// Request body for `advance_continuum_ratio`.
#[derive(Deserialize)]
pub struct AdvanceContinuumRatioRequest {
    /// Amount to add to the default blend ratio; the handler defaults this
    /// to 0.1 when omitted.
    increment: Option<f64>,
}
4489
4490pub async fn advance_continuum_ratio(
4491 State(state): State<AdminState>,
4492 Json(request): Json<AdvanceContinuumRatioRequest>,
4493) -> Json<ApiResponse<serde_json::Value>> {
4494 let increment = request.increment.unwrap_or(0.1);
4495 let engine = state.continuum_engine.read().await;
4496 engine.advance_ratio(increment).await;
4497 let config = engine.get_config().await;
4498
4499 Json(ApiResponse::success(serde_json::json!({
4500 "default_ratio": config.default_ratio,
4501 "increment": increment,
4502 })))
4503}
4504
/// Request body for `set_continuum_enabled`.
#[derive(Deserialize)]
pub struct SetContinuumEnabledRequest {
    /// Whether the continuum engine should be active.
    enabled: bool,
}
4510
4511pub async fn set_continuum_enabled(
4512 State(state): State<AdminState>,
4513 Json(request): Json<SetContinuumEnabledRequest>,
4514) -> Json<ApiResponse<serde_json::Value>> {
4515 let engine = state.continuum_engine.read().await;
4516 engine.set_enabled(request.enabled).await;
4517
4518 Json(ApiResponse::success(serde_json::json!({
4519 "enabled": request.enabled,
4520 })))
4521}
4522
4523pub async fn get_continuum_overrides(
4525 State(state): State<AdminState>,
4526) -> Json<ApiResponse<serde_json::Value>> {
4527 let engine = state.continuum_engine.read().await;
4528 let overrides = engine.get_manual_overrides().await;
4529
4530 Json(ApiResponse::success(serde_json::json!(overrides)))
4531}
4532
4533pub async fn clear_continuum_overrides(
4535 State(state): State<AdminState>,
4536) -> Json<ApiResponse<serde_json::Value>> {
4537 let engine = state.continuum_engine.read().await;
4538 engine.clear_manual_overrides().await;
4539
4540 Json(ApiResponse::success(serde_json::json!({
4541 "message": "All manual overrides cleared",
4542 })))
4543}
4544
4545pub async fn get_workspace(
4546 State(_state): State<AdminState>,
4547 Path(workspace_id): Path<String>,
4548) -> Json<ApiResponse<serde_json::Value>> {
4549 Json(ApiResponse::success(serde_json::json!({
4550 "workspace": {
4551 "summary": {
4552 "id": workspace_id,
4553 "name": "Mock Workspace",
4554 "description": "A mock workspace"
4555 },
4556 "folders": [],
4557 "requests": []
4558 }
4559 })))
4560}
4561
4562pub async fn delete_workspace(
4563 State(_state): State<AdminState>,
4564 Path(_workspace_id): Path<String>,
4565) -> Json<ApiResponse<String>> {
4566 Json(ApiResponse::success("Workspace deleted".to_string()))
4567}
4568
4569pub async fn set_active_workspace(
4570 State(_state): State<AdminState>,
4571 Path(_workspace_id): Path<String>,
4572) -> Json<ApiResponse<String>> {
4573 Json(ApiResponse::success("Workspace activated".to_string()))
4574}
4575
4576pub async fn create_folder(
4577 State(_state): State<AdminState>,
4578 Path(_workspace_id): Path<String>,
4579 Json(_request): Json<serde_json::Value>,
4580) -> Json<ApiResponse<String>> {
4581 Json(ApiResponse::success("Folder created".to_string()))
4582}
4583
4584pub async fn create_request(
4585 State(_state): State<AdminState>,
4586 Path(_workspace_id): Path<String>,
4587 Json(_request): Json<serde_json::Value>,
4588) -> Json<ApiResponse<String>> {
4589 Json(ApiResponse::success("Request created".to_string()))
4590}
4591
4592pub async fn execute_workspace_request(
4593 State(_state): State<AdminState>,
4594 Path((_workspace_id, _request_id)): Path<(String, String)>,
4595 Json(_request): Json<serde_json::Value>,
4596) -> Json<ApiResponse<serde_json::Value>> {
4597 Json(ApiResponse::success(serde_json::json!({
4598 "status": "executed",
4599 "response": {}
4600 })))
4601}
4602
4603pub async fn get_request_history(
4604 State(_state): State<AdminState>,
4605 Path((_workspace_id, _request_id)): Path<(String, String)>,
4606) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4607 Json(ApiResponse::success(vec![]))
4608}
4609
4610pub async fn get_folder(
4611 State(_state): State<AdminState>,
4612 Path((_workspace_id, folder_id)): Path<(String, String)>,
4613) -> Json<ApiResponse<serde_json::Value>> {
4614 Json(ApiResponse::success(serde_json::json!({
4615 "folder": {
4616 "summary": {
4617 "id": folder_id,
4618 "name": "Mock Folder",
4619 "description": "A mock folder"
4620 },
4621 "requests": []
4622 }
4623 })))
4624}
4625
4626pub async fn import_to_workspace(
4627 State(_state): State<AdminState>,
4628 Path(_workspace_id): Path<String>,
4629 Json(_request): Json<serde_json::Value>,
4630) -> Json<ApiResponse<String>> {
4631 Json(ApiResponse::success("Import to workspace completed".to_string()))
4632}
4633
4634pub async fn export_workspaces(
4635 State(_state): State<AdminState>,
4636 Json(_request): Json<serde_json::Value>,
4637) -> Json<ApiResponse<String>> {
4638 Json(ApiResponse::success("Workspaces exported".to_string()))
4639}
4640
4641pub async fn get_environments(
4643 State(_state): State<AdminState>,
4644 Path(_workspace_id): Path<String>,
4645) -> Json<ApiResponse<serde_json::Value>> {
4646 let environments = vec![serde_json::json!({
4648 "id": "global",
4649 "name": "Global",
4650 "description": "Global environment variables",
4651 "variable_count": 0,
4652 "is_global": true,
4653 "active": true,
4654 "order": 0
4655 })];
4656
4657 Json(ApiResponse::success(serde_json::json!({
4658 "environments": environments,
4659 "total": 1
4660 })))
4661}
4662
4663pub async fn create_environment(
4664 State(_state): State<AdminState>,
4665 Path(_workspace_id): Path<String>,
4666 Json(_request): Json<serde_json::Value>,
4667) -> Json<ApiResponse<String>> {
4668 Json(ApiResponse::success("Environment created".to_string()))
4669}
4670
4671pub async fn update_environment(
4672 State(_state): State<AdminState>,
4673 Path((_workspace_id, _environment_id)): Path<(String, String)>,
4674 Json(_request): Json<serde_json::Value>,
4675) -> Json<ApiResponse<String>> {
4676 Json(ApiResponse::success("Environment updated".to_string()))
4677}
4678
4679pub async fn delete_environment(
4680 State(_state): State<AdminState>,
4681 Path((_workspace_id, _environment_id)): Path<(String, String)>,
4682) -> Json<ApiResponse<String>> {
4683 Json(ApiResponse::success("Environment deleted".to_string()))
4684}
4685
4686pub async fn set_active_environment(
4687 State(_state): State<AdminState>,
4688 Path((_workspace_id, _environment_id)): Path<(String, String)>,
4689) -> Json<ApiResponse<String>> {
4690 Json(ApiResponse::success("Environment activated".to_string()))
4691}
4692
4693pub async fn update_environments_order(
4694 State(_state): State<AdminState>,
4695 Path(_workspace_id): Path<String>,
4696 Json(_request): Json<serde_json::Value>,
4697) -> Json<ApiResponse<String>> {
4698 Json(ApiResponse::success("Environment order updated".to_string()))
4699}
4700
4701pub async fn get_environment_variables(
4702 State(_state): State<AdminState>,
4703 Path((_workspace_id, _environment_id)): Path<(String, String)>,
4704) -> Json<ApiResponse<serde_json::Value>> {
4705 Json(ApiResponse::success(serde_json::json!({
4706 "variables": []
4707 })))
4708}
4709
4710pub async fn set_environment_variable(
4711 State(_state): State<AdminState>,
4712 Path((_workspace_id, _environment_id)): Path<(String, String)>,
4713 Json(_request): Json<serde_json::Value>,
4714) -> Json<ApiResponse<String>> {
4715 Json(ApiResponse::success("Environment variable set".to_string()))
4716}
4717
4718pub async fn remove_environment_variable(
4719 State(_state): State<AdminState>,
4720 Path((_workspace_id, _environment_id, _variable_name)): Path<(String, String, String)>,
4721) -> Json<ApiResponse<String>> {
4722 Json(ApiResponse::success("Environment variable removed".to_string()))
4723}
4724
4725pub async fn get_autocomplete_suggestions(
4727 State(_state): State<AdminState>,
4728 Path(_workspace_id): Path<String>,
4729 Json(_request): Json<serde_json::Value>,
4730) -> Json<ApiResponse<serde_json::Value>> {
4731 Json(ApiResponse::success(serde_json::json!({
4732 "suggestions": [],
4733 "start_position": 0,
4734 "end_position": 0
4735 })))
4736}
4737
4738pub async fn get_sync_status(
4740 State(_state): State<AdminState>,
4741 Path(_workspace_id): Path<String>,
4742) -> Json<ApiResponse<serde_json::Value>> {
4743 Json(ApiResponse::success(serde_json::json!({
4744 "status": "disabled"
4745 })))
4746}
4747
4748pub async fn configure_sync(
4749 State(_state): State<AdminState>,
4750 Path(_workspace_id): Path<String>,
4751 Json(_request): Json<serde_json::Value>,
4752) -> Json<ApiResponse<String>> {
4753 Json(ApiResponse::success("Sync configured".to_string()))
4754}
4755
4756pub async fn disable_sync(
4757 State(_state): State<AdminState>,
4758 Path(_workspace_id): Path<String>,
4759) -> Json<ApiResponse<String>> {
4760 Json(ApiResponse::success("Sync disabled".to_string()))
4761}
4762
4763pub async fn trigger_sync(
4764 State(_state): State<AdminState>,
4765 Path(_workspace_id): Path<String>,
4766) -> Json<ApiResponse<String>> {
4767 Json(ApiResponse::success("Sync triggered".to_string()))
4768}
4769
4770pub async fn get_sync_changes(
4771 State(_state): State<AdminState>,
4772 Path(_workspace_id): Path<String>,
4773) -> Json<ApiResponse<Vec<serde_json::Value>>> {
4774 Json(ApiResponse::success(vec![]))
4775}
4776
4777pub async fn confirm_sync_changes(
4778 State(_state): State<AdminState>,
4779 Path(_workspace_id): Path<String>,
4780 Json(_request): Json<serde_json::Value>,
4781) -> Json<ApiResponse<String>> {
4782 Json(ApiResponse::success("Sync changes confirmed".to_string()))
4783}
4784
4785pub async fn clear_import_history(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
4787 let mut history = state.import_history.write().await;
4788 history.clear();
4789 Json(ApiResponse::success("Import history cleared".to_string()))
4790}
4791
#[cfg(test)]
mod tests {
    use super::*;

    // RequestMetrics should retain exactly the counts and samples it was
    // constructed with.
    #[test]
    fn test_request_metrics_creation() {
        use std::collections::HashMap;

        let m = RequestMetrics {
            total_requests: 100,
            active_connections: 5,
            requests_by_endpoint: HashMap::new(),
            response_times: vec![10, 20, 30],
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint: HashMap::new(),
            last_request_by_endpoint: HashMap::new(),
        };

        assert_eq!(m.total_requests, 100);
        assert_eq!(m.active_connections, 5);
        assert_eq!(m.response_times.len(), 3);
    }

    // SystemMetrics is a plain value holder; fields round-trip unchanged.
    #[test]
    fn test_system_metrics_creation() {
        let sys = SystemMetrics {
            cpu_usage_percent: 45.5,
            memory_usage_mb: 100,
            active_threads: 10,
        };

        assert_eq!(sys.active_threads, 10);
        assert!(sys.cpu_usage_percent > 0.0);
        assert_eq!(sys.memory_usage_mb, 100);
    }

    // A TimeSeriesPoint keeps the value it was built with.
    #[test]
    fn test_time_series_point() {
        let p = TimeSeriesPoint {
            timestamp: Utc::now(),
            value: 42.5,
        };

        assert_eq!(p.value, 42.5);
    }

    // RestartStatus fields round-trip unchanged.
    #[test]
    fn test_restart_status() {
        let s = RestartStatus {
            in_progress: true,
            initiated_at: Some(Utc::now()),
            reason: Some("Manual restart".to_string()),
            success: None,
        };

        assert!(s.in_progress);
        assert!(s.reason.is_some());
    }

    // ConfigurationState can be assembled from its component configs and
    // reads back the expected defaults.
    #[test]
    fn test_configuration_state() {
        use std::collections::HashMap;

        let cfg = ConfigurationState {
            latency_profile: LatencyProfile {
                name: "default".to_string(),
                base_ms: 100,
                jitter_ms: 10,
                tag_overrides: HashMap::new(),
            },
            fault_config: FaultConfig {
                enabled: false,
                failure_rate: 0.0,
                status_codes: vec![],
                active_failures: 0,
            },
            proxy_config: ProxyConfig {
                enabled: false,
                upstream_url: None,
                timeout_seconds: 30,
                requests_proxied: 0,
            },
            validation_settings: ValidationSettings {
                mode: "off".to_string(),
                aggregate_errors: false,
                validate_responses: false,
                overrides: HashMap::new(),
            },
            traffic_shaping: TrafficShapingConfig {
                enabled: false,
                bandwidth: crate::models::BandwidthConfig {
                    enabled: false,
                    max_bytes_per_sec: 1_048_576,
                    burst_capacity_bytes: 10_485_760,
                    tag_overrides: HashMap::new(),
                },
                burst_loss: crate::models::BurstLossConfig {
                    enabled: false,
                    burst_probability: 0.1,
                    burst_duration_ms: 5000,
                    loss_rate_during_burst: 0.5,
                    recovery_time_ms: 30000,
                    tag_overrides: HashMap::new(),
                },
            },
        };

        assert_eq!(cfg.latency_profile.name, "default");
        assert!(!cfg.fault_config.enabled);
        assert!(!cfg.proxy_config.enabled);
    }

    // AdminState::new wires through the HTTP address, API flag, and port.
    #[test]
    fn test_admin_state_new() {
        let http_addr: std::net::SocketAddr = "127.0.0.1:3000".parse().unwrap();
        let admin = AdminState::new(
            Some(http_addr),
            None,
            None,
            None,
            true,
            8080,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
        );

        assert_eq!(admin.http_server_addr, Some(http_addr));
        assert!(admin.api_enabled);
        assert_eq!(admin.admin_port, 8080);
    }
}